Commit 7ac7efa1 by baiquan

添加上传视频代理验证

parent ebe6d9a1
...@@ -28,3 +28,5 @@ retrying~=1.4.0 ...@@ -28,3 +28,5 @@ retrying~=1.4.0
python-dateutil python-dateutil
python-socks~=2.7.1 python-socks~=2.7.1
PySocks PySocks
requests-toolbelt~=1.0.0
\ No newline at end of file
import json
import random import random
from urllib.parse import urlencode from urllib.parse import urlencode
...@@ -135,18 +136,45 @@ def upload_image_by_bytes(cookies, image_bytes): ...@@ -135,18 +136,45 @@ def upload_image_by_bytes(cookies, image_bytes):
return response.json()['data'] return response.json()['data']
def get_prettify_info(cookies, category_id, url_list): def get_prettify_info(category_id, url_list):
prettify_info = [] prettify_info = []
for url in url_list: for i, url in enumerate(url_list):
component_front_data = {
"imgList": [url],
"uploadSource": "local_upload",
'droppedEventTriggered': True,
"image": {
"url": url,
"width": 1080,
"height": 980
},
"$$name$$": f"图片{i+1}"
}
component_front_data = json.dumps(component_front_data)
component_data = {
"url": url
}
component_data = json.dumps(component_data)
prettify_info.append({ prettify_info.append({
'id': 2,
'component_type_id': 2,
'component_front_data': component_front_data,
'component_data': component_data,
'image': { 'image': {
'url': url, 'url': url,
'width': 1080,
'height': 980,
}, },
}) })
json_data = { json_data = {
'category_id': int(category_id), 'category_id': category_id,
'prettify_info': prettify_info 'prettify_info': prettify_info,
'check_status': 2,
'appid': 1,
} }
print(json_data)
response = requests.post( response = requests.post(
'https://fxg.jinritemai.com/product/prettify/formatPrettifyForProduct', 'https://fxg.jinritemai.com/product/prettify/formatPrettifyForProduct',
cookies=cookies, cookies=cookies,
......
...@@ -22,7 +22,7 @@ HEADERS = { ...@@ -22,7 +22,7 @@ HEADERS = {
@retry(stop_max_attempt_number=3, wait_fixed=3000) @retry(stop_max_attempt_number=3, wait_fixed=3000)
def doudian_request(method: str, url: str, proxies:dict, params:dict=None, data: str = None, json: dict = None,headers=None, cookies=None, match_str="") -> any: def doudian_request(method: str, url: str, proxies:dict, params:dict=None, data: any = None, json: dict = None,headers=None, cookies=None, match_str="") -> any:
if headers is None: if headers is None:
headers = HEADERS headers = HEADERS
logger.info(f'doudian_request-->{url}') logger.info(f'doudian_request-->{url}')
......
import asyncio
import hashlib
import json
import os
import requests
from loguru import logger
from requests_toolbelt.multipart.encoder import MultipartEncoder
from config import settings
from service.doudian_request import doudian_request
from service.upload_video import upload_video_with_multithreading, download_video
from utils.common import check_proxy
def get_local_path(item_id, url):
    """Return the local cache path for *url* under this item's folder.

    Non-video files are downloaded on a cache miss; ``.mp4`` files are
    never downloaded here and must already exist locally (videos are
    prepared by a separate pipeline — see `upload_videos`).

    :param item_id: item identifier; names the cache sub-folder.
    :param url: remote resource URL (any query string is ignored).
    :return: path of the local file.
    :raises FileNotFoundError: if the item folder is missing, or a
        required ``.mp4`` file is absent.
    :raises requests.HTTPError: if the download returns an error status.
    """
    folder_path = os.path.join(settings.BASE_PATH, str(item_id))
    if not os.path.exists(folder_path):
        raise FileNotFoundError(f"文件夹不存在: {folder_path}")
    # Strip the query string so the basename is a stable file name.
    if "?" in url:
        url = url.split("?")[0]
    file_path = os.path.join(folder_path, os.path.basename(url))
    if not os.path.exists(file_path):
        if file_path.endswith(".mp4"):
            raise FileNotFoundError(f"文件不存在: {file_path}")
        logger.info(f"{file_path} 文件不存在,开始下载")
        # Stream in chunks and close the connection when done; fail fast on
        # HTTP errors instead of silently caching an error page as an image.
        with requests.get(url, stream=True) as response:
            response.raise_for_status()
            with open(file_path, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    if chunk:
                        f.write(chunk)
    return file_path
def upload_image_by_bytes(cookies, headers, proxies, image_path_list):
    """Upload local image files to the shop backend in batches of 10.

    :param cookies: session cookies for the shop backend.
    :param headers: base request headers; Content-Type is overridden per batch.
    :param proxies: proxy configuration passed to `doudian_request`.
    :param image_path_list: list of single-entry dicts ``{key: local_path}``;
        the key (an md5 of the original URL — see callers) is preserved in
        the result.
    :return: dict mapping each original key to its uploaded image URL,
        or None if any batch fails.
    """
    result_dict = {}
    batch_size = 10  # backend accepts at most 10 images per request
    for batch_start in range(0, len(image_path_list), batch_size):
        batch = image_path_list[batch_start:batch_start + batch_size]
        file_handles = []  # opened inside try so `finally` always closes them
        try:
            # Build the multipart form: image[0], image[1], ...
            fields = {}
            for idx, entry in enumerate(batch):
                original_key = next(iter(entry.keys()))
                local_path = entry[original_key]
                file_handle = open(local_path, 'rb')
                file_handles.append(file_handle)
                fields[f'image[{idx}]'] = ('image.jpg', file_handle, 'image/jpeg')
            multipart_data = MultipartEncoder(fields=fields)
            headers_batch = headers.copy()
            headers_batch['Content-Type'] = multipart_data.content_type
            url = 'https://fxg.jinritemai.com/product/img/batchupload?_bid=ffa_goods'
            response = doudian_request('POST', url, proxies, data=multipart_data,
                                       headers=headers_batch, cookies=cookies)
            batch_urls = response['data']
            # The backend returns URLs in submission order; map them back
            # to the caller-supplied keys.
            for offset, img_url in enumerate(batch_urls):
                source_entry = image_path_list[batch_start + offset]
                original_key = next(iter(source_entry.keys()))
                result_dict[original_key] = img_url
        except Exception as e:
            logger.error(e)
            return None
        finally:
            for fh in file_handles:
                fh.close()
    logger.info(result_dict)
    return result_dict
def _build_upload_entries(item_id, urls):
    """Map each URL to a single-entry dict ``{md5(url): local_path}``.

    The md5 key lets the caller correlate uploaded URLs back to the
    original remote URLs in the callback payload.
    """
    entries = []
    for url in urls:
        md5_key = hashlib.md5(url.encode()).hexdigest()
        entries.append({md5_key: get_local_path(item_id, url)})
    return entries


async def uploadImageAndVideo(task: dict = None):
    """
    Upload a task's SKU images, main images, detail images and videos,
    then assemble the callback payload.

    :param task: task dict with keys ``proxies`` (addr/port/username/password),
        ``cookie``, ``headers``, ``id``, ``skus``, ``images``,
        ``description`` and ``video_list``.
    :return: None (the payload is currently only logged; the callback
        call is commented out below).
    """
    proxy_conf = task.get("proxies")
    proxy_url = (
        f"socks5h://{proxy_conf['username']}:{proxy_conf['password']}"
        f"@{proxy_conf['addr']}:{proxy_conf['port']}"
    )
    proxies = check_proxy(proxy_url)
    cookies = task.get('cookie')
    headers = task.get('headers')
    item_id = task.get('id')
    # SKU images live inside nested dict values under an optional 'image' key.
    sku_image_urls = []
    for sku in task.get('skus') or []:
        for value in sku.values():
            if isinstance(value, dict) and value.get('image'):
                sku_image_urls.append(value['image'])
    sku_image_list = _build_upload_entries(item_id, sku_image_urls)
    # Main images and detail (description) images.
    image_list = _build_upload_entries(item_id, task.get('images', []))
    description_list = _build_upload_entries(item_id, task.get('description', []))
    try:
        # Run the three image batches in worker threads and the video
        # uploads as a coroutine, all concurrently.
        sku_image_dict, image_dict, description_dict, video_dict = await asyncio.gather(
            run_in_executor(upload_image_by_bytes, cookies, headers, proxies, sku_image_list),
            run_in_executor(upload_image_by_bytes, cookies, headers, proxies, image_list),
            run_in_executor(upload_image_by_bytes, cookies, headers, proxies, description_list),
            upload_videos(task, item_id)
        )
    except Exception as e:
        logger.error(f"上传过程中发生错误: {str(e)}")
        return None
    # Assemble the callback payload keyed by the md5 of each source URL.
    callback_data = {
        "id": item_id,
        "skus": sku_image_dict,
        "images": image_dict,
        "description": description_dict,
        "video_list": video_dict
    }
    logger.info(json.dumps(callback_data))
    # await callback_task(callback_data)
async def run_in_executor(func, *args):
    """Await a blocking callable on the default thread-pool executor."""
    return await asyncio.get_running_loop().run_in_executor(None, func, *args)
async def upload_videos(task: dict, item_id: str):
    """Upload every video in ``task['video_list']`` concurrently.

    :param task: task dict; a shallow copy is handed to each upload so the
        per-video ``file_path`` fields don't collide.
    :param item_id: item identifier used to locate local video files.
    :return: dict mapping md5(original_url) -> MainPlayUrl for each video
        whose upload succeeded.
    """
    pending = []
    for video_url in task.get('video_list', []):
        logger.info(f"开始处理视频:{video_url}")
        local_file = get_local_path(item_id, video_url)
        pending.append(upload_single_video(task.copy(), local_file, video_url))
    outcomes = await asyncio.gather(*pending)
    # Keep only successful uploads that carry a playable URL.
    return {
        key: payload['video_info']['MainPlayUrl']
        for key, payload in outcomes
        if payload and 'video_info' in payload and 'MainPlayUrl' in payload['video_info']
    }
async def upload_single_video(task: dict, local_path: any, original_url: str):
    """Upload one video file in a worker thread.

    :param task: per-video task dict (mutated: ``file_path`` is set here).
    :param local_path: path of the already-downloaded video file.
    :param original_url: source URL; its md5 keys the result.
    :return: ``(md5_key, result)`` — ``result`` is None on failure so the
        caller can filter unsuccessful uploads.
    """
    md5_key = hashlib.md5(original_url.encode()).hexdigest()
    task['file_path'] = local_path
    try:
        result = await run_in_executor(upload_video_with_multithreading, task)
    except Exception as e:
        logger.error(f'视频上传失败: {original_url}, 错误: {str(e)}')
        return md5_key, None
    logger.success(f'视频上传成功: {result}')
    return md5_key, result
...@@ -684,8 +684,10 @@ def prepare_video_file(task): ...@@ -684,8 +684,10 @@ def prepare_video_file(task):
download_video(task['video_url'], file_path, headers=task['headers']) download_video(task['video_url'], file_path, headers=task['headers'])
else: else:
file_path = task.get("file_path") file_path = task.get("file_path")
if not os.path.exists(file_path):
raise Exception(f"视频文件 {file_path} 不存在")
if is_video_corrupted(file_path): if is_video_corrupted(file_path):
raise Exception("视频文件已损坏") raise Exception(f"视频文件 {file_path} 已损坏")
video_duration = get_video_duration(file_path) video_duration = get_video_duration(file_path)
if video_duration > 60: if video_duration > 60:
logger.error("视频时长大于60秒,上传失败") logger.error("视频时长大于60秒,上传失败")
......
[development] [development]
HUB_APP_ID = "password" HUB_APP_ID = "password"
[production] [production]
DEBUG = true DEBUG = true
...@@ -30,3 +31,5 @@ DB_USER = "root" ...@@ -30,3 +31,5 @@ DB_USER = "root"
DB_PASSWORD = "123456" DB_PASSWORD = "123456"
DB_NAME = "doudian" DB_NAME = "doudian"
DB_CHARSET = "utf8mb4" DB_CHARSET = "utf8mb4"
BASE_PATH = "D://"
\ No newline at end of file
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment