diff --git a/潇洒/api.json b/潇洒/api.json
index f677e09..637a553 100644
--- a/潇洒/api.json
+++ b/潇洒/api.json
@@ -974,6 +974,12 @@
"searchable": 1,
"changeable": 0
},
+ {
+ "key": "米搜",
+ "name": "米搜|搜索",
+ "type": 3,
+ "api": "csp_MiSou"
+ },
{
"key": "聚搜",
"name": "聚搜|搜索",
@@ -1034,10 +1040,10 @@
"filterable": 0
},
{
- "key": "河马短剧",
- "name": "河马|短剧",
+ "key": "甜圈短剧",
+ "name": "甜圈|短剧",
"type": 3,
- "api": "./py/河马短剧.py",
+ "api": "./py/甜圈短剧.py",
"searchable": 1,
"changeable": 1,
"quickSearch": 1,
diff --git a/潇洒/py/河马短剧.py b/潇洒/py/河马短剧.py
deleted file mode 100644
index 0085507..0000000
--- a/潇洒/py/河马短剧.py
+++ /dev/null
@@ -1,380 +0,0 @@
-# -*- coding: utf-8 -*-
-import requests
-import re
-import json
-import traceback
-import sys
-from urllib.parse import quote
-
-sys.path.append('../../')
-try:
- from base.spider import Spider
-except ImportError:
- # Define a stub base class for local testing
- class Spider:
- def init(self, extend=""):
- pass
-
-class Spider(Spider):
- def __init__(self):
- self.siteUrl = "https://www.kuaikaw.cn"
- self.cateManual = {
- "甜宠": "462",
- "古装仙侠": "1102",
- "现代言情": "1145",
- "青春": "1170",
- "豪门恩怨": "585",
- "逆袭": "417-464",
- "重生": "439-465",
- "系统": "1159",
- "总裁": "1147",
- "职场商战": "943"
- }
- self.headers = {
- "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36 Edg/120.0.0.0",
- "Referer": self.siteUrl,
- "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8",
- "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8"
- }
-
- def getName(self):
- return "河马短剧"
-
- def init(self, extend=""):
- return
-
- def fetch(self, url, headers=None, retry=2):
- """统一的网络请求接口"""
- if headers is None:
- headers = self.headers
-
- for i in range(retry + 1):
- try:
- response = requests.get(url, headers=headers, timeout=10, allow_redirects=True)
- response.raise_for_status()
- return response
- except Exception as e:
- if i == retry:
- print(f"请求异常: {url}, 错误: {str(e)}")
- return None
- continue
-
- def isVideoFormat(self, url):
- video_formats = ['.mp4', '.mkv', '.avi', '.wmv', '.m3u8', '.flv', '.rmvb']
- return any(format in url.lower() for format in video_formats)
-
- def manualVideoCheck(self):
- return False
-
- def homeContent(self, filter):
- result = {}
- classes = [{'type_name': k, 'type_id': v} for k, v in self.cateManual.items()]
- result['class'] = classes
-
- try:
- result['list'] = self.homeVideoContent()['list']
- except:
- result['list'] = []
- return result
-
- def homeVideoContent(self):
- videos = []
- try:
- response = self.fetch(self.siteUrl)
- if not response:
- return {'list': []}
-
- html_content = response.text
- next_data_pattern = r'<script id="__NEXT_DATA__" type="application/json">(.*?)</script>'
- next_data_match = re.search(next_data_pattern, html_content, re.DOTALL)
- if not next_data_match:
- return {'list': []}
-
- next_data_json = json.loads(next_data_match.group(1))
- page_props = next_data_json.get("props", {}).get("pageProps", {})
-
- # Process carousel (banner) data
- if "bannerList" in page_props:
- for banner in page_props["bannerList"]:
- if banner.get("bookId"):
- videos.append({
- "vod_id": f"/drama/{banner['bookId']}",
- "vod_name": banner.get("bookName", ""),
- "vod_pic": banner.get("coverWap", ""),
- "vod_remarks": f"{banner.get('statusDesc', '')} {banner.get('totalChapterNum', '')}集".strip()
- })
-
- # Process SEO category recommendations
- if "seoColumnVos" in page_props:
- for column in page_props["seoColumnVos"]:
- for book in column.get("bookInfos", []):
- if book.get("bookId"):
- videos.append({
- "vod_id": f"/drama/{book['bookId']}",
- "vod_name": book.get("bookName", ""),
- "vod_pic": book.get("coverWap", ""),
- "vod_remarks": f"{book.get('statusDesc', '')} {book.get('totalChapterNum', '')}集".strip()
- })
-
- # Deduplicate entries
- seen = set()
- unique_videos = []
- for video in videos:
- key = (video["vod_id"], video["vod_name"])
- if key not in seen:
- seen.add(key)
- unique_videos.append(video)
-
- except Exception as e:
- print(f"获取首页推荐内容出错: {e}")
- unique_videos = []
-
- return {'list': unique_videos}
-
- def categoryContent(self, tid, pg, filter, extend):
- result = {'list': [], 'page': pg, 'pagecount': 1, 'limit': 20, 'total': 0}
- url = f"{self.siteUrl}/browse/{tid}/{pg}"
-
- response = self.fetch(url)
- if not response:
- return result
-
- html_content = response.text
- next_data_match = re.search(r'<script id="__NEXT_DATA__" type="application/json">(.*?)</script>', html_content, re.DOTALL)
- if not next_data_match:
- return result
-
- try:
- next_data_json = json.loads(next_data_match.group(1))
- page_props = next_data_json.get("props", {}).get("pageProps", {})
-
- current_page = page_props.get("page", 1)
- total_pages = page_props.get("pages", 1)
- book_list = page_props.get("bookList", [])
-
- videos = []
- for book in book_list:
- if book.get("bookId"):
- videos.append({
- "vod_id": f"/drama/{book['bookId']}",
- "vod_name": book.get("bookName", ""),
- "vod_pic": book.get("coverWap", ""),
- "vod_remarks": f"{book.get('statusDesc', '')} {book.get('totalChapterNum', '')}集".strip()
- })
-
- result.update({
- 'list': videos,
- 'page': int(current_page),
- 'pagecount': total_pages,
- 'limit': len(videos),
- 'total': len(videos) * total_pages if videos else 0
- })
-
- except Exception as e:
- print(f"分类内容获取出错: {e}")
-
- return result
-
- def searchContent(self, key, quick, pg=1):
- return self.searchContentPage(key, quick, pg)
-
- def searchContentPage(self, key, quick, pg=1):
- result = {'list': [], 'page': pg, 'pagecount': 1, 'limit': 20, 'total': 0}
- search_url = f"{self.siteUrl}/search?searchValue={quote(key)}&page={pg}"
-
- response = self.fetch(search_url)
- if not response:
- return result
-
- html_content = response.text
- next_data_match = re.search(r'<script id="__NEXT_DATA__" type="application/json">(.*?)</script>', html_content, re.DOTALL)
- if not next_data_match:
- return result
-
- try:
- next_data_json = json.loads(next_data_match.group(1))
- page_props = next_data_json.get("props", {}).get("pageProps", {})
-
- total_pages = page_props.get("pages", 1)
- book_list = page_props.get("bookList", [])
-
- videos = []
- for book in book_list:
- if book.get("bookId"):
- videos.append({
- "vod_id": f"/drama/{book['bookId']}",
- "vod_name": book.get("bookName", ""),
- "vod_pic": book.get("coverWap", ""),
- "vod_remarks": f"{book.get('statusDesc', '')} {book.get('totalChapterNum', '')}集".strip()
- })
-
- result.update({
- 'list': videos,
- 'pagecount': total_pages,
- 'total': len(videos) * total_pages if videos else 0
- })
-
- except Exception as e:
- print(f"搜索内容出错: {e}")
-
- return result
-
- def detailContent(self, ids):
- result = {'list': []}
- if not ids:
- return result
-
- vod_id = ids[0]
- if not vod_id.startswith('/drama/'):
- vod_id = f'/drama/{vod_id}'
-
- drama_url = f"{self.siteUrl}{vod_id}"
- response = self.fetch(drama_url)
- if not response:
- return result
-
- html = response.text
- next_data_match = re.search(r'<script id="__NEXT_DATA__" type="application/json">(.*?)</script>', html, re.DOTALL)
- if not next_data_match:
- return result
-
- try:
- next_data = json.loads(next_data_match.group(1))
- page_props = next_data.get("props", {}).get("pageProps", {})
- book_info = page_props.get("bookInfoVo", {})
- chapter_list = page_props.get("chapterList", [])
-
- if not book_info.get("bookId"):
- return result
-
- # Basic info
- categories = [c.get("name", "") for c in book_info.get("categoryList", [])]
- performers = [p.get("name", "") for p in book_info.get("performerList", [])]
-
- vod = {
- "vod_id": vod_id,
- "vod_name": book_info.get("title", ""),
- "vod_pic": book_info.get("coverWap", ""),
- "type_name": ",".join(categories),
- "vod_year": "",
- "vod_area": book_info.get("countryName", ""),
- "vod_remarks": f"{book_info.get('statusDesc', '')} {book_info.get('totalChapterNum', '')}集".strip(),
- "vod_actor": ", ".join(performers),
- "vod_director": "",
- "vod_content": book_info.get("introduction", "")
- }
-
- # Build episode list
- play_urls = self.processEpisodes(vod_id, chapter_list)
- if play_urls:
- vod['vod_play_from'] = '河马剧场'
- vod['vod_play_url'] = '$$$'.join(play_urls)
-
- result['list'] = [vod]
-
- except Exception as e:
- print(f"详情页解析出错: {e}")
- traceback.print_exc()
-
- return result
-
- def processEpisodes(self, vod_id, chapter_list):
- play_urls = []
- episodes = []
-
- for chapter in chapter_list:
- chapter_id = chapter.get("chapterId", "")
- chapter_name = chapter.get("chapterName", "")
-
- if not chapter_id or not chapter_name:
- continue
-
- # Try to get a direct video URL
- video_url = self.getDirectVideoUrl(chapter)
- if video_url:
- episodes.append(f"{chapter_name}${video_url}")
- continue
-
- # Fallback
- episodes.append(f"{chapter_name}${vod_id}${chapter_id}${chapter_name}")
-
- if episodes:
- play_urls.append("#".join(episodes))
-
- return play_urls
-
- def getDirectVideoUrl(self, chapter):
- if "chapterVideoVo" not in chapter or not chapter["chapterVideoVo"]:
- return None
-
- video_info = chapter["chapterVideoVo"]
- for key in ["mp4", "mp4720p", "vodMp4Url"]:
- if key in video_info and video_info[key] and ".mp4" in video_info[key].lower():
- return video_info[key]
- return None
-
- def playerContent(self, flag, id, vipFlags):
- result = {
- "parse": 0,
- "url": id,
- "header": json.dumps(self.headers)
- }
-
- # Return directly if it is already a video URL
- if 'http' in id and ('.mp4' in id or '.m3u8' in id):
- return result
-
- # Parse parameters
- parts = id.split('$')
- if len(parts) < 2:
- return result
-
- drama_id = parts[0].replace('/drama/', '')
- chapter_id = parts[1]
-
- # Try to resolve the video URL
- video_url = self.getEpisodeVideoUrl(drama_id, chapter_id)
- if video_url:
- result["url"] = video_url
-
- return result
-
- def getEpisodeVideoUrl(self, drama_id, chapter_id):
- episode_url = f"{self.siteUrl}/episode/{drama_id}/{chapter_id}"
- response = self.fetch(episode_url)
- if not response:
- return None
-
- html = response.text
-
- # Method 1: extract from NEXT_DATA
- next_data_match = re.search(r'<script id="__NEXT_DATA__" type="application/json">(.*?)</script>', html, re.DOTALL)
- if next_data_match:
- try:
- next_data = json.loads(next_data_match.group(1))
- page_props = next_data.get("props", {}).get("pageProps", {})
- chapter_info = page_props.get("chapterInfo", {})
-
- if chapter_info and "chapterVideoVo" in chapter_info:
- video_info = chapter_info["chapterVideoVo"]
- for key in ["mp4", "mp4720p", "vodMp4Url"]:
- if key in video_info and video_info[key] and ".mp4" in video_info[key].lower():
- return video_info[key]
- except:
- pass
-
- # Method 2: extract directly from the HTML
- mp4_matches = re.findall(r'(https?://[^"\']+\.mp4)', html)
- if mp4_matches:
- for url in mp4_matches:
- if chapter_id in url or drama_id in url:
- return url
- return mp4_matches[0]
-
- return None
-
- def localProxy(self, param):
- return [200, "video/MP2T", {}, param]
-
- def destroy(self):
- pass
\ No newline at end of file
diff --git a/潇洒/py/甜圈短剧.py b/潇洒/py/甜圈短剧.py
new file mode 100644
index 0000000..40cac38
--- /dev/null
+++ b/潇洒/py/甜圈短剧.py
@@ -0,0 +1,156 @@
+# -*- coding: utf-8 -*-
+# by @嗷呜
+import sys
+sys.path.append('..')
+from base.spider import Spider
+
+class Spider(Spider):
+
+ def init(self, extend=""):
+ pass
+
+ def getName(self):
+ return "甜圈短剧"
+
+ def isVideoFormat(self, url):
+ return True
+
+ def manualVideoCheck(self):
+ return False
+
+ def destroy(self):
+ pass
+
+ # Updated to the new domain
+ ahost = 'https://mov.cenguigui.cn'
+
+ headers = {
+ 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36',
+ 'sec-ch-ua-platform': '"macOS"',
+ 'sec-ch-ua': '"Not/A)Brand";v="8", "Chromium";v="134", "Google Chrome";v="134"',
+ 'DNT': '1',
+ 'sec-ch-ua-mobile': '?0',
+ 'Sec-Fetch-Site': 'cross-site',
+ 'Sec-Fetch-Mode': 'no-cors',
+ 'Sec-Fetch-Dest': 'video',
+ 'Sec-Fetch-Storage-Access': 'active',
+ 'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
+ }
+
+ def homeContent(self, filter):
+ result = {'class': [{'type_id': '推荐榜', 'type_name': '🔥 推荐榜'},
+ {'type_id': '新剧', 'type_name': '🎬 新剧'},
+ {'type_id': '逆袭', 'type_name': '🎬 逆袭'},
+ {'type_id': '霸总', 'type_name': '🎬 霸总'},
+ {'type_id': '现代言情', 'type_name': '🎬 现代言情'},
+ {'type_id': '打脸虐渣', 'type_name': '🎬 打脸虐渣'},
+ {'type_id': '豪门恩怨', 'type_name': '🎬 豪门恩怨'},
+ {'type_id': '神豪', 'type_name': '🎬 神豪'},
+ {'type_id': '马甲', 'type_name': '🎬 马甲'},
+ {'type_id': '都市日常', 'type_name': '🎬 都市日常'},
+ {'type_id': '战神归来', 'type_name': '🎬 战神归来'},
+ {'type_id': '小人物', 'type_name': '🎬 小人物'},
+ {'type_id': '女性成长', 'type_name': '🎬 女性成长'},
+ {'type_id': '大女主', 'type_name': '🎬 大女主'},
+ {'type_id': '穿越', 'type_name': '🎬 穿越'},
+ {'type_id': '都市修仙', 'type_name': '🎬 都市修仙'},
+ {'type_id': '强者回归', 'type_name': '🎬 强者回归'},
+ {'type_id': '亲情', 'type_name': '🎬 亲情'},
+ {'type_id': '古装', 'type_name': '🎬 古装'},
+ {'type_id': '重生', 'type_name': '🎬 重生'},
+ {'type_id': '闪婚', 'type_name': '🎬 闪婚'},
+ {'type_id': '赘婿逆袭', 'type_name': '🎬 赘婿逆袭'},
+ {'type_id': '虐恋', 'type_name': '🎬 虐恋'},
+ {'type_id': '追妻', 'type_name': '🎬 追妻'},
+ {'type_id': '天下无敌', 'type_name': '🎬 天下无敌'},
+ {'type_id': '家庭伦理', 'type_name': '🎬 家庭伦理'},
+ {'type_id': '萌宝', 'type_name': '🎬 萌宝'},
+ {'type_id': '古风权谋', 'type_name': '🎬 古风权谋'},
+ {'type_id': '职场', 'type_name': '🎬 职场'},
+ {'type_id': '奇幻脑洞', 'type_name': '🎬 奇幻脑洞'},
+ {'type_id': '异能', 'type_name': '🎬 异能'},
+ {'type_id': '无敌神医', 'type_name': '🎬 无敌神医'},
+ {'type_id': '古风言情', 'type_name': '🎬 古风言情'},
+ {'type_id': '传承觉醒', 'type_name': '🎬 传承觉醒'},
+ {'type_id': '现言甜宠', 'type_name': '🎬 现言甜宠'},
+ {'type_id': '奇幻爱情', 'type_name': '🎬 奇幻爱情'},
+ {'type_id': '乡村', 'type_name': '🎬 乡村'},
+ {'type_id': '历史古代', 'type_name': '🎬 历史古代'},
+ {'type_id': '王妃', 'type_name': '🎬 王妃'},
+ {'type_id': '高手下山', 'type_name': '🎬 高手下山'},
+ {'type_id': '娱乐圈', 'type_name': '🎬 娱乐圈'},
+ {'type_id': '强强联合', 'type_name': '🎬 强强联合'},
+ {'type_id': '破镜重圆', 'type_name': '🎬 破镜重圆'},
+ {'type_id': '暗恋成真', 'type_name': '🎬 暗恋成真'},
+ {'type_id': '民国', 'type_name': '🎬 民国'},
+ {'type_id': '欢喜冤家', 'type_name': '🎬 欢喜冤家'},
+ {'type_id': '系统', 'type_name': '🎬 系统'},
+ {'type_id': '真假千金', 'type_name': '🎬 真假千金'},
+ {'type_id': '龙王', 'type_name': '🎬 龙王'},
+ {'type_id': '校园', 'type_name': '🎬 校园'},
+ {'type_id': '穿书', 'type_name': '🎬 穿书'},
+ {'type_id': '女帝', 'type_name': '🎬 女帝'},
+ {'type_id': '团宠', 'type_name': '🎬 团宠'},
+ {'type_id': '年代爱情', 'type_name': '🎬 年代爱情'},
+ {'type_id': '玄幻仙侠', 'type_name': '🎬 玄幻仙侠'},
+ {'type_id': '青梅竹马', 'type_name': '🎬 青梅竹马'},
+ {'type_id': '悬疑推理', 'type_name': '🎬 悬疑推理'},
+ {'type_id': '皇后', 'type_name': '🎬 皇后'},
+ {'type_id': '替身', 'type_name': '🎬 替身'},
+ {'type_id': '大叔', 'type_name': '🎬 大叔'},
+ {'type_id': '喜剧', 'type_name': '🎬 喜剧'},
+ {'type_id': '剧情', 'type_name': '🎬 剧情'}]}
+ return result
+
+ def homeVideoContent(self):
+ return []
+
+ def categoryContent(self, tid, pg, filter, extend):
+ params = {
+ 'classname': tid,
+ 'offset': str((int(pg) - 1)),
+ }
+ # Updated request path to /duanju/api.php
+ data = self.fetch(f'{self.ahost}/duanju/api.php', params=params, headers=self.headers).json()
+ videos = []
+ for k in data['data']:
+ videos.append({
+ 'vod_id': k.get('book_id'),
+ 'vod_name': k.get('title'),
+ 'vod_pic': k.get('cover'),
+ 'vod_year': k.get('score'),
+ 'vod_remarks': f"{k.get('sub_title')}|{k.get('episode_cnt')}"
+ })
+ result = {}
+ result['list'] = videos
+ result['page'] = pg
+ result['pagecount'] = 9999
+ result['limit'] = 90
+ result['total'] = 999999
+ return result
+
+ def detailContent(self, ids):
+ # Updated request path to /duanju/api.php
+ v = self.fetch(f'{self.ahost}/duanju/api.php', params={'book_id': ids[0]}, headers=self.headers).json()
+ vod = {
+ 'vod_id': ids[0],
+ 'vod_name': v.get('title'),
+ 'type_name': v.get('category'),
+ 'vod_year': v.get('time'),
+ 'vod_remarks': v.get('duration'),
+ 'vod_content': v.get('desc'),
+ 'vod_play_from': '爱看短剧',
+ 'vod_play_url': '#'.join([f"{i['title']}${i['video_id']}" for i in v['data']])
+ }
+ return {'list': [vod]}
+
+ def searchContent(self, key, quick, pg="1"):
+ return self.categoryContent(key, pg, True, {})
+
+ def playerContent(self, flag, id, vipFlags):
+ # Updated request path to /duanju/api.php
+ data = self.fetch(f'{self.ahost}/duanju/api.php', params={'video_id': id}, headers=self.headers).json()
+ return {'parse': 0, 'url': data['data']['url'], 'header': self.headers}
+
+ def localProxy(self, param):
+ pass
\ No newline at end of file
diff --git a/潇洒/spider.jar b/潇洒/spider.jar
index 96bde97..940812c 100644
Binary files a/潇洒/spider.jar and b/潇洒/spider.jar differ