五五短剧.py

  1. """
  2. 作者 凯悦宾馆 🚓 内容均从互联网收集而来 仅供交流学习使用 版权归原创者所有 如侵犯了您的权益 请通知作者 将及时删除侵权内容
  3. ====================kaiyuebinguan====================
  4. """
  5. import requests
  6. from bs4 import BeautifulSoup
  7. import re
  8. from base.spider import Spider
  9. import sys
  10. import json
  11. import base64
  12. import urllib.parse
  13. sys.path.append('..')
  14. xurl = "http://www.45b7.com"
  15. headerx1 = {
  16. 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.87 Safari/537.36'
  17. }
  18. headerx = {
  19. 'User-Agent': 'Linux; Android 12; Pixel 3 XL) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.101 Mobile Safari/537.36'
  20. }
  21. pm = ''
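

# ---------------------------------------------------------------------------
# Host contract (an assumption based on the base.spider interface rather than
# anything stated in this file): a TVBox-style host instantiates the Spider
# subclass below and drives it through homeContent / categoryContent /
# detailContent / playerContent / searchContent; the dicts they return feed
# the host UI directly.
# ---------------------------------------------------------------------------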
class Spider(Spider):
    global xurl
    global headerx

    def getName(self):
        return "首页"

    def init(self, extend):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass
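
    # extract_middle_text() is the scraper's generic "cut then regex" helper;
    # `pl` selects the mode:
    #   pl == 0 -> return the raw text between start_str and end_str,
    #              with backslashes stripped
    #   pl == 1 -> run regex `start_index1` over that slice, join matches
    #              with spaces
    #   pl == 2 -> same, but prefix each match with '✨拾光👉' and join
    #              with '$$$'
    #   pl == 3 -> repeatedly cut start/end blocks out of the page and build
    #              a play list (see the worked example after the method)
    # `end_index2` is accepted for call-site compatibility but never used.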
    def extract_middle_text(self, text, start_str, end_str, pl, start_index1: str = '', end_index2: str = ''):
        if pl == 3:
            plx = []
            while True:
                start_index = text.find(start_str)
                if start_index == -1:
                    break
                end_index = text.find(end_str, start_index + len(start_str))
                if end_index == -1:
                    break
                middle_text = text[start_index + len(start_str):end_index]
                plx.append(middle_text)
                text = text.replace(start_str + middle_text + end_str, '')
            if len(plx) > 0:
                purl = ''
                for i in range(len(plx)):
                    matches = re.findall(start_index1, plx[i])
                    output = ""
                    for match in matches:
                        match3 = re.search(r'(?:^|[^0-9])(\d+)(?:[^0-9]|$)', match[1])
                        if match3:
                            number = match3.group(1)
                        else:
                            number = 0
                        if 'http' not in match[0]:
                            output += f"#{'📽️拾光👉' + match[1]}${number}{xurl}{match[0]}"
                        else:
                            output += f"#{'📽️拾光👉' + match[1]}${number}{match[0]}"
                    output = output[1:]
                    purl = purl + output + "$$$"
                purl = purl[:-3]
                return purl
            else:
                return ""
        else:
            start_index = text.find(start_str)
            if start_index == -1:
                return ""
            end_index = text.find(end_str, start_index + len(start_str))
            if end_index == -1:
                return ""
            if pl == 0:
                middle_text = text[start_index + len(start_str):end_index]
                return middle_text.replace("\\", "")
            if pl == 1:
                middle_text = text[start_index + len(start_str):end_index]
                matches = re.findall(start_index1, middle_text)
                if matches:
                    jg = ' '.join(matches)
                    return jg
            if pl == 2:
                middle_text = text[start_index + len(start_str):end_index]
                matches = re.findall(start_index1, middle_text)
                if matches:
                    new_list = [f'✨拾光👉{item}' for item in matches]
                    jg = '$$$'.join(new_list)
                    return jg
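
    # Worked example for pl == 3 (the sample markup is illustrative, not taken
    # from the live site): given
    #   <div class="adm-swiper-item"><a href="/play/1.html"> 第01集 </a></div>
    # and the pattern r'href="(.*?)">\s+(.*?)\s+</a>', the helper emits
    #   📽️拾光👉第01集$01http://www.45b7.com/play/1.html
    # i.e. "title$episodeNumber + pageURL"; playerContent() later recovers
    # the page URL by splitting on "http".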
    def homeContent(self, filter):
        result = {"class": [{"type_id": "都市", "type_name": "都市🌠"},
                            {"type_id": "赘婿", "type_name": "赘婿🌠"},
                            {"type_id": "战神", "type_name": "战神🌠"},
                            {"type_id": "古代", "type_name": "古代🌠"},
                            {"type_id": "现代", "type_name": "现代🌠"},
                            {"type_id": "历史", "type_name": "历史🌠"},
                            {"type_id": "脑洞", "type_name": "脑洞🌠"},
                            {"type_id": "玄幻", "type_name": "玄幻🌠"},
                            {"type_id": "搞笑", "type_name": "搞笑🌠"},
                            {"type_id": "喜剧", "type_name": "喜剧🌠"},
                            {"type_id": "萌宝", "type_name": "萌宝🌠"},
                            {"type_id": "神豪", "type_name": "神豪🌠"},
                            {"type_id": "致富", "type_name": "致富🌠"},
                            {"type_id": "奇幻", "type_name": "奇幻🌠"},
                            {"type_id": "超能", "type_name": "超能🌠"},
                            {"type_id": "强者", "type_name": "强者🌠"},
                            {"type_id": "甜宠", "type_name": "甜宠🌠"},
                            {"type_id": "励志", "type_name": "励志🌠"},
                            {"type_id": "豪门", "type_name": "豪门🌠"},
                            {"type_id": "复仇", "type_name": "复仇🌠"},
                            {"type_id": "长生", "type_name": "长生🌠"},
                            {"type_id": "神医", "type_name": "神医🌠"},
                            {"type_id": "马甲", "type_name": "马甲🌠"},
                            {"type_id": "亲情", "type_name": "亲情🌠"},
                            {"type_id": "人物", "type_name": "人物🌠"},
                            {"type_id": "无敌", "type_name": "无敌🌠"},
                            {"type_id": "现实", "type_name": "现实🌠"},
                            {"type_id": "重生", "type_name": "重生🌠"},
                            {"type_id": "闪婚", "type_name": "闪婚🌠"},
                            {"type_id": "职场", "type_name": "职场🌠"},
                            {"type_id": "穿越", "type_name": "穿越🌠"},
                            {"type_id": "年代", "type_name": "年代🌠"},
                            {"type_id": "权谋", "type_name": "权谋🌠"},
                            {"type_id": "高手", "type_name": "高手🌠"},
                            {"type_id": "悬疑", "type_name": "悬疑🌠"},
                            {"type_id": "情仇", "type_name": "情仇🌠"},
                            {"type_id": "虐恋", "type_name": "虐恋🌠"},
                            {"type_id": "古装", "type_name": "古装🌠"},
                            {"type_id": "时空", "type_name": "时空🌠"},
                            {"type_id": "欢喜", "type_name": "欢喜🌠"},
                            {"type_id": "觉醒", "type_name": "觉醒🌠"},
                            {"type_id": "情感", "type_name": "情感🌠"},
                            {"type_id": "逆袭", "type_name": "逆袭🌠"},
                            {"type_id": "家庭", "type_name": "家庭🌠"}]}
        return result
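
    # Each type_id above is substituted verbatim into the /vodshow/ URL
    # template in categoryContent(), so the ids must match the tag names the
    # site itself uses; the 🌠 suffix in type_name is display-only decoration.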
    def homeVideoContent(self):
        videos = []
        try:
            detail = requests.get(url=xurl, headers=headerx)
            detail.encoding = "utf-8"
            res = detail.text
            doc = BeautifulSoup(res, "lxml")
            soups = doc.find_all('div', class_="FeaturedList_featuredBox")
            for soup in soups:
                vods = soup.find_all('div', class_="FeaturedList_featuredItem")
                for vod in vods:
                    names = vod.find('a', class_="FeaturedList_bookName")
                    name = names.text.strip()
                    id = names['href']
                    pics = vod.find('a', class_="image_imageBox")
                    pic = pics.find('img')['src']
                    if 'http' not in pic:
                        pic = xurl + pic
                    remarks = vod.find('a', class_="FeaturedList_lastChapter")
                    remark = remarks.text.strip()
                    videos.append({
                        "vod_id": id,
                        "vod_name": name,
                        "vod_pic": pic,
                        "vod_remarks": '拾光推荐📽️' + remark
                    })
        except Exception:
            pass
        return {'list': videos}
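
    # The CSS class names used throughout this file (FeaturedList_*,
    # BrowseList_*, MTagBookList_*, image_*) appear to come from the site's
    # front-end build; if the site ships a new build these prefixes will
    # change, and since errors are swallowed the spider then silently
    # degrades to returning empty lists.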
    def categoryContent(self, cid, pg, filter, ext):
        page = int(pg) if pg else 1
        videos = []
        if page == 1:
            url = f'{xurl}/vodshow/1---{cid}--------.html'
        else:
            url = f'{xurl}/vodshow/1---{cid}-----{page}---.html'
        try:
            detail = requests.get(url=url, headers=headerx1)
            detail.encoding = "utf-8"
            res = detail.text
            doc = BeautifulSoup(res, "lxml")
            soups = doc.find_all('div', class_="BrowseList_listBox")
            for soup in soups:
                vods = soup.find_all('div', class_="BrowseList_itemBox")
                for vod in vods:
                    links = vod.find('a', class_="image_imageScaleBox")
                    name = links.find('img')['alt']
                    id = links['href']
                    pic = links.find('img')['src']
                    if 'http' not in pic:
                        pic = xurl + pic
                    remarks = vod.find('a', class_="BrowseList_totalChapterNum")
                    remark = remarks.text.strip()
                    videos.append({
                        "vod_id": id,
                        "vod_name": name,
                        "vod_pic": pic,
                        "vod_remarks": '集多推荐📽️' + remark
                    })
        except Exception:
            pass
        result = {'list': videos}
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result
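
    # URL shapes produced above, with 都市 as an example cid:
    #   page 1: http://www.45b7.com/vodshow/1---都市--------.html
    #   page N: http://www.45b7.com/vodshow/1---都市-----N---.html
    # pagecount/limit/total are hard-coded sentinels; the host is presumably
    # expected to keep paging until a page comes back empty.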
    def detailContent(self, ids):
        global pm
        did = ids[0]
        result = {}
        videos = []
        if 'http' not in did:
            did = xurl + did
        res1 = requests.get(url=did, headers=headerx)
        res1.encoding = "utf-8"
        res = res1.text
        content = '😸拾光趣乐屋🎉为您介绍剧情📢本资源来源于网络🚓侵权请联系删除👉' + self.extract_middle_text(res, 'name="description" content=', '/>', 0)
        content = content.replace('\r', '').replace('\n', '').replace(' ', '')
        bofang = self.extract_middle_text(res, '<div class="adm-swiper-item', '</div>', 3, r'href="(.*?)">\s+(.*?)\s+</a>')
        bofang = bofang.replace('$$$', '#')
        videos.append({
            "vod_id": did,
            "vod_actor": '😸拾光',
            "vod_director": '😸拾光',
            "vod_content": content,
            "vod_play_from": '😸拾光专线',
            "vod_play_url": bofang
        })
        result['list'] = videos
        return result
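
    # vod_play_url format: each episode is a "title$playId" pair and episodes
    # are joined with '#'.  extract_middle_text(pl=3) separates its source
    # blocks with '$$$', which is flattened to '#' above so every episode
    # lands in a single play list.  The playId itself is the episode number
    # glued to the episode page URL, which playerContent() undoes.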
    def playerContent(self, flag, id, vipFlags):
        parts = id.split("http")
        xiutan = 0
        url = id
        if len(parts) > 1:
            # Drop the episode-number prefix and re-assemble the page URL,
            # then pull the real media URL out of the page's embedded JSON.
            after_https = 'http' + parts[1]
            res = requests.get(url=after_https, headers=headerx)
            res = res.text
            url = self.extract_middle_text(res, '},"url":"', '"', 0).replace('\\', '')
        result = {}
        result["parse"] = xiutan
        result["playUrl"] = ''
        result["url"] = url
        result["header"] = headerx
        return result
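
    # parse = 0 signals the host that `url` is directly playable with the
    # supplied headers (no web-view sniffing needed); this is the usual
    # convention in TVBox-style hosts.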
    def searchContentPage(self, key, quick, page):
        result = {}
        videos = []
        if not page:
            page = '1'
        if page == '1':
            url = f'{xurl}/vodsearch/-------------.html?wd={key}'
        else:
            url = f'{xurl}/vodsearch/{key}----------{page}---.html'
        detail = requests.get(url=url, headers=headerx)
        detail.encoding = "utf-8"
        res = detail.text
        doc = BeautifulSoup(res, "lxml")
        soups = doc.find_all('div', class_="MTagBookList_tagBookItem")
        for vod in soups:
            names = vod.find('a', class_="MTagBookList_bookName")
            name = names.text.strip()
            id = names['href']
            pics = vod.find('a', class_="image_imageScaleBox")
            pic = pics.find('img')['src']
            if 'http' not in pic:
                pic = xurl + pic
            remark = pics.find('img')['alt']
            videos.append({
                "vod_id": id,
                "vod_name": name,
                "vod_pic": pic,
                "vod_remarks": '拾光推荐📽️' + remark
            })
        result['list'] = videos
        result['page'] = page
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result
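
    # Note on the search endpoints above: page 1 goes through the query form
    # (?wd=key) while later pages use the path-segment form.  The keyword is
    # inserted raw; percent-encoding of non-ASCII characters is left to the
    # HTTP client when it prepares the URL.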
    def searchContent(self, key, quick):
        return self.searchContentPage(key, quick, '1')

    def localProxy(self, params):
        # proxyM3u8 / proxyMedia / proxyTs are not defined in this file and
        # are expected to be inherited from the base Spider class.
        if params['type'] == "m3u8":
            return self.proxyM3u8(params)
        elif params['type'] == "media":
            return self.proxyMedia(params)
        elif params['type'] == "ts":
            return self.proxyTs(params)
        return None
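

# ---------------------------------------------------------------------------
# Minimal local smoke test: a sketch, not part of the host app.  It assumes
# the host's `base.spider` package is importable and that www.45b7.com is
# reachable; TVBox-style hosts load and drive the class themselves instead
# of running this file directly.
if __name__ == '__main__':
    sp = Spider()
    sp.init(None)
    home = sp.homeContent(False)
    print(f"{len(home['class'])} categories")
    page1 = sp.categoryContent('都市', '1', False, None)
    print(f"{len(page1['list'])} items on category page 1")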