# 首映网.py

  1. """
  2. 作者 凯悦宾馆 🚓 内容均从互联网收集而来 仅供交流学习使用 版权归原创者所有 如侵犯了您的权益 请通知作者 将及时删除侵权内容
  3. ====================kaiyuebinguan====================
  4. """
  5. import requests
  6. from bs4 import BeautifulSoup
  7. import re
  8. from base.spider import Spider
  9. import sys
  10. import json
  11. import base64
  12. import urllib.parse
  13. sys.path.append('..')
  14. xurl = "https://www.tpua.vip"
  15. headerx = {
  16. 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.87 Safari/537.36'
  17. }
  18. pm = ''
class Spider(Spider):
    global xurl
    global headerx

    def getName(self):
        return "首页"

    def init(self, extend):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass
    def extract_middle_text(self, text, start_str, end_str, pl, start_index1: str = '', end_index2: str = ''):
        if pl == 3:
            plx = []
            while True:
                start_index = text.find(start_str)
                if start_index == -1:
                    break
                end_index = text.find(end_str, start_index + len(start_str))
                if end_index == -1:
                    break
                middle_text = text[start_index + len(start_str):end_index]
                plx.append(middle_text)
                text = text.replace(start_str + middle_text + end_str, '')
            if len(plx) > 0:
                purl = ''
                for i in range(len(plx)):
                    matches = re.findall(start_index1, plx[i])
                    output = ""
                    for match in matches:
                        match3 = re.search(r'(?:^|[^0-9])(\d+)(?:[^0-9]|$)', match[1])
                        if match3:
                            number = match3.group(1)
                        else:
                            number = 0
                        if 'http' not in match[0]:
                            output += f"#{'📽️丢丢👉' + match[1]}${number}{xurl}{match[0]}"
                        else:
                            output += f"#{'📽️丢丢👉' + match[1]}${number}{match[0]}"
                    output = output[1:]
                    purl = purl + output + "$$$"
                purl = purl[:-3]
                return purl
            else:
                return ""
        else:
            start_index = text.find(start_str)
            if start_index == -1:
                return ""
            end_index = text.find(end_str, start_index + len(start_str))
            if end_index == -1:
                return ""
            if pl == 0:
                middle_text = text[start_index + len(start_str):end_index]
                return middle_text.replace("\\", "")
            if pl == 1:
                middle_text = text[start_index + len(start_str):end_index]
                matches = re.findall(start_index1, middle_text)
                if matches:
                    jg = ' '.join(matches)
                    return jg
            if pl == 2:
                middle_text = text[start_index + len(start_str):end_index]
                matches = re.findall(start_index1, middle_text)
                if matches:
                    new_list = [f'✨丢丢👉{item}' for item in matches]
                    jg = '$$$'.join(new_list)
                    return jg
            # Fall through: regex found nothing for pl=1/2.
            return ""
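    # Usage sketch (illustrative, not from the original script): with pl=0 the helper just
    # returns the substring between the two markers, e.g.
    #     self.extract_middle_text('<b>hi</b>x', '<b>', '</b>', 0)  ->  'hi'
    # pl=1/2 run the regex passed in start_index1 over that substring, and pl=3 walks every
    # start_str/end_str block to build the play-list string (play groups joined with '$$$',
    # episodes inside one group joined with '#').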
    def homeContent(self, filter):
        # The "年代" (year) filter is identical for every category, so build it once.
        year_values = [{"n": "全部", "v": ""}] + [{"n": str(y), "v": str(y)} for y in range(2024, 2017, -1)]
        year_filter = [{"key": "年代", "name": "年代", "value": year_values}]
        result = {
            "class": [{"type_id": "dianying", "type_name": "丢丢电影🌠"},
                      {"type_id": "dianshiju", "type_name": "丢丢剧集🌠"},
                      {"type_id": "zongyi", "type_name": "丢丢综艺🌠"},
                      {"type_id": "duanju", "type_name": "丢丢短剧🌠"},
                      {"type_id": "dongman", "type_name": "丢丢动漫🌠"}],
            "list": [],
            "filters": {cid: year_filter for cid in ["dianying", "dianshiju", "duanju", "zongyi", "dongman"]}
        }
        return result
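    # For reference, each "filters" entry above expands to the same shape the original
    # hand-written dict produced (sketch):
    #     {"key": "年代", "name": "年代",
    #      "value": [{"n": "全部", "v": ""}, {"n": "2024", "v": "2024"}, ..., {"n": "2018", "v": "2018"}]}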
    def homeVideoContent(self):
        videos = []
        try:
            detail = requests.get(url=xurl, headers=headerx)
            detail.encoding = "utf-8"
            res = detail.text
            doc = BeautifulSoup(res, "lxml")
            soups = doc.find_all('ul', class_="clearfix")
            for soup in soups:
                vods = soup.find_all('a', class_="video-pic")
                for vod in vods:
                    name = vod['title']
                    id = vod['href']
                    pic = vod['data-original']
                    if 'http' not in pic:
                        pic = xurl + pic
                    remarks = vod.find('span', class_="note")
                    remark = remarks.text.strip() if remarks else ''
                    video = {
                        "vod_id": id,
                        "vod_name": '丢丢📽️' + name,
                        "vod_pic": pic,
                        "vod_remarks": '丢丢▶️' + remark
                    }
                    videos.append(video)
        except:
            pass
        return {'list': videos}
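    # Sketch of the card markup these selectors assume (reconstructed from the code,
    # not copied from the site):
    #     <ul class="clearfix">
    #       <li><a class="video-pic" href="/voddetail/123.html" title="片名"
    #              data-original="/upload/xxx.jpg"><span class="note">更新至08集</span></a></li>
    #     </ul>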
    def categoryContent(self, cid, pg, filter, ext):
        result = {}
        page = int(pg) if pg else 1
        videos = []
        # Optional "年代" (year) filter picked in the UI.
        NdType = ext.get('年代', '') if ext else ''
        url = f'{xurl}/list/{cid}___{NdType}___{page}.html'
        try:
            detail = requests.get(url=url, headers=headerx)
            detail.encoding = "utf-8"
            res = detail.text
            doc = BeautifulSoup(res, "lxml")
            soups = doc.find_all('ul', class_="clearfix")
            for soup in soups:
                vods = soup.find_all('a', class_="video-pic")
                for vod in vods:
                    name = vod['title']
                    id = vod['href']
                    pic = vod['data-original']
                    if 'http' not in pic:
                        pic = xurl + pic
                    remarks = vod.find('span', class_="note")
                    remark = remarks.text.strip() if remarks else ''
                    video = {
                        "vod_id": id,
                        "vod_name": '丢丢📽️' + name,
                        "vod_pic": pic,
                        "vod_remarks": '丢丢▶️' + remark
                    }
                    videos.append(video)
        except:
            pass
        result = {'list': videos}
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result
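    # Example of the list URL this builds (illustrative values only):
    #     https://www.tpua.vip/list/dianying___2024___2.html   # cid=dianying, 年代=2024, page 2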
    def detailContent(self, ids):
        global pm
        did = ids[0]
        result = {}
        videos = []
        playurl = ''
        if 'http' not in did:
            did = xurl + did
        res1 = requests.get(url=did, headers=headerx)
        res1.encoding = "utf-8"
        res = res1.text
        content = '😸丢丢🎉为您介绍剧情📢本资源来源于网络🚓侵权请联系删除👉' + self.extract_middle_text(res, 'details-content-all collapse">', '</span>', 0)
        content = content.replace('&lt;p&gt;', '').replace('&lt;br/&gt;', '').replace(' ', '').replace('&lt;/p&gt;', '').replace('\u3000', '')
        xianlu = self.extract_middle_text(res, '<ul class="nav nav-tabs hidden-xs"', '</ul>', 2, 'data-toggle=".*?">(.*?)</a>')
        bofang = self.extract_middle_text(res, '<ul class="clearfix fade in active"', '</ul>', 3, 'href="(.*?)">(.*?)<')
        videos.append({
            "vod_id": did,
            "vod_actor": '😸皮皮 😸灰灰',
            "vod_director": '😸丢丢',
            "vod_content": content,
            "vod_play_from": xianlu,
            "vod_play_url": bofang
        })
        result['list'] = videos
        return result
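    # vod_play_from / vod_play_url follow the usual TVBox layout: play sources ("线路")
    # separated by '$$$', episodes inside one source separated by '#', each episode written
    # as 显示名$地址 (extract_middle_text with pl=3 concatenates the episode number and the
    # play-page URL after the '$'; playerContent later recovers the URL by splitting on "http").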
    def playerContent(self, flag, id, vipFlags):
        parts = id.split("http")
        xiutan = 0
        url = id  # fallback: hand the id back unchanged if no embedded http link is found
        if xiutan == 0:
            if len(parts) > 1:
                after_https = 'http' + parts[1]
                res = requests.get(url=after_https, headers=headerx)
                res = res.text
                url = self.extract_middle_text(res, '","url":"', '"', 0).replace('\\', '')
        result = {}
        result["parse"] = xiutan
        result["playUrl"] = ''
        result["url"] = url
        result["header"] = headerx
        return result
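    # In the common TVBox convention, parse=0 means the returned url is directly playable
    # (here the address pulled from the '","url":"..."' JSON fragment of the play page);
    # parse=1 would ask the host app to sniff/parse the page instead.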
    def searchContentPage(self, key, quick, page):
        result = {}
        videos = []
        if not page:
            page = '1'
        url = f'{xurl}/search/{key}-{page}.html'
        detail = requests.get(url=url, headers=headerx)
        detail.encoding = "utf-8"
        res = detail.text
        doc = BeautifulSoup(res, "lxml")
        soups = doc.find_all('div', class_="details-info-min")
        for vod in soups:
            card = vod.find('a', class_="video-pic")
            name = card['title']
            id = card['href']
            pic = card['data-original']
            if 'http' not in pic:
                pic = xurl + pic
            remark = self.extract_middle_text(str(vod), '状态:</span>', '</li>', 0)
            video = {
                "vod_id": id,
                "vod_name": '丢丢📽️' + name,
                "vod_pic": pic,
                "vod_remarks": '丢丢▶️' + remark
            }
            videos.append(video)
        result['list'] = videos
        result['page'] = page
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result
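    # Search URL pattern used above (illustrative): https://www.tpua.vip/search/关键词-2.html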
    def searchContent(self, key, quick):
        return self.searchContentPage(key, quick, '1')
    def localProxy(self, params):
        if params['type'] == "m3u8":
            return self.proxyM3u8(params)
        elif params['type'] == "media":
            return self.proxyMedia(params)
        elif params['type'] == "ts":
            return self.proxyTs(params)
        return None
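
# Minimal local smoke test -- a sketch, assuming base.spider is importable and the site is
# reachable; it is not part of the plugin interface called by the host app.
if __name__ == '__main__':
    sp = Spider()
    sp.init('')
    print(json.dumps(sp.homeContent(False), ensure_ascii=False)[:300])
    print(json.dumps(sp.searchContent('爱情', False), ensure_ascii=False)[:300])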