  1. """
  2. 作者 凯悦宾馆 🚓 内容均从互联网收集而来 仅供交流学习使用 版权归原创者所有 如侵犯了您的权益 请通知作者 将及时删除侵权内容
  3. ====================kaiyuebinguan====================
  4. """
  5. import requests
  6. from bs4 import BeautifulSoup
  7. import re
  8. from base.spider import Spider
  9. import sys
  10. import json
  11. import base64
  12. import urllib.parse
  13. sys.path.append('..')
  14. xurl = "https://www.netfly.tv"
  15. headerx = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '
  16. 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36'}
  17. pm = ''

class Spider(Spider):
    global xurl
    global headerx

    def getName(self):
        return "首页"

    def init(self, extend):
        pass

    def isVideoFormat(self, url):
        pass

    def manualVideoCheck(self):
        pass

    def extract_middle_text(self, text, start_str, end_str, pl, start_index1: str = '', end_index2: str = ''):
        if pl == 3:
            # Mode 3: repeatedly cut out start_str..end_str blocks, then turn
            # every href/title pair inside each block into a play-list string.
            plx = []
            while True:
                start_index = text.find(start_str)
                if start_index == -1:
                    break
                end_index = text.find(end_str, start_index + len(start_str))
                if end_index == -1:
                    break
                middle_text = text[start_index + len(start_str):end_index]
                plx.append(middle_text)
                text = text.replace(start_str + middle_text + end_str, '')
            if len(plx) > 0:
                purl = ''
                for i in range(len(plx)):
                    matches = re.findall(start_index1, plx[i])
                    output = ""
                    for match in matches:
                        # Pull the episode number out of the title, if any.
                        match3 = re.search(r'(?:^|[^0-9])(\d+)(?:[^0-9]|$)', match[1])
                        if match3:
                            number = match3.group(1)
                        else:
                            number = 0
                        if 'http' not in match[0]:
                            output += f"#{'📽️丢丢👉' + match[1]}${number}{xurl}{match[0]}"
                        else:
                            output += f"#{'📽️丢丢👉' + match[1]}${number}{match[0]}"
                    output = output[1:]
                    purl = purl + output + "$$$"
                purl = purl[:-3]
                return purl
            else:
                return ""
        else:
            start_index = text.find(start_str)
            if start_index == -1:
                return ""
            end_index = text.find(end_str, start_index + len(start_str))
            if end_index == -1:
                return ""
            if pl == 0:
                # Mode 0: return the raw slice between the two markers.
                middle_text = text[start_index + len(start_str):end_index]
                return middle_text.replace("\\", "")
            if pl == 1:
                # Mode 1: regex-extract inside the slice, space-joined.
                middle_text = text[start_index + len(start_str):end_index]
                matches = re.findall(start_index1, middle_text)
                if matches:
                    return ' '.join(matches)
            if pl == 2:
                # Mode 2: regex-extract inside the slice, decorated and
                # '$$$'-joined for the play-from list.
                middle_text = text[start_index + len(start_str):end_index]
                matches = re.findall(start_index1, middle_text)
                if matches:
                    new_list = [f'✨丢丢👉{item}' for item in matches]
                    return '$$$'.join(new_list)
            return ""  # nothing matched
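
    # A sketch of the extraction modes on hypothetical inputs:
    #   extract_middle_text('x<b>hi</b>y', '<b>', '</b>', 0)  ->  'hi'
    #   pl == 1 / pl == 2 additionally run the start_index1 regex over the
    #   slice and join the matches with ' ' or with '$$$' (decorated);
    #   pl == 3 repeatedly consumes start..end blocks and formats each
    #   href/title pair into a '#'-joined play list, groups joined by '$$$'.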

    def homeContent(self, filter):
        # Every category shares the same "年代" (year) filter, so build it once.
        years = [{"n": "全部", "v": ""}] + [{"n": str(y), "v": str(y)} for y in range(2024, 2017, -1)]
        year_filter = [{"key": "年代", "name": "年代", "value": years}]
        result = {"class": [{"type_id": "/vod/show/1", "type_name": "丢丢电影🌠"},
                            {"type_id": "/vod/show/2", "type_name": "丢丢剧集🌠"},
                            {"type_id": "/vod/show/3", "type_name": "丢丢综艺🌠"},
                            {"type_id": "/vod/show/4", "type_name": "丢丢动漫🌠"},
                            {"type_id": "/vod/show/5", "type_name": "丢丢其他🌠"}],
                  "list": [],
                  "filters": {f"/vod/show/{i}": year_filter for i in range(1, 6)}}
        return result

    def homeVideoContent(self):
        videos = []
        try:
            detail = requests.get(url=xurl, headers=headerx)
            detail.encoding = "utf-8"
            res = detail.text
            # Keep only the recommendation modules before the "专题片单" block.
            res = self.extract_middle_text(res, '<div class="module">', '专题片单', 0)
            doc = BeautifulSoup(res, "lxml")
            soups = doc.find_all('div', class_="module-items")
            for soup in soups:
                vods = soup.find_all('a')
                for vod in vods:
                    name = vod['title']
                    id = vod['href']
                    pics = vod.find('div', class_="module-item-pic")
                    pic = pics.find('img')['data-original']
                    if 'http' not in pic:
                        pic = xurl + pic
                    remark = self.extract_middle_text(str(vod), 'module-item-note">', '</div>', 0)
                    video = {
                        "vod_id": id,
                        "vod_name": '丢丢📽️' + name,
                        "vod_pic": pic,
                        "vod_remarks": '丢丢▶️' + remark
                    }
                    videos.append(video)
        except Exception:
            pass
        # Return whatever was collected, even if parsing failed part-way.
        return {'list': videos}

    def categoryContent(self, cid, pg, filter, ext):
        page = int(pg) if pg else 1  # pg arrives as a string
        videos = []
        NdType = ext['年代'] if '年代' in ext.keys() else ''
        # Page 1 without a year filter uses the site's plain listing URL;
        # anything else goes through the paginated/filtered URL scheme.
        if page == 1 and not NdType:
            url = f'{xurl}{cid}-----------.html'
        else:
            url = f'{xurl}{cid}--------{page}---{NdType}.html'
        try:
            detail = requests.get(url=url, headers=headerx)
            detail.encoding = "utf-8"
            res = detail.text
            res = self.extract_middle_text(res, '按评分排序', '<div id="page">', 0)
            doc = BeautifulSoup(res, "lxml")
            for vod in doc.find_all('a'):
                # Skip anchors that are not item cards.
                if not vod.has_attr('title'):
                    continue
                pics = vod.find('div', class_="module-item-pic")
                if pics is None:
                    continue
                name = vod['title']
                id = vod['href']
                pic = pics.find('img')['data-original']
                if 'http' not in pic:
                    pic = xurl + pic
                remark = self.extract_middle_text(str(vod), 'module-item-note">', '</div>', 0)
                video = {
                    "vod_id": id,
                    "vod_name": '丢丢📽️' + name,
                    "vod_pic": pic,
                    "vod_remarks": '丢丢▶️' + remark
                }
                videos.append(video)
        except Exception:
            pass
        result = {'list': videos}
        result['page'] = pg
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def detailContent(self, ids):
        did = ids[0]
        result = {}
        videos = []
        if 'http' not in did:
            did = xurl + did
        res1 = requests.get(url=did, headers=headerx)
        res1.encoding = "utf-8"
        res = res1.text
        content = ('😸丢丢🎉为您介绍剧情📢本资源来源于网络🚓侵权请联系删除👉'
                   + self.extract_middle_text(res, '20px;">', '</p>', 0))
        content = content.replace('\u3000', '').replace(' ', '').replace('<p>', '').replace('\n', '')
        # Source tab names ("lines") and the play list for each source.
        xianlu = self.extract_middle_text(
            res, '<div class="module-tab-items-box hisSwiper"',
            '<div class="shortcuts-mobile-overlay">', 2,
            'data-dropdown-value=".*?"><span>(.*?)</span>')
        bofang = self.extract_middle_text(
            res, '<div class="module-play-list-content', '</div>', 3,
            'href="(.*?)" title=".*?"><span>(.*?)</span>')
        videos.append({
            "vod_id": did,
            "vod_actor": '😸皮皮 😸灰灰',
            "vod_director": '😸丢丢',
            "vod_content": content,
            "vod_play_from": xianlu,
            "vod_play_url": bofang
        })
        result['list'] = videos
        return result

    def playerContent(self, flag, id, vipFlags):
        parts = id.split("http")
        xiutan = 0
        url = ''  # fall back to an empty url when no absolute link is present
        if xiutan == 0:
            if len(parts) > 1:
                after_https = 'http' + parts[1]
                res = requests.get(url=after_https, headers=headerx)
                # The episode page embeds the real stream in a player config
                # blob shaped like ...},"url":"<stream>"...
                url = self.extract_middle_text(res.text, '},"url":"', '"', 0).replace('\\', '')
        result = {}
        result["parse"] = xiutan
        result["playUrl"] = ''
        result["url"] = url
        result["header"] = headerx
        return result
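
    # Note on the id format (an assumption based on how detailContent builds
    # its play list): each entry looks like '📽️丢丢👉EP1$1https://…', so
    # splitting on 'http' recovers the absolute episode page, which is then
    # fetched and scraped for the embedded player's "url" field.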

    def searchContentPage(self, key, quick, page):
        result = {}
        videos = []
        if not page:
            page = '1'
        if page == '1':
            url = f'{xurl}/vod/search/-------------.html?wd={key}'
        else:
            url = f'{xurl}/vod/search/{key}----------{str(page)}---.html'
        detail = requests.get(url=url, headers=headerx)
        detail.encoding = "utf-8"
        res = detail.text
        doc = BeautifulSoup(res, "lxml")
        soups = doc.find_all('div', class_="module-card-item")
        for vod in soups:
            names = vod.find('div', class_="module-item-pic")
            name = names.find('img')['alt']
            ids = vod.find('div', class_="module-card-item-title")
            id = ids.find('a')['href']
            pics = vod.find('div', class_="module-item-pic")
            pic = pics.find('img')['data-original']
            if 'http' not in pic:
                pic = xurl + pic
            remark = self.extract_middle_text(str(vod), 'module-item-note">', '</div>', 0)
            video = {
                "vod_id": id,
                "vod_name": '丢丢📽️' + name,
                "vod_pic": pic,
                "vod_remarks": '丢丢▶️' + remark
            }
            videos.append(video)
        result['list'] = videos
        result['page'] = page
        result['pagecount'] = 9999
        result['limit'] = 90
        result['total'] = 999999
        return result

    def searchContent(self, key, quick):
        return self.searchContentPage(key, quick, '1')

    def localProxy(self, params):
        if params['type'] == "m3u8":
            return self.proxyM3u8(params)
        elif params['type'] == "media":
            return self.proxyMedia(params)
        elif params['type'] == "ts":
            return self.proxyTs(params)
        return None
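
# A minimal standalone smoke test, a sketch only: it assumes the host app's
# base.spider module is importable and that https://www.netfly.tv is reachable.
if __name__ == '__main__':
    sp = Spider()
    sp.init(None)
    # Print short previews of the home categories and recommended videos.
    print(json.dumps(sp.homeContent(False), ensure_ascii=False)[:300])
    print(json.dumps(sp.homeVideoContent(), ensure_ascii=False)[:300])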