# hnitv.py — harvests Henan (China) IPTV channel lists from FOFA/ZoomEye
# search results and writes a deduplicated "name,url" list to hnitv.txt.
import concurrent.futures
import os
import re
import time

import requests
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
  8. urls = [
  9. "https://fofa.info/result?qbase64=ImlwdHYvbGl2ZS96aF9jbi5qcyIgJiYgY291bnRyeT0iQ04iICYmIHJlZ2lvbj0i5rKz5Y2XIg%3D%3D", # 河南
  10. "https://fofa.info/result?qbase64=ImlwdHYvbGl2ZS96aF9jbi5qcyIgJiYgY291bnRyeT0iQ04iICYmIGNpdHk9Inh1Y2hhbmci", # 河南xc
  11. "https://fofa.info/result?qbase64=ImlwdHYvbGl2ZS96aF9jbi5qcyIgJiYgY291bnRyeT0iQ04iICYmIGNpdHk9InpoZW5nemhvdSI%3D", # 河南zz
  12. "https://fofa.info/result?qbase64=ImlwdHYvbGl2ZS96aF9jbi5qcyIgJiYgY291bnRyeT0iQ04iICYmIGNpdHk9ImthaWZlbmci", # 河南kf
  13. "https://fofa.info/result?qbase64=ImlwdHYvbGl2ZS96aF9jbi5qcyIgJiYgY291bnRyeT0iQ04iICYmIGNpdHk9Imx1b3lhbmci", # 河南ly
  14. "https://www.zoomeye.org/searchResult?q=%2Fiptv%2Flive%2Fzh_cn.js%20%2Bcountry%3A%22CN%22%20%2Bsubdivisions%3A%22henan%22", #河南
  15. "https://www.zoomeye.org/searchResult?q=%2Fiptv%2Flive%2Fzh_cn.js%20%2Bcountry%3A%22CN%22%20%2Bcity%3A%22xuchang%22", #河南xc
  16. "https://www.zoomeye.org/searchResult?q=%2Fiptv%2Flive%2Fzh_cn.js%20%2Bcountry%3A%22CN%22%20%2Bcity%3A%22zhengzhou%22", #河南zz
  17. "https://www.zoomeye.org/searchResult?q=%2Fiptv%2Flive%2Fzh_cn.js%20%2Bcountry%3A%22CN%22%20%2Bcity%3A%22kaifeng%22", #河南kf
  18. "https://www.zoomeye.org/searchResult?q=%2Fiptv%2Flive%2Fzh_cn.js%20%2Bcountry%3A%22CN%22%20%2Bcity%3A%22luoyang%22", #河南ly
  19. ]
  20. def modify_urls(url):
  21. modified_urls = []
  22. ip_start_index = url.find("//") + 2
  23. ip_end_index = url.find(":", ip_start_index)
  24. base_url = url[:ip_start_index] # http:// or https://
  25. ip_address = url[ip_start_index:ip_end_index]
  26. port = url[ip_end_index:]
  27. ip_end = "/iptv/live/1000.json?key=txiptv"
  28. for i in range(1, 256):
  29. modified_ip = f"{ip_address[:-1]}{i}"
  30. modified_url = f"{base_url}{modified_ip}{port}{ip_end}"
  31. modified_urls.append(modified_url)
  32. return modified_urls
  33. def is_url_accessible(url):
  34. try:
  35. response = requests.get(url, timeout=0.5)
  36. if response.status_code == 200:
  37. return url
  38. except requests.exceptions.RequestException:
  39. pass
  40. return None
  41. results = []
  42. for url in urls:
  43. # 创建一个Chrome WebDriver实例
  44. chrome_options = Options()
  45. chrome_options.add_argument('--headless')
  46. chrome_options.add_argument('--no-sandbox')
  47. chrome_options.add_argument('--disable-dev-shm-usage')
  48. driver = webdriver.Chrome(options=chrome_options)
  49. # 使用WebDriver访问网页
  50. driver.get(url) # 将网址替换为你要访问的网页地址
  51. time.sleep(10)
  52. # 获取网页内容
  53. page_content = driver.page_source
  54. # 关闭WebDriver
  55. driver.quit()
  56. # 查找所有符合指定格式的网址
  57. pattern = r"http://\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d+" # 设置匹配的格式,如http://8.8.8.8:8888
  58. urls_all = re.findall(pattern, page_content)
  59. # urls = list(set(urls_all)) # 去重得到唯一的URL列表
  60. urls = set(urls_all) # 去重得到唯一的URL列表
  61. x_urls = []
  62. for url in urls: # 对urls进行处理,ip第四位修改为1,并去重
  63. url = url.strip()
  64. ip_start_index = url.find("//") + 2
  65. ip_end_index = url.find(":", ip_start_index)
  66. ip_dot_start = url.find(".") + 1
  67. ip_dot_second = url.find(".", ip_dot_start) + 1
  68. ip_dot_three = url.find(".", ip_dot_second) + 1
  69. base_url = url[:ip_start_index] # http:// or https://
  70. ip_address = url[ip_start_index:ip_dot_three]
  71. port = url[ip_end_index:]
  72. ip_end = "1"
  73. modified_ip = f"{ip_address}{ip_end}"
  74. x_url = f"{base_url}{modified_ip}{port}"
  75. x_urls.append(x_url)
  76. urls = set(x_urls) # 去重得到唯一的URL列表
  77. valid_urls = []
  78. # 多线程获取可用url
  79. with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:
  80. futures = []
  81. for url in urls:
  82. url = url.strip()
  83. modified_urls = modify_urls(url)
  84. for modified_url in modified_urls:
  85. futures.append(executor.submit(is_url_accessible, modified_url))
  86. for future in concurrent.futures.as_completed(futures):
  87. result = future.result()
  88. if result:
  89. valid_urls.append(result)
  90. for url in valid_urls:
  91. print(url)
  92. # 遍历网址列表,获取JSON文件并解析
  93. for url in valid_urls:
  94. try:
  95. # 发送GET请求获取JSON文件,设置超时时间为0.5秒
  96. ip_start_index = url.find("//") + 2
  97. ip_dot_start = url.find(".") + 1
  98. ip_index_second = url.find("/", ip_dot_start)
  99. base_url = url[:ip_start_index] # http:// or https://
  100. ip_address = url[ip_start_index:ip_index_second]
  101. url_x = f"{base_url}{ip_address}"
  102. json_url = f"{url}"
  103. response = requests.get(json_url, timeout=0.5)
  104. json_data = response.json()
  105. try:
  106. # 解析JSON文件,获取name和url字段
  107. for item in json_data['data']:
  108. if isinstance(item, dict):
  109. name = item.get('name')
  110. urlx = item.get('url')
  111. if ',' in urlx:
  112. urlx=f"aaaaaaaa"
  113. #if 'http' in urlx or 'udp' in urlx or 'rtp' in urlx:
  114. if 'http' in urlx:
  115. urld = f"{urlx}"
  116. else:
  117. urld = f"{url_x}{urlx}"
  118. if name and urld:
  119. # 删除特定文字
  120. name = name.replace("cctv", "CCTV")
  121. name = name.replace("中央", "CCTV")
  122. name = name.replace("央视", "CCTV")
  123. name = name.replace("高清", "")
  124. name = name.replace("超高", "")
  125. name = name.replace("HD", "")
  126. name = name.replace("标清", "")
  127. name = name.replace("频道", "")
  128. name = name.replace("-", "")
  129. name = name.replace(" ", "")
  130. name = name.replace("PLUS", "+")
  131. name = name.replace("+", "+")
  132. name = name.replace("(", "")
  133. name = name.replace(")", "")
  134. name = re.sub(r"CCTV(\d+)台", r"CCTV\1", name)
  135. name = name.replace("CCTV1综合", "CCTV1")
  136. name = name.replace("CCTV2财经", "CCTV2")
  137. name = name.replace("CCTV3综艺", "CCTV3")
  138. name = name.replace("CCTV4国际", "CCTV4")
  139. name = name.replace("CCTV4中文国际", "CCTV4")
  140. name = name.replace("CCTV4欧洲", "CCTV4")
  141. name = name.replace("CCTV5体育", "CCTV5")
  142. name = name.replace("CCTV6电影", "CCTV6")
  143. name = name.replace("CCTV7军事", "CCTV7")
  144. name = name.replace("CCTV7军农", "CCTV7")
  145. name = name.replace("CCTV7农业", "CCTV7")
  146. name = name.replace("CCTV7国防军事", "CCTV7")
  147. name = name.replace("CCTV8电视剧", "CCTV8")
  148. name = name.replace("CCTV9记录", "CCTV9")
  149. name = name.replace("CCTV9纪录", "CCTV9")
  150. name = name.replace("CCTV10科教", "CCTV10")
  151. name = name.replace("CCTV11戏曲", "CCTV11")
  152. name = name.replace("CCTV12社会与法", "CCTV12")
  153. name = name.replace("CCTV13新闻", "CCTV13")
  154. name = name.replace("CCTV新闻", "CCTV13")
  155. name = name.replace("CCTV14少儿", "CCTV14")
  156. name = name.replace("CCTV15音乐", "CCTV15")
  157. name = name.replace("CCTV16奥林匹克", "CCTV16")
  158. name = name.replace("CCTV17农业农村", "CCTV17")
  159. name = name.replace("CCTV17农业", "CCTV17")
  160. name = name.replace("CCTV5+体育赛视", "CCTV5+")
  161. name = name.replace("CCTV5+体育赛事", "CCTV5+")
  162. name = name.replace("CCTV5+体育", "CCTV5+")
  163. name = name.replace("CCTV17军事", "CCTV17")
  164. name = name.replace("CCTV17农村", "CCTV17")
  165. name = name.replace("梨园", "河南梨园")
  166. name = name.replace("河南河南梨园", "河南梨园")
  167. name = name.replace("河南法制", "河南法治")
  168. name = name.replace("法制", "河南法治")
  169. name = name.replace("新闻", "新闻")
  170. name = name.replace("都市", "都市")
  171. name = name.replace("公共", "公共")
  172. name = name.replace("民生", "民生")
  173. if 'udp' not in urld or 'rtp' not in urld:
  174. results.append(f"{name},{urld}")
  175. except:
  176. continue
  177. except:
  178. continue
  179. results = set(results) # 去重得到唯一的URL列表
  180. results = sorted(results)
  181. with open("hnitv.txt", 'w', encoding='utf-8') as file:
  182. for result in results:
  183. file.write(result + "\n")
  184. print(result)