#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File : update.py
# Author: DaShenHan&道长-----先苦后甜,任凭晚风拂柳颜------
# Date : 2022/9/6
import re
from time import time as getTime
import sys
import requests
import os
import zipfile
import shutil  # https://blog.csdn.net/weixin_33130113/article/details/112336581
import ujson

from utils.log import logger
from utils.web import get_interval
from utils.htmlParser import jsoup

headers = {
    'Referer': 'https://gitcode.net/',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36',
}

def getHotSuggest1(url='http://4g.v.sogou.com/hotsugg', size=0):
    """Scrape Sogou's mobile hot-search page. `size` is accepted for
    signature parity with getHotSuggest2 but is not used here."""
    jsp = jsoup(url)
    pdfh = jsp.pdfh
    pdfa = jsp.pdfa
    pd = jsp.pd
    try:
        r = requests.get(url, headers=headers, timeout=2)
        html = r.text
        data = pdfa(html, 'ul.hot-list&&li')
        suggs = [{'title': pdfh(dt, 'a&&Text'), 'url': pd(dt, 'a&&href')} for dt in data]
        return suggs
    except Exception:
        return []

def getHotSuggest2(url='https://pbaccess.video.qq.com/trpc.videosearch.hot_rank.HotRankServantHttp/HotRankHttp', size=0):
    """Fetch Tencent Video's hot-rank list from its JSON API (default 50 entries)."""
    size = int(size) if size else 50
    pdata = ujson.dumps({"pageNum": 0, "pageSize": size})
    try:
        r = requests.post(url,
                          headers={'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36',
                                   'content-type': 'application/json'},
                          data=pdata, timeout=2)
        html = r.json()
        data = html['data']['navItemList'][0]['hotRankResult']['rankItemList']
        suggs = [{'title': dt['title'], 'url': dt['url']} for dt in data]
        return suggs
    except Exception:
        return []

def getHotSuggest(s_from, size):
    # 'sougou' (sic) is the key existing callers pass; keep the spelling for compatibility.
    if s_from == 'sougou':
        return getHotSuggest1(size=size)
    else:
        return getHotSuggest2(size=size)
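# Both backends normalize to the same list-of-dicts shape, e.g. (illustrative values only):
#     getHotSuggest('sougou', 10) -> [{'title': '...', 'url': 'http://...'}, ...]
# so callers can switch sources without branching on the result format.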

def getLocalVer():
    base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # parent directory
    version_path = os.path.join(base_path, 'js/version.txt')
    if not os.path.exists(version_path):
        with open(version_path, mode='w+', encoding='utf-8') as f:
            version = '1.0.0'
            f.write(version)
    else:
        with open(version_path, encoding='utf-8') as f:
            version = f.read().strip()  # strip so a trailing newline can't fake a version mismatch
    return version

def getOnlineVer():
    ver = '1.0.1'
    msg = ''
    try:
        # Mirrors used previously:
        # r = requests.get('https://gitcode.net/qq_32394351/dr_py/-/raw/master/js/version.txt', timeout=(2, 2))
        # r = requests.get('https://code.gitlink.org.cn/api/v1/repos/hjdhnx/dr_py/raw/master/js/version.txt', timeout=(2, 2))
        r = requests.get('https://ghproxy.net/https://raw.githubusercontent.com/hjdhnx/dr_py/main/js/version.txt', timeout=(2, 2))
        ver = r.text.strip()
    except Exception as e:
        msg = f'{e}'
        logger.info(msg)
    return ver, msg

def checkUpdate():
    local_ver = getLocalVer()
    online_ver, msg = getOnlineVer()
    if local_ver != online_ver:
        return True
    return False
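# Note: this is plain string inequality, not a semantic-version comparison, so any
# difference between js/version.txt and the remote file (even a remote downgrade)
# reports an update as available.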

def del_file(filepath):
    """
    Delete every regular file directly under the given directory
    (subdirectories are left untouched).
    :param filepath: directory to clear
    :return:
    """
    del_list = os.listdir(filepath)
    for f in del_list:
        file_path = os.path.join(filepath, f)
        if os.path.isfile(file_path):
            os.remove(file_path)

def copytree(src, dst, ignore=None):
    """Recursive copy fallback for Python < 3.8, where shutil.copytree
    has no dirs_exist_ok; `ignore` is a list of path fragments joined
    into a regex and matched against the destination path."""
    if ignore is None:
        ignore = []
    dirs = os.listdir(src)  # every entry under src, files and directories alike
    logger.info(f'{dirs}')
    for name in dirs:
        from_dir = os.path.join(src, name)  # source file or directory
        to_dir = os.path.join(dst, name)    # destination file or directory
        if os.path.isdir(from_dir):
            if not os.path.exists(to_dir):  # create the target directory if missing
                os.mkdir(to_dir)
            copytree(from_dir, to_dir, ignore)  # recurse into the subdirectory
        elif os.path.isfile(from_dir):
            if ignore:
                regxp = '|'.join(ignore).replace('\\', '/')  # assemble the exclusion regex
                to_dir_str = str(to_dir).replace('\\', '/')
                if not re.search(rf'{regxp}', to_dir_str, re.M):
                    shutil.copy(from_dir, to_dir)
            else:
                shutil.copy(from_dir, to_dir)

def force_copy_files(from_path, to_path, exclude_files=None):
    if exclude_files is None:
        exclude_files = []
    logger.info(f'Copying files {from_path} => {to_path}')
    if not os.path.exists(to_path):
        os.makedirs(to_path, exist_ok=True)
    try:
        if sys.version_info < (3, 8):
            copytree(from_path, to_path, exclude_files)  # dirs_exist_ok is unavailable before 3.8
        else:
            if len(exclude_files) > 0:
                shutil.copytree(from_path, to_path, dirs_exist_ok=True, ignore=shutil.ignore_patterns(*exclude_files))
            else:
                shutil.copytree(from_path, to_path, dirs_exist_ok=True)
    except Exception as e:
        logger.info(f'Copying {from_path} => {to_path} failed: {e}')
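# Caveat: shutil.ignore_patterns matches basenames via fnmatch, so path-style
# entries such as 'txt/pycms0.json' are honored only by the copytree fallback
# above (which regex-matches the full destination path); on Python >= 3.8 they
# silently match nothing. A sketch of a basename-based alternative, if
# consistent behavior on both branches were wanted:
#     shutil.ignore_patterns(*[os.path.basename(p) for p in exclude_files])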

def copy_to_update():
    base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # parent directory
    tmp_path = os.path.join(base_path, 'tmp')
    dr_path = os.path.join(tmp_path, 'dr_py-main')
    if not os.path.exists(dr_path):
        logger.info(f'Upgrade failed: directory {dr_path} not found')
        return False
    # Never overwrite super or base
    paths = ['js', 'models', 'controllers', 'libs', 'static', 'templates', 'utils', 'txt', 'jiexi', 'py', 'whl', 'doc']
    exclude_files = ['txt/pycms0.json', 'txt/pycms1.json', 'txt/pycms2.json', 'base/rules.db']
    for path in paths:
        force_copy_files(os.path.join(dr_path, path), os.path.join(base_path, path), exclude_files)
    try:
        shutil.copy(os.path.join(dr_path, 'app.py'), os.path.join(base_path, 'app.py'))
    except Exception as e:
        logger.info(f'Updating app.py failed: {e}')
    logger.info('Upgrade routine finished; all files copied over')
    return True

def download_new_version():
    t1 = getTime()
    base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # parent directory
    tmp_path = os.path.join(base_path, 'tmp')
    os.makedirs(tmp_path, exist_ok=True)
    # Mirrors used previously:
    # url = 'https://gitcode.net/qq_32394351/dr_py/-/archive/master/dr_py-master.zip'
    # url = 'https://code.gitlink.org.cn/api/v1/repos/hjdhnx/dr_py/archive/master.zip'
    url = 'https://ghproxy.net/https://github.com/hjdhnx/dr_py/archive/refs/heads/main.zip'
    del_file(tmp_path)  # clear cached files left over from a previous run
    msg = ''
    try:
        logger.info(f'Downloading: {url}')
        r = requests.get(url, headers=headers, timeout=(20, 20))
        rb = r.content
        download_path = os.path.join(tmp_path, 'dr_py.zip')
        with open(download_path, mode='wb+') as f:
            f.write(rb)
        logger.info(f'Unpacking: {download_path}')
        with zipfile.ZipFile(download_path, 'r') as zf:  # the downloaded archive
            for file in zf.namelist():
                zf.extract(file, tmp_path)  # extract into tmp/
        logger.info('Unpacked; starting the upgrade')
        ret = copy_to_update()
        logger.info(f'Upgrade finished, result: {ret}')
        msg = 'Upgrade succeeded'
    except Exception as e:
        msg = f'Upgrade failed: {e}'
    logger.info(f'Full upgrade took {get_interval(t1)} ms')
    return msg

def download_lives(live_url: str):
    t1 = getTime()
    base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))  # parent directory
    live_path = os.path.join(base_path, 'base/直播.txt')
    logger.info(f'Trying to sync remote content from {live_url} to {live_path}')
    try:
        r = requests.get(live_url, headers=headers, timeout=3)
        auto_encoding = r.apparent_encoding
        if auto_encoding.lower() in ['utf-8', 'gbk', 'gb2312', 'gb18030']:  # 'bg2312' in the original was a typo for 'gb2312'
            r.encoding = auto_encoding
        html = r.text
        # Sanity check: a genuine playlist mentions cctv or .m3u8 and is reasonably long
        if re.search(r'cctv|\.m3u8', html, re.M | re.I) and len(html) > 1000:
            logger.info(f'Live source synced in {get_interval(t1)} ms')
            with open(live_path, mode='w+', encoding='utf-8') as f:
                f.write(html)
            return True
        else:
            logger.info(f'Live source sync failed: the remote file does not look like a live source. Took {get_interval(t1)} ms')
            return False
    except Exception as e:
        logger.info(f'Live source sync failed after {get_interval(t1)} ms\n{e}')
        return False
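

# --- Usage sketch (not part of the original module): a minimal manual smoke test,
# assuming the utils package is importable (i.e. run from the project root).
if __name__ == '__main__':
    print(getHotSuggest('sougou', 10))   # Sogou trending searches
    print(getLocalVer())                 # local version string
    if checkUpdate():                    # compares local vs. remote version
        print(download_new_version())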