# update_old.py — line-number gutter and file-size residue from the paste removed.
  1. #!/usr/bin/env python3
  2. # -*- coding: utf-8 -*-
  3. # File : update.py
  4. # Author: DaShenHan&道长-----先苦后甜,任凭晚风拂柳颜------
  5. # Date : 2022/9/6
  6. import re
  7. from time import time as getTime
  8. import sys
  9. import requests
  10. import os
  11. import zipfile
  12. import shutil # https://blog.csdn.net/weixin_33130113/article/details/112336581
  13. from utils.log import logger
  14. from utils.web import get_interval
  15. from utils.htmlParser import jsoup
  16. headers = {
  17. 'Referer': 'https://gitcode.net/',
  18. # 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36',
  19. 'X-T5-Auth': 'ZjQxNDIh',
  20. 'User-Agent': 'baiduboxapp',
  21. }
  22. proxies={"https":"http://cloudnproxy.baidu.com:443","http":"http://cloudnproxy.baidu.com:443"}
  23. def getHotSuggest(url='http://4g.v.sogou.com/hotsugg'):
  24. jsp = jsoup(url)
  25. pdfh = jsp.pdfh
  26. pdfa = jsp.pdfa
  27. pd = jsp.pd
  28. try:
  29. r = requests.get(url,headers=headers,timeout=2)
  30. html = r.text
  31. data = pdfa(html,'ul.hot-list&&li')
  32. suggs = [{'title':pdfh(dt,'a&&Text'),'url':pd(dt,'a&&href')} for dt in data]
  33. # print(html)
  34. # print(suggs)
  35. return suggs
  36. except:
  37. return []
  38. def getLocalVer():
  39. base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) # 上级目录
  40. version_path = os.path.join(base_path, f'js/version.txt')
  41. if not os.path.exists(version_path):
  42. with open(version_path,mode='w+',encoding='utf-8') as f:
  43. version = '1.0.0'
  44. f.write(version)
  45. else:
  46. with open(version_path,encoding='utf-8') as f:
  47. version = f.read()
  48. return version
  49. def getOnlineVer():
  50. ver = '1.0.1'
  51. msg = ''
  52. try:
  53. # r = requests.get('https://gitcode.net/qq_32394351/dr_py/-/raw/master/js/version.txt',timeout=(2,2),proxies=proxies)
  54. r = requests.get('https://code.gitlink.org.cn/api/v1/repos/hjdhnx/dr_py/raw/master/js/version.txt',
  55. timeout=(2, 2),proxies=proxies)
  56. ver = r.text
  57. except Exception as e:
  58. # print(f'{e}')
  59. msg = f'{e}'
  60. logger.info(msg)
  61. return ver,msg
  62. def checkUpdate():
  63. local_ver = getLocalVer()
  64. online_ver,msg = getOnlineVer()
  65. if local_ver != online_ver:
  66. return True
  67. return False
  68. def del_file(filepath):
  69. """
  70. 删除execl目录下的所有文件或文件夹
  71. :param filepath: 路径
  72. :return:
  73. """
  74. del_list = os.listdir(filepath)
  75. for f in del_list:
  76. file_path = os.path.join(filepath, f)
  77. if os.path.isfile(file_path):
  78. os.remove(file_path)
  79. def copytree(src, dst, ignore=None):
  80. if ignore is None:
  81. ignore = []
  82. dirs = os.listdir(src) # 获取目录下的所有文件包括文件夹
  83. logger.info(f'{dirs}')
  84. for dir in dirs: # 遍历文件或文件夹
  85. from_dir = os.path.join(src, dir) # 将要复制的文件夹或文件路径
  86. to_dir = os.path.join(dst, dir) # 将要复制到的文件夹或文件路径
  87. if os.path.isdir(from_dir): # 判断是否为文件夹
  88. if not os.path.exists(to_dir): # 判断目标文件夹是否存在,不存在则创建
  89. os.mkdir(to_dir)
  90. copytree(from_dir, to_dir,ignore) # 迭代 遍历子文件夹并复制文件
  91. elif os.path.isfile(from_dir): # 如果为文件,则直接复制文件
  92. if ignore:
  93. regxp = '|'.join(ignore).replace('\\','/') # 组装正则
  94. to_dir_str = str(to_dir).replace('\\','/')
  95. if not re.search(rf'{regxp}', to_dir_str, re.M):
  96. shutil.copy(from_dir, to_dir) # 复制文件
  97. else:
  98. shutil.copy(from_dir, to_dir) # 复制文件
  99. def force_copy_files(from_path, to_path, exclude_files=None):
  100. # print(f'开始拷贝文件{from_path}=>{to_path}')
  101. if exclude_files is None:
  102. exclude_files = []
  103. logger.info(f'开始拷贝文件{from_path}=>{to_path}')
  104. if not os.path.exists(to_path):
  105. os.makedirs(to_path,exist_ok=True)
  106. try:
  107. if sys.version_info < (3, 8):
  108. copytree(from_path, to_path,exclude_files)
  109. else:
  110. if len(exclude_files) > 0:
  111. shutil.copytree(from_path, to_path, dirs_exist_ok=True,ignore=shutil.ignore_patterns(*exclude_files))
  112. else:
  113. shutil.copytree(from_path, to_path, dirs_exist_ok=True)
  114. except Exception as e:
  115. logger.info(f'拷贝文件{from_path}=>{to_path}发生错误:{e}')
  116. def copy_to_update():
  117. base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) # 上级目录
  118. tmp_path = os.path.join(base_path, f'tmp')
  119. dr_path = os.path.join(tmp_path, f'dr_py')
  120. if not os.path.exists(dr_path):
  121. # print(f'升级失败,找不到目录{dr_path}')
  122. logger.info(f'升级失败,找不到目录{dr_path}')
  123. return False
  124. # 千万不能覆盖super,base
  125. paths = ['js','models','controllers','libs','static','templates','utils','txt','jiexi','py','whl']
  126. exclude_files = ['txt/pycms0.json','txt/pycms1.json','txt/pycms2.json','base/rules.db','utils/update.py']
  127. for path in paths:
  128. force_copy_files(os.path.join(dr_path, path),os.path.join(base_path, path),exclude_files)
  129. try:
  130. shutil.copy(os.path.join(dr_path, 'app.py'), os.path.join(base_path, 'app.py')) # 复制文件
  131. except Exception as e:
  132. logger.info(f'更新app.py发生错误:{e}')
  133. logger.info(f'升级程序执行完毕,全部文件已拷贝覆盖')
  134. return True
  135. def download_new_version():
  136. t1 = getTime()
  137. base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) # 上级目录
  138. tmp_path = os.path.join(base_path, f'tmp')
  139. os.makedirs(tmp_path,exist_ok=True)
  140. # url = 'https://gitcode.net/qq_32394351/dr_py/-/archive/master/dr_py-master.zip'
  141. url = 'https://code.gitlink.org.cn/api/v1/repos/hjdhnx/dr_py/archive/master.zip'
  142. # tmp_files = os.listdir(tmp_path)
  143. # for tp in tmp_files:
  144. # print(f'清除缓存文件:{tp}')
  145. # os.remove(os.path.join(tmp_path, tp))
  146. del_file(tmp_path)
  147. msg = ''
  148. try:
  149. # print(f'开始下载:{url}')
  150. logger.info(f'开始下载:{url}')
  151. r = requests.get(url,headers=headers,timeout=(20,20),proxies=proxies)
  152. rb = r.content
  153. download_path = os.path.join(tmp_path, 'dr_py.zip')
  154. with open(download_path,mode='wb+') as f:
  155. f.write(rb)
  156. # print(f'开始解压文件:{download_path}')
  157. logger.info(f'开始解压文件:{download_path}')
  158. f = zipfile.ZipFile(download_path, 'r') # 压缩文件位置
  159. for file in f.namelist():
  160. f.extract(file, tmp_path) # 解压位置
  161. f.close()
  162. # print('解压完毕,开始升级')
  163. logger.info('解压完毕,开始升级')
  164. ret = copy_to_update()
  165. logger.info(f'升级完毕,结果为:{ret}')
  166. # print(f'升级完毕,结果为:{ret}')
  167. msg = '升级成功'
  168. except Exception as e:
  169. msg = f'升级失败:{e}'
  170. logger.info(f'系统升级共计耗时:{get_interval(t1)}毫秒')
  171. return msg
  172. def download_lives(live_url:str):
  173. t1 = getTime()
  174. base_path = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) # 上级目录
  175. live_path = os.path.join(base_path, f'base/直播.txt')
  176. logger.info(f'尝试同步{live_url}远程内容到{live_path}')
  177. try:
  178. r = requests.get(live_url,headers=headers,timeout=3)
  179. auto_encoding = r.apparent_encoding
  180. if auto_encoding.lower() in ['utf-8','gbk','bg2312','gb18030']:
  181. r.encoding = auto_encoding
  182. # print(r.encoding)
  183. html = r.text
  184. # print(len(html))
  185. if re.search('cctv|.m3u8',html,re.M|re.I) and len(html) > 1000:
  186. logger.info(f'直播源同步成功,耗时{get_interval(t1)}毫秒')
  187. with open(live_path,mode='w+',encoding='utf-8') as f:
  188. f.write(html)
  189. return True
  190. else:
  191. logger.info(f'直播源同步失败,远程文件看起来不是直播源。耗时{get_interval(t1)}毫秒')
  192. return False
  193. except Exception as e:
  194. logger.info(f'直播源同步失败,耗时{get_interval(t1)}毫秒\n{e}')
  195. return False