HtmlParser.java

package com.github.tvbox.osc.util.js;

import android.text.TextUtils;
import com.quickjs.android.JSUtils;
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
public class HtmlParser {
    private static String pdfh_html = "";
    private static String pdfa_html = "";
    private static final Pattern p = Pattern.compile("url\\((.*?)\\)", Pattern.MULTILINE | Pattern.DOTALL);
    private static final Pattern NOADD_INDEX = Pattern.compile(":eq|:lt|:gt|:first|:last|^body$|^#"); // selectors that never get an automatic :eq index appended
    private static final Pattern URLJOIN_ATTR = Pattern.compile("(url|src|href|-original|-src|-play|-url)$", Pattern.MULTILINE | Pattern.CASE_INSENSITIVE); // attributes whose values are automatically url-joined
    private static Document pdfh_doc = null;
    private static Document pdfa_doc = null;
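    /**
     * Resolves a possibly relative child URL against a parent URL.
     * Illustrative example (hypothetical values):
     *   joinUrl("https://example.com/vod/", "detail?id=1") -> "https://example.com/vod/detail?id=1"
     * An empty parent returns the child unchanged; a malformed URL returns the parent as-is.
     */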
    public static String joinUrl(String parent, String child) {
        if (JSUtils.isEmpty(parent)) {
            return child;
        }
        URL url;
        String q = parent;
        try {
            url = new URL(new URL(parent), child);
            q = url.toExternalForm();
        } catch (MalformedURLException e) {
            e.printStackTrace();
        }
        // if (q.contains("#")) {
        //     q = q.replaceAll("^(.+?)#.*?$", "$1");
        // }
        return q;
    }
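    /**
     * Holds the parts of one parsed selector rule: the CSS selector itself,
     * the :eq index (0 when absent or unparsable) and an optional exclude list.
     */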
    public static class Painfo {
        public String nparse_rule;
        public int nparse_index;
        public List<String> excludes;
    }
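    /*
     * Illustrative example (hypothetical rule): "div.item:eq(-1)--span--.ad" is split into
     * nparse_rule = "div.item", nparse_index = -1 and excludes = ["span", ".ad"];
     * the excluded selectors are later stripped from the matched elements in parseOneRule.
     */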
    private static Painfo getParseInfo(String nparse) {
        /*
        Extract from a single rule: the parse selector, the index position and the exclude list.
        The exclude list can strip elements from the match; multiple entries are supported, removable by tag, by id, etc.
        :param nparse:
        :return:*/
        Painfo painfo = new Painfo();
        //List<String> excludes = new ArrayList<>(); // exclude list, empty by default
        //int nparse_index; // position index, 0 by default
        painfo.nparse_rule = nparse; // the rule defaults to the input itself
        if (nparse.contains(":eq")) {
            painfo.nparse_rule = nparse.split(":")[0];
            String nparse_pos = nparse.split(":")[1];
            if (painfo.nparse_rule.contains("--")) {
                String[] rules = painfo.nparse_rule.split("--");
                painfo.excludes = new ArrayList<>(Arrays.asList(rules));
                painfo.excludes.remove(0);
                painfo.nparse_rule = rules[0];
            } else if (nparse_pos.contains("--")) {
                String[] rules = nparse_pos.split("--");
                painfo.excludes = new ArrayList<>(Arrays.asList(rules));
                painfo.excludes.remove(0);
                nparse_pos = rules[0];
            }
            try {
                painfo.nparse_index = Integer.parseInt(nparse_pos.replace("eq(", "").replace(")", ""));
            } catch (Exception e1) {
                painfo.nparse_index = 0;
            }
        } else {
            if (nparse.contains("--")) {
                String[] rules = painfo.nparse_rule.split("--");
                painfo.excludes = new ArrayList<>(Arrays.asList(rules));
                painfo.excludes.remove(0);
                painfo.nparse_rule = rules[0];
            }
        }
        return painfo;
    }
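    // isIndex and isUrl appear to be superseded by the NOADD_INDEX and URLJOIN_ATTR regex
    // checks (see the commented-out calls in parseHikerToJq and parseDomForUrl below).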
    public static boolean isIndex(String str) {
        if (JSUtils.isEmpty(str)) {
            return false;
        }
        for (String str2 : new String[]{":eq", ":lt", ":gt", ":first", ":last", "body", "#"}) {
            if (str.contains(str2)) {
                if (str2.equals("body") || str2.equals("#")) {
                    return str.startsWith(str2);
                }
                return true;
            }
        }
        return false;
    }
    public static boolean isUrl(String str) {
        if (JSUtils.isEmpty(str)) {
            return false;
        }
        for (String str2 : new String[]{"url", "src", "href", "-original", "-play"}) {
            if (str.contains(str2)) {
                return true;
            }
        }
        return false;
    }
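    /*
     * Illustrative conversion (hypothetical rules): with first = true, ".list&&a" becomes
     * ".list:eq(0) a:eq(0)"; with first = false the last segment is left alone, giving
     * ".list:eq(0) a". Segments matching NOADD_INDEX (e.g. "#content") are never indexed.
     */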
    private static String parseHikerToJq(String parse, boolean first) {
        /*
        Convert a Hiker (海阔视界) parse expression into a native jQuery-style expression,
        automatically appending :eq indexes; when first is passed, the last segment also gets :eq(0).
        :param parse:
        :param first:
        :return:
        */
        // NOADD_INDEX marks selectors that must not get an automatic :eq index
        if (parse.contains("&&")) {
            String[] parses = parse.split("&&"); // rules containing && are re-joined segment by segment
            List<String> new_parses = new ArrayList<>(); // build the new list of parse expressions
            for (int i = 0; i < parses.length; i++) {
                String[] pss = parses[i].split(" ");
                String ps = pss[pss.length - 1]; // if a segment contains spaces, check only its last token
                Matcher m = NOADD_INDEX.matcher(ps);
                //if (!isIndex(ps)) {
                if (!m.find()) {
                    if (!first && i >= parses.length - 1) { // when first is false, the last segment does not get :eq(0)
                        new_parses.add(parses[i]);
                    } else {
                        new_parses.add(parses[i] + ":eq(0)");
                    }
                } else {
                    new_parses.add(parses[i]);
                }
            }
            parse = TextUtils.join(" ", new_parses);
        } else {
            String[] pss = parse.split(" ");
            String ps = pss[pss.length - 1]; // if the rule contains spaces, check only its last token
            Matcher m = NOADD_INDEX.matcher(ps);
            //if (!isIndex(ps) && first) {
            if (!m.find() && first) {
                parse = parse + ":eq(0)";
            }
        }
        return parse;
    }
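    /*
     * Extracts a single value from the document (cached in pdfh_html/pdfh_doc).
     * Illustrative call (hypothetical values): parseDomForUrl(html, "a&&href", "https://example.com/")
     * selects the first <a>, reads its href attribute and, because "href" matches URLJOIN_ATTR,
     * joins a relative value against the add_url base.
     */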
    public static String parseDomForUrl(String html, String rule, String add_url) {
        if (!pdfh_html.equals(html)) {
            pdfh_html = html;
            pdfh_doc = Jsoup.parse(html);
        }
        Document doc = pdfh_doc;
        if (rule.equals("body&&Text") || rule.equals("Text")) {
            return doc.text();
        } else if (rule.equals("body&&Html") || rule.equals("Html")) {
            return doc.html();
        }
        String option = "";
        if (rule.contains("&&")) {
            String[] rs = rule.split("&&");
            option = rs[rs.length - 1];
            List<String> excludes = new ArrayList<>(Arrays.asList(rs));
            excludes.remove(rs.length - 1);
            rule = TextUtils.join("&&", excludes);
        }
        rule = parseHikerToJq(rule, true);
        String[] parses = rule.split(" ");
        Elements ret = new Elements();
        for (String nparse : parses) {
            ret = parseOneRule(doc, nparse, ret);
            if (ret.isEmpty()) {
                return "";
            }
        }
        String result;
        if (JSUtils.isNotEmpty(option)) {
            if (option.equals("Text")) {
                result = ret.text();
            } else if (option.equals("Html")) {
                result = ret.html();
            } else {
                result = ret.attr(option);
                if (option.toLowerCase().contains("style") && result.contains("url(")) {
                    Matcher m = p.matcher(result);
                    if (m.find()) {
                        result = m.group(1);
                    }
                }
                if (JSUtils.isNotEmpty(result) && JSUtils.isNotEmpty(add_url)) {
                    // attributes whose values need automatic url-joining
                    Matcher m = URLJOIN_ATTR.matcher(option);
                    //if (isUrl(option)) {
                    if (m.find()) {
                        if (result.contains("http")) {
                            result = result.substring(result.indexOf("http"));
                        } else {
                            result = joinUrl(add_url, result);
                        }
                    }
                }
            }
        } else {
            result = ret.outerHtml();
        }
        return result;
    }
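    /*
     * Selects a list of elements (cached in pdfa_html/pdfa_doc) and returns each one as its outer HTML.
     * Illustrative rule (hypothetical): "#content&&li" selects every <li> under #content;
     * with first = false no :eq(0) is appended to the final segment, so all matches are kept.
     */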
    public static List<String> parseDomForArray(String html, String rule) {
        if (!pdfa_html.equals(html)) {
            pdfa_html = html;
            pdfa_doc = Jsoup.parse(html);
        }
        Document doc = pdfa_doc;
        rule = parseHikerToJq(rule, false);
        String[] parses = rule.split(" ");
        Elements ret = new Elements();
        for (String pars : parses) {
            ret = parseOneRule(doc, pars, ret);
            if (ret.isEmpty()) {
                return new ArrayList<>();
            }
        }
        List<String> eleHtml = new ArrayList<>();
        for (int i = 0; i < ret.size(); i++) {
            Element element1 = ret.get(i);
            eleHtml.add(element1.outerHtml());
        }
        return eleHtml;
    }
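    /*
     * Applies a single selector step to the current selection (or to the whole document when
     * the selection is still empty). A negative :eq index counts from the end, e.g. a rule of
     * "li:eq(-1)" keeps the last matched <li>; excludes are removed from a clone of the result.
     */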
    private static Elements parseOneRule(Document doc, String nparse, Elements ret) {
        Painfo painfo = getParseInfo(nparse);
        if (ret.isEmpty()) {
            ret = doc.select(painfo.nparse_rule);
        } else {
            ret = ret.select(painfo.nparse_rule);
        }
        if (nparse.contains(":eq")) {
            if (painfo.nparse_index < 0) {
                ret = ret.eq(ret.size() + painfo.nparse_index);
            } else {
                ret = ret.eq(painfo.nparse_index);
            }
        }
        if (painfo.excludes != null && !ret.isEmpty()) {
            ret = ret.clone(); // clone first so that removing elements does not corrupt the cached document
            for (int i = 0; i < painfo.excludes.size(); i++) {
                ret.select(painfo.excludes.get(i)).remove();
            }
        }
        return ret;
    }
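    /*
     * For each element matched by p1, extracts list_text and list_url via parseDomForUrl
     * and joins them with '$'. Illustrative entry (hypothetical data):
     * "Some Title$https://example.com/detail?id=1".
     */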
    public static List<String> parseDomForList(String html, String p1, String list_text, String list_url, String add_url) {
        if (!pdfa_html.equals(html)) {
            pdfa_html = html;
            pdfa_doc = Jsoup.parse(html);
        }
        Document doc = pdfa_doc;
        p1 = parseHikerToJq(p1, false);
        String[] parses = p1.split(" ");
        Elements ret = new Elements();
        for (String pars : parses) {
            ret = parseOneRule(doc, pars, ret);
            if (ret.isEmpty()) {
                return new ArrayList<>();
            }
        }
        List<String> new_vod_list = new ArrayList<>();
        for (int i = 0; i < ret.size(); i++) {
            String it = ret.get(i).outerHtml();
            new_vod_list.add(parseDomForUrl(it, list_text, "").trim() + '$' + parseDomForUrl(it, list_url, add_url));
        }
        return new_vod_list;
    }
}