#!/usr/bin/env python
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Fetch website descriptions from the websites themselves and from the
:origin:`searx/engines/wikidata.py` engine.

Output file: :origin:`searx/data/engine_descriptions.json`.

"""

# pylint: disable=invalid-name, global-statement

import json
from urllib.parse import urlparse
from os.path import join

from lxml.html import fromstring

from searx.engines import wikidata, set_loggers
from searx.utils import extract_text, searx_useragent, gen_useragent, detect_language
from searx.locales import LOCALE_NAMES, locales_initialize, match_locale
from searx import searx_dir
import searx.search
import searx.network

from searx.data import data_dir

DATA_FILE = data_dir / 'engine_descriptions.json'

set_loggers(wikidata, 'wikidata')
locales_initialize()

# You can run the query in https://query.wikidata.org
# replace %IDS% by Wikidata entities separated by spaces with the prefix wd:,
# for example wd:Q182496 wd:Q1540899, and replace %LANGUAGES_SPARQL% by a
# comma-separated list of quoted language codes, for example 'en', 'fr', 'de'.
SPARQL_WIKIPEDIA_ARTICLE = """
SELECT DISTINCT ?item ?name ?article ?lang
WHERE {
  hint:Query hint:optimizer "None".
  VALUES ?item { %IDS% }
  ?article schema:about ?item ;
           schema:inLanguage ?lang ;
           schema:name ?name ;
           schema:isPartOf [ wikibase:wikiGroup "wikipedia" ] .
  FILTER(?lang in (%LANGUAGES_SPARQL%)) .
  FILTER (!CONTAINS(?name, ':')) .
}
ORDER BY ?item ?lang
"""
SPARQL_DESCRIPTION = """
SELECT DISTINCT ?item ?itemDescription
WHERE {
  VALUES ?item { %IDS% }
  ?item schema:description ?itemDescription .
  FILTER (lang(?itemDescription) in (%LANGUAGES_SPARQL%))
}
ORDER BY ?item
"""

NOT_A_DESCRIPTION = [
    'web site',
    'site web',
    'komputa serĉilo',
    'interreta serĉilo',
    'bilaketa motor',
    'web search engine',
    'wikimedia täpsustuslehekülg',
]

SKIP_ENGINE_SOURCE = [
    # fmt: off
    ('gitlab', 'wikidata')
    # descriptions are about wikipedia disambiguation pages
    # fmt: on
]

WIKIPEDIA_LANGUAGES = {}
LANGUAGES_SPARQL = ''
IDS = None
WIKIPEDIA_LANGUAGE_VARIANTS = {'zh_Hant': 'zh-tw'}

descriptions = {}
wd_to_engine_name = {}


def normalize_description(description):
    for c in [chr(c) for c in range(0, 31)]:
        description = description.replace(c, ' ')
    description = ' '.join(description.strip().split())
    return description
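
# A quick sanity check of normalize_description (illustrative input): control
# characters are replaced by spaces and runs of whitespace are collapsed, e.g.
#   normalize_description('a  free \t\n search engine ') == 'a free search engine'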


def update_description(engine_name, lang, description, source, replace=True):
    if not isinstance(description, str):
        return
    description = normalize_description(description)
    if description.lower() == engine_name.lower():
        return
    if description.lower() in NOT_A_DESCRIPTION:
        return
    if (engine_name, source) in SKIP_ENGINE_SOURCE:
        return
    if ' ' not in description:
        # skip one-word descriptions (like "website")
        return
    if replace or lang not in descriptions[engine_name]:
        descriptions[engine_name][lang] = [description, source]
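
# On success, update_description stores a [description, source] pair, e.g.
# (hypothetical values):
#   descriptions['duckduckgo']['en'] = ['privacy-oriented search engine', 'wikidata']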


def get_wikipedia_summary(wikipedia_url, searxng_locale):
    # build the REST API URL from the HTML URL

    # headers
    headers = {'User-Agent': searx_useragent()}
    if searxng_locale in WIKIPEDIA_LANGUAGE_VARIANTS:
        headers['Accept-Language'] = WIKIPEDIA_LANGUAGE_VARIANTS.get(searxng_locale)

    # URL path: from HTML URL to REST API URL
    parsed_url = urlparse(wikipedia_url)
    # remove the /wiki/ prefix
    article_name = parsed_url.path.split('/wiki/')[1]
    # article_name is already encoded, except for the /, which has to be encoded
    # for the REST API call
    encoded_article_name = article_name.replace('/', '%2F')
    path = '/api/rest_v1/page/summary/' + encoded_article_name
    wikipedia_rest_url = parsed_url._replace(path=path).geturl()
    try:
        response = searx.network.get(wikipedia_rest_url, headers=headers, timeout=10)
        response.raise_for_status()
    except Exception as e:  # pylint: disable=broad-except
        print("  ", wikipedia_url, e)
        return None
    api_result = json.loads(response.text)
    return api_result.get('extract')
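
# Example of the URL rewrite performed by get_wikipedia_summary, using the
# sample article URL mentioned in fetch_wikipedia_descriptions below:
#   https://de.wikipedia.org/wiki/PubMed
#   --> https://de.wikipedia.org/api/rest_v1/page/summary/PubMed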


def get_website_description(url, lang1, lang2=None):
    headers = {
        'User-Agent': gen_useragent(),
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'DNT': '1',
        'Upgrade-Insecure-Requests': '1',
        'Sec-GPC': '1',
        'Cache-Control': 'max-age=0',
    }
    if lang1 is not None:
        lang_list = [lang1]
        if lang2 is not None:
            lang_list.append(lang2)
        headers['Accept-Language'] = f'{",".join(lang_list)};q=0.8'
    try:
        response = searx.network.get(url, headers=headers, timeout=10)
        response.raise_for_status()
    except Exception:  # pylint: disable=broad-except
        return (None, None)

    try:
        html = fromstring(response.text)
    except ValueError:
        html = fromstring(response.content)

    description = extract_text(html.xpath('/html/head/meta[@name="description"]/@content'))
    if not description:
        description = extract_text(html.xpath('/html/head/meta[@property="og:description"]/@content'))
    if not description:
        description = extract_text(html.xpath('/html/head/title'))
    lang = extract_text(html.xpath('/html/@lang'))
    if lang is None and lang1 is not None and len(lang1) > 0:
        lang = lang1
    lang = detect_language(description) or lang or 'en'
    lang = lang.split('_')[0]
    lang = lang.split('-')[0]
    return (lang, description)
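
# Illustrative call (example.org is a placeholder, the result is made up):
#   get_website_description('https://example.org/', 'fr', 'fr')
#   --> ('fr', 'Exemple de description')
# The returned lang prefers detect_language() on the description text, then the
# page's lang attribute, then the requested lang1, then 'en'.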


def initialize():
    global IDS, LANGUAGES_SPARQL
    searx.search.initialize()
    wikipedia_engine = searx.engines.engines['wikipedia']

    locale2lang = {'nl-BE': 'nl'}
    for sxng_ui_lang in LOCALE_NAMES:

        sxng_ui_alias = locale2lang.get(sxng_ui_lang, sxng_ui_lang)
        wiki_lang = None

        if sxng_ui_alias in wikipedia_engine.traits.custom['WIKIPEDIA_LANGUAGES']:
            wiki_lang = sxng_ui_alias
        if not wiki_lang:
            wiki_lang = wikipedia_engine.traits.get_language(sxng_ui_alias)
        if not wiki_lang:
            print(f"WIKIPEDIA_LANGUAGES missing {sxng_ui_lang}")
            continue
        WIKIPEDIA_LANGUAGES[sxng_ui_lang] = wiki_lang

    LANGUAGES_SPARQL = ', '.join(f"'{l}'" for l in set(WIKIPEDIA_LANGUAGES.values()))
    for engine_name, engine in searx.engines.engines.items():
        descriptions[engine_name] = {}
        wikidata_id = getattr(engine, "about", {}).get('wikidata_id')
        if wikidata_id is not None:
            wd_to_engine_name.setdefault(wikidata_id, set()).add(engine_name)

    IDS = ' '.join('wd:' + wd_id for wd_id in wd_to_engine_name)
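
# After initialize(), the query placeholders are available as module globals;
# with the example entities from the header comment they would look like
# (illustrative, shortened):
#   IDS = 'wd:Q182496 wd:Q1540899'
#   LANGUAGES_SPARQL = "'en', 'fr', 'de'"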


def fetch_wikidata_descriptions():
    print('Fetching wikidata descriptions')
    searx.network.set_timeout_for_thread(60)
    result = wikidata.send_wikidata_query(
        SPARQL_DESCRIPTION.replace('%IDS%', IDS).replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL)
    )
    if result is not None:
        for binding in result['results']['bindings']:
            wikidata_id = binding['item']['value'].replace('http://www.wikidata.org/entity/', '')
            wikidata_lang = binding['itemDescription']['xml:lang']
            desc = binding['itemDescription']['value']
            for engine_name in wd_to_engine_name[wikidata_id]:
                for searxng_locale in LOCALE_NAMES:
                    if WIKIPEDIA_LANGUAGES[searxng_locale] != wikidata_lang:
                        continue
                    print(
                        f"  engine: {engine_name:20} / wikidata_lang: {wikidata_lang:5}",
                        f"/ len(wikidata_desc): {len(desc)}",
                    )
                    update_description(engine_name, searxng_locale, desc, 'wikidata')


def fetch_wikipedia_descriptions():
    print('Fetching wikipedia descriptions')
    result = wikidata.send_wikidata_query(
        SPARQL_WIKIPEDIA_ARTICLE.replace('%IDS%', IDS).replace('%LANGUAGES_SPARQL%', LANGUAGES_SPARQL)
    )
    if result is not None:
        for binding in result['results']['bindings']:
            wikidata_id = binding['item']['value'].replace('http://www.wikidata.org/entity/', '')
            wikidata_lang = binding['name']['xml:lang']
            wikipedia_url = binding['article']['value']  # for example https://de.wikipedia.org/wiki/PubMed

            for engine_name in wd_to_engine_name[wikidata_id]:
                for searxng_locale in LOCALE_NAMES:
                    if WIKIPEDIA_LANGUAGES[searxng_locale] != wikidata_lang:
                        continue
                    desc = get_wikipedia_summary(wikipedia_url, searxng_locale)
                    if not desc:
                        continue
                    print(
                        f"  engine: {engine_name:20} / wikidata_lang: {wikidata_lang:5}",
                        f"/ len(wikipedia_desc): {len(desc)}",
                    )
                    update_description(engine_name, searxng_locale, desc, 'wikipedia')


def normalize_url(url):
    url = url.replace('{language}', 'en')
    url = urlparse(url)._replace(path='/', params='', query='', fragment='').geturl()
    url = url.replace('https://api.', 'https://')
    return url
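
# For example (hypothetical search_url):
#   normalize_url('https://api.example.org/{language}/search?q=test')
#   --> 'https://example.org/'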


def fetch_website_description(engine_name, website):
    print(f"- fetch website descr: {engine_name} / {website}")
    default_lang, default_description = get_website_description(website, None, None)

    if default_lang is None or default_description is None:
        # the front page can't be fetched: skip this engine
        return

    # put the most common languages at the front of the language list
    languages = ['en', 'es', 'pt', 'ru', 'tr', 'fr']
    languages = languages + [l for l in LOCALE_NAMES if l not in languages]

    previous_matched_lang = None
    previous_count = 0

    for lang in languages:

        if lang in descriptions[engine_name]:
            continue

        fetched_lang, desc = get_website_description(website, lang, WIKIPEDIA_LANGUAGES[lang])
        if fetched_lang is None or desc is None:
            continue

        # check whether desc changes with the different lang values
        if fetched_lang == previous_matched_lang:
            previous_count += 1
            if previous_count == 6:
                # the website has returned the same description for 6 different
                # languages in the Accept-Language header: stop now
                break
        else:
            previous_matched_lang = fetched_lang
            previous_count = 0

        # Don't trust the value of fetched_lang: some websites return
        # inappropriate values, for example bing-images::
        #
        #   requested lang: zh-Hans-CN / fetched lang: ceb / desc: 查看根据您的兴趣量身定制的提要
        #
        # The lang ceb is "Cebuano", but the description is given in zh-Hans-CN

        print(
            f"  engine: {engine_name:20} / requested lang:{lang:7}"
            f" / fetched lang: {fetched_lang:7} / len(desc): {len(desc)}"
        )

        matched_lang = match_locale(fetched_lang, LOCALE_NAMES.keys(), fallback=lang)
        update_description(engine_name, matched_lang, desc, website, replace=False)


def fetch_website_descriptions():
    print('Fetching website descriptions')
    for engine_name, engine in searx.engines.engines.items():
        website = getattr(engine, "about", {}).get('website')
        if website is None and hasattr(engine, "search_url"):
            website = normalize_url(getattr(engine, "search_url"))
        if website is None and hasattr(engine, "base_url"):
            website = normalize_url(getattr(engine, "base_url"))
        if website is not None:
            fetch_website_description(engine_name, website)


def get_engine_descriptions_filename():
    return join(join(searx_dir, "data"), "engine_descriptions.json")


def get_output():
    """
    From descriptions[engine][language] = [description, source]
    To

    * output[language][engine] = description_and_source
    * description_and_source can be:
       * [description, source]
       * description (if source = "wikipedia")
       * [f"engine:lang", "ref"] (reference to another existing description)
    """
    output = {locale: {} for locale in LOCALE_NAMES}

    seen_descriptions = {}

    for engine_name, lang_descriptions in descriptions.items():
        for language, description in lang_descriptions.items():
            if description[0] in seen_descriptions:
                ref = seen_descriptions[description[0]]
                description = [f'{ref[0]}:{ref[1]}', 'ref']
            else:
                seen_descriptions[description[0]] = (engine_name, language)
                if description[1] == 'wikipedia':
                    description = description[0]
            output.setdefault(language, {}).setdefault(engine_name, description)

    return output
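
# Illustrative shape of the output written to DATA_FILE (values are made up):
#
#   {
#    "en": {
#     "bing": ["web search engine from Microsoft", "wikidata"],  # [description, source]
#     "wikipedia": "free multilingual online encyclopedia",      # source "wikipedia": plain string
#     "wikimini": ["wikipedia:en", "ref"]                        # reference to another description
#    }
#   }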


def main():
    initialize()
    fetch_wikidata_descriptions()
    fetch_wikipedia_descriptions()
    fetch_website_descriptions()

    output = get_output()
    with DATA_FILE.open('w', encoding='utf8') as f:
        f.write(json.dumps(output, indent=1, separators=(',', ':'), sort_keys=True, ensure_ascii=False))


if __name__ == "__main__":
    main()