google.py

# SPDX-License-Identifier: AGPL-3.0-or-later
"""Google (Web)

:website:     https://www.google.com
:provide-api: yes (https://developers.google.com/custom-search/)
:using-api:   not the official one, since it needs registration to another service
:results:     HTML
:stable:      no
:parse:       url, title, content, number_of_results, answer, suggestion, correction

For a detailed description of the *REST-full* API see: `Query Parameter
Definitions`_.

.. _Query Parameter Definitions:
   https://developers.google.com/custom-search/docs/xml_results#WebSearch_Query_Parameter_Definitions
"""
# pylint: disable=invalid-name, missing-function-docstring

from lxml import html
from flask_babel import gettext
from searx.engines.xpath import extract_text
from searx import logger
from searx.url_utils import urlencode, urlparse
from searx.utils import match_language, eval_xpath

logger = logger.getChild('google engine')

# engine dependent config
categories = ['general']
paging = True
language_support = True
time_range_support = True
safesearch = True
supported_languages_url = 'https://www.google.com/preferences?#languages'

# based on https://en.wikipedia.org/wiki/List_of_Google_domains and tests
google_domains = {
    'BG': 'google.bg',      # Bulgaria
    'CZ': 'google.cz',      # Czech Republic
    'DE': 'google.de',      # Germany
    'DK': 'google.dk',      # Denmark
    'AT': 'google.at',      # Austria
    'CH': 'google.ch',      # Switzerland
    'GR': 'google.gr',      # Greece
    'AU': 'google.com.au',  # Australia
    'CA': 'google.ca',      # Canada
    'GB': 'google.co.uk',   # United Kingdom
    'ID': 'google.co.id',   # Indonesia
    'IE': 'google.ie',      # Ireland
    'IN': 'google.co.in',   # India
    'MY': 'google.com.my',  # Malaysia
    'NZ': 'google.co.nz',   # New Zealand
    'PH': 'google.com.ph',  # Philippines
    'SG': 'google.com.sg',  # Singapore
    # 'US': 'google.us',    # United States, redirect to .com
    'ZA': 'google.co.za',   # South Africa
    'AR': 'google.com.ar',  # Argentina
    'CL': 'google.cl',      # Chile
    'ES': 'google.es',      # Spain
    'MX': 'google.com.mx',  # Mexico
    'EE': 'google.ee',      # Estonia
    'FI': 'google.fi',      # Finland
    'BE': 'google.be',      # Belgium
    'FR': 'google.fr',      # France
    'IL': 'google.co.il',   # Israel
    'HR': 'google.hr',      # Croatia
    'HU': 'google.hu',      # Hungary
    'IT': 'google.it',      # Italy
    'JP': 'google.co.jp',   # Japan
    'KR': 'google.co.kr',   # South Korea
    'LT': 'google.lt',      # Lithuania
    'LV': 'google.lv',      # Latvia
    'NO': 'google.no',      # Norway
    'NL': 'google.nl',      # Netherlands
    'PL': 'google.pl',      # Poland
    'BR': 'google.com.br',  # Brazil
    'PT': 'google.pt',      # Portugal
    'RO': 'google.ro',      # Romania
    'RU': 'google.ru',      # Russia
    'SK': 'google.sk',      # Slovakia
    'SI': 'google.si',      # Slovenia
    'SE': 'google.se',      # Sweden
    'TH': 'google.co.th',   # Thailand
    'TR': 'google.com.tr',  # Turkey
    'UA': 'google.com.ua',  # Ukraine
    # 'CN': 'google.cn',    # China, only from China ?
    'HK': 'google.com.hk',  # Hong Kong
    'TW': 'google.com.tw'   # Taiwan
}

time_range_dict = {
    'day': 'd',
    'week': 'w',
    'month': 'm',
    'year': 'y'
}

# Filter results. 0: None, 1: Moderate, 2: Strict
filter_mapping = {
    0: 'off',
    1: 'medium',
    2: 'high'
}

# specific xpath variables
# ------------------------

# google results are grouped into <div class="g" ../>
results_xpath = '//div[@class="g"]'

# google *sections* are not usual *results*, we ignore them
g_section_with_header = './g-section-with-header'

# the title is a h3 tag relative to the result group
title_xpath = './/h3[1]'

# in the result group there is a <div class="r" ../> whose first child is an <a
# href=...> (on some results, the <a> is the first "descendant", not "child")
href_xpath = './/div[@class="r"]//a/@href'

# in the result group there is a <div class="s" ../> containing the *content*
content_xpath = './/div[@class="s"]'

# Suggestions are links placed in a *card-section*; we extract only the text
# from the links, not the links themselves.
suggestion_xpath = '//div[contains(@class, "card-section")]//a'

# Since google does *auto-correction* on the first query, these are not really
# *spelling suggestions*, but we use them anyway.
spelling_suggestion_xpath = '//div[@class="med"]/p/a'
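
# Taken together, the selectors above assume a single organic result looks
# roughly like the following (an illustrative sketch only -- Google's markup
# is undocumented and changes frequently):
#
#   <div class="g">
#     <div class="r">
#       <a href="https://example.org/..."><h3>result title</h3></a>
#     </div>
#     <div class="s">result content / snippet</div>
#   </div>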


def extract_text_from_dom(result, xpath):
    """returns extract_text on the first result selected by the xpath or None"""
    r = eval_xpath(result, xpath)
    if len(r) > 0:
        return extract_text(r[0])
    return None


def get_lang_country(params, lang_list, custom_aliases):
    """Returns a tuple with *language* in its first, *country* in its second and
    the combined *lang_country* string in its third position."""
    language = params['language']
    if language == 'all':
        language = 'en-US'

    language_array = language.split('-')

    if len(language_array) == 2:
        country = language_array[1]
    else:
        country = language_array[0].upper()

    language = match_language(language, lang_list, custom_aliases)
    lang_country = '%s-%s' % (language, country)
    if lang_country == 'en-EN':
        lang_country = 'en'

    return language, country, lang_country
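
# A minimal sketch of what get_lang_country() returns, assuming
# match_language() resolves 'de-AT' to 'de' and 'en-US' to 'en' against the
# supported_languages list fetched from Google's preferences page:
#
#   get_lang_country({'language': 'de-AT'}, supported_languages, language_aliases)
#   # --> ('de', 'AT', 'de-AT')
#   get_lang_country({'language': 'all'}, supported_languages, language_aliases)
#   # --> ('en', 'US', 'en-US')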


def request(query, params):
    """Google search request"""

    offset = (params['pageno'] - 1) * 10
    language, country, lang_country = get_lang_country(
        # pylint: disable=undefined-variable
        params, supported_languages, language_aliases
    )
    subdomain = 'www.' + google_domains.get(country.upper(), 'google.com')

    # https://www.google.de/search?q=corona&hl=de-DE&lr=lang_de&start=0&tbs=qdr%3Ad&safe=medium
    query_url = 'https://' + subdomain + '/search' + "?" + urlencode({
        'q': query,
        'hl': lang_country,
        'lr': "lang_" + language,
        'ie': "utf8",
        'oe': "utf8",
        'start': offset,
    })

    if params['time_range'] in time_range_dict:
        query_url += '&' + urlencode({'tbs': 'qdr:' + time_range_dict[params['time_range']]})
    if params['safesearch']:
        query_url += '&' + urlencode({'safe': filter_mapping[params['safesearch']]})

    params['url'] = query_url
    logger.debug("query_url --> %s", query_url)

    # en-US,en;q=0.8,en;q=0.5
    params['headers']['Accept-Language'] = (
        lang_country + ',' + language + ';q=0.8,' + language + ';q=0.5'
    )
    logger.debug("HTTP header Accept-Language --> %s",
                 params['headers']['Accept-Language'])
    params['headers']['Accept'] = (
        'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
    )
    # params['google_subdomain'] = subdomain

    return params
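
# A sketch of the request this builds, assuming the engine was initialised by
# searx (supported_languages / language_aliases are injected at runtime) and
# match_language() resolves the locale de-DE to 'de':
#
#   params = {'pageno': 2, 'language': 'de-DE', 'time_range': '',
#             'safesearch': 0, 'headers': {}}
#   request('corona', params)
#   # params['url'] -->
#   #   https://www.google.de/search?q=corona&hl=de-DE&lr=lang_de&ie=utf8&oe=utf8&start=10
#   # params['headers']['Accept-Language'] --> de-DE,de;q=0.8,de;q=0.5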


def response(resp):
    """Get response from google's search request"""
    results = []

    # detect google sorry
    resp_url = urlparse(resp.url)
    if resp_url.netloc == 'sorry.google.com' or resp_url.path == '/sorry/IndexRedirect':
        raise RuntimeWarning('sorry.google.com')

    if resp_url.path.startswith('/sorry'):
        raise RuntimeWarning(gettext('CAPTCHA required'))

    # which subdomain ?
    # subdomain = resp.search_params.get('google_subdomain')

    # convert the text to dom
    dom = html.fromstring(resp.text)

    # results --> answer
    answer = eval_xpath(dom, '//div[contains(@class, "LGOjhe")]//text()')
    if answer:
        results.append({'answer': ' '.join(answer)})
    else:
        logger.debug("did not find 'answer'")

    # results --> number_of_results
    try:
        _txt = eval_xpath(dom, '//div[@id="result-stats"]//text()')[0]
        _digit = ''.join([n for n in _txt if n.isdigit()])
        number_of_results = int(_digit)
        results.append({'number_of_results': number_of_results})
    except Exception as e:  # pylint: disable=broad-except
        logger.debug("did not find 'number_of_results'")
        logger.error(e, exc_info=True)

    # parse results
    for result in eval_xpath(dom, results_xpath):

        # google *sections*
        if extract_text(eval_xpath(result, g_section_with_header)):
            logger.debug("ignoring <g-section-with-header>")
            continue

        try:
            title = extract_text(eval_xpath(result, title_xpath)[0])
            url = eval_xpath(result, href_xpath)[0]
            content = extract_text_from_dom(result, content_xpath)
            results.append({
                'url': url,
                'title': title,
                'content': content
            })
        except Exception as e:  # pylint: disable=broad-except
            logger.error(e, exc_info=True)
            # from lxml import etree
            # logger.debug(etree.tostring(result, pretty_print=True))
            # import pdb
            # pdb.set_trace()
            continue

    # parse suggestion
    for suggestion in eval_xpath(dom, suggestion_xpath):
        # append suggestion
        results.append({'suggestion': extract_text(suggestion)})

    for correction in eval_xpath(dom, spelling_suggestion_xpath):
        results.append({'correction': extract_text(correction)})

    # return results
    return results
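
# The list returned by response() mixes several result types; a sketch of the
# shapes appended above (all field values are illustrative only):
#
#   [{'answer': '...'},                                   # instant answer box
#    {'number_of_results': 1230000},                      # from "result-stats"
#    {'url': 'https://example.org/', 'title': '...', 'content': '...'},
#    {'suggestion': '...'},                               # related search
#    {'correction': '...'}]                               # spelling correction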


# get supported languages from their site
def _fetch_supported_languages(resp):
    ret_val = {}
    dom = html.fromstring(resp.text)

    radio_buttons = eval_xpath(dom, '//*[@id="langSec"]//input[@name="lang"]')

    for x in radio_buttons:
        name = x.get("data-name")
        code = x.get("value")
        ret_val[code] = {"name": name}

    return ret_val
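
# ret_val maps Google's language codes to display names scraped from the
# preferences page, e.g. (illustrative values, depending on Google's markup):
#
#   {'en': {'name': 'English'}, 'de': {'name': 'Deutsch'}, ...}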