google_images.py

# SPDX-License-Identifier: AGPL-3.0-or-later
"""Google (Images)

:website:     https://images.google.com (redirected to subdomain www.)
:provide-api: yes (https://developers.google.com/custom-search/)
:using-api:   not the official one, since it needs registration with another service
:results:     HTML
:stable:      no
:template:    images.html
:parse:       url, title, content, source, thumbnail_src, img_src

For a detailed description of the *RESTful* API see: `Query Parameter
Definitions`_.

.. admonition:: Content-Security-Policy (CSP)

   This engine needs to allow images from the `data URLs`_ (prefixed with the
   ``data:`` scheme)::

       Header set Content-Security-Policy "img-src 'self' data: ;"

.. _Query Parameter Definitions:
   https://developers.google.com/custom-search/docs/xml_results#WebSearch_Query_Parameter_Definitions
"""

from lxml import html
from flask_babel import gettext

from searx import logger
from searx.url_utils import urlencode, urlparse
from searx.utils import eval_xpath
from searx.engines.xpath import extract_text

# pylint: disable=unused-import
from searx.engines.google import (
    supported_languages_url,
    _fetch_supported_languages,
)
# pylint: enable=unused-import

from searx.engines.google import (
    get_lang_country,
    google_domains,
    time_range_dict,
)

logger = logger.getChild('google images')

# engine dependent config

categories = ['images']
paging = False
language_support = True
use_locale_domain = True
time_range_support = True
safesearch = True

filter_mapping = {
    0: 'images',
    1: 'active',
    2: 'active'
}
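
# The keys above are searx's safesearch levels (0: none, 1: moderate,
# 2: strict); the values are what Google's ``safe=`` URL parameter expects.
# Note that ``request()`` below only ever appends ``safe=`` for levels 1 and
# 2, since ``params['safesearch']`` is falsy for level 0.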


def scrap_out_thumbs(dom):
    """Scrap out thumbnail data from <script> tags."""
    ret_val = dict()
    for script in eval_xpath(dom, '//script[contains(., "_setImgSrc(")]'):
        _script = script.text
        # _setImgSrc('0','data:image\/jpeg;base64,\/9j\/4AAQSkZJR ....');
        # strip the leading "_setImgSrc(" and the trailing ");", then split
        # thumbnail number and image data at the first comma
        _thumb_no, _img_data = _script[len("_setImgSrc("):-2].split(",", 1)
        _thumb_no = _thumb_no.replace("'", "")
        _img_data = _img_data.replace("'", "")
        # unescape the JavaScript string literal
        _img_data = _img_data.replace(r"\/", r"/")
        ret_val[_thumb_no] = _img_data.replace(r"\x3d", "=")
    return ret_val
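
# A minimal sketch of what ``scrap_out_thumbs`` consumes and produces; the
# markup below is a shortened, hypothetical example of Google's inline
# ``_setImgSrc`` payload, not one captured from a real response::
#
#     >>> page = html.fromstring(
#     ...     "<html><script>"
#     ...     r"_setImgSrc('0','data:image\/jpeg;base64,\/9j\/4AAQ\x3d\x3d');"
#     ...     "</script></html>")
#     >>> scrap_out_thumbs(page)
#     {'0': 'data:image/jpeg;base64,/9j/4AAQ=='}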


def request(query, params):
    """Google-Image search request"""

    language, country, lang_country = get_lang_country(
        # pylint: disable=undefined-variable
        params, supported_languages, language_aliases
    )
    subdomain = 'www.' + google_domains.get(country.upper(), 'google.com')

    query_url = 'https://' + subdomain + '/search' + "?" + urlencode({
        'q': query,
        'tbm': "isch",
        'hl': lang_country,
        'lr': "lang_" + language,
        'ie': "utf8",
        'oe': "utf8",
        'num': 30,
    })

    if params['time_range'] in time_range_dict:
        query_url += '&' + urlencode({'tbs': 'qdr:' + time_range_dict[params['time_range']]})
    if params['safesearch']:
        query_url += '&' + urlencode({'safe': filter_mapping[params['safesearch']]})

    params['url'] = query_url
    logger.debug("query_url --> %s", query_url)

    params['headers']['Accept-Language'] = (
        "%s,%s;q=0.8,%s;q=0.5" % (lang_country, language, language))
    logger.debug(
        "HTTP Accept-Language --> %s", params['headers']['Accept-Language'])
    params['headers']['Accept'] = (
        'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
    )
    # params['google_subdomain'] = subdomain
    return params
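
# A rough sketch of the ``params`` dict that the searx framework hands to
# ``request()``, reduced to the keys this engine actually reads (an
# assumption; a real call also needs the module-level ``supported_languages``
# and ``language_aliases`` that the framework fills in)::
#
#     params = {'headers': {}, 'language': 'en-US',
#               'time_range': 'week', 'safesearch': 1}
#     request('test', params)
#     # params['url'] --> 'https://www.google.com/search?q=test&tbm=isch&...'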


def response(resp):
    """Get response from google's search request"""
    results = []

    # detect google sorry
    resp_url = urlparse(resp.url)
    if resp_url.netloc == 'sorry.google.com' or resp_url.path == '/sorry/IndexRedirect':
        raise RuntimeWarning('sorry.google.com')

    if resp_url.path.startswith('/sorry'):
        raise RuntimeWarning(gettext('CAPTCHA required'))

    # which subdomain ?
    # subdomain = resp.search_params.get('google_subdomain')

    # convert the text to dom
    dom = html.fromstring(resp.text)
    img_base64_map = scrap_out_thumbs(dom)

    # parse results
    #
    # root element::
    #
    #     <div id="islmp" ..>
    #
    # result div per image::
    #
    #     <div jsmodel="tTXmib"> / <div jsaction="..." data-id="..."
    #
    # The data-id matches an item in a JSON data structure in::
    #
    #     <script nonce="I+vqelcy/01CKiBJi5Z1Ow">AF_initDataCallback({key: 'ds:1', ... data:function(){return [ ...
    #
    # In this structure the link to the original PNG, JPG or whatever is
    # given (we do not extract that link here; you could still implement
    # that).
    #
    # The first link per image-div contains an <img> with the data-iid for
    # base64 encoded image data::
    #
    #     <img class="rg_i Q4LuWd" data-iid="0"
    #
    # The second link per image-div is the target link::
    #
    #     <a class="VFACy kGQAp" href="https://en.wikipedia.org/wiki/The_Sacrament_of_the_Last_Supper">
    #
    # The second link also contains two div tags with the *description* and
    # the *publisher*::
    #
    #     <div class="WGvvNb">The Sacrament of the Last Supper ...</div>
    #     <div class="fxgdke">en.wikipedia.org</div>

    root = eval_xpath(dom, '//div[@id="islmp"]')
    if not root:
        logger.error("did not find root element id='islmp'")
        return results

    root = root[0]
    for img_node in eval_xpath(root, './/img[contains(@class, "rg_i")]'):

        try:
            img_alt = eval_xpath(img_node, '@alt')[0]

            img_base64_id = eval_xpath(img_node, '@data-iid')
            if img_base64_id:
                # inlined thumbnail, scraped from the <script> tags
                img_base64_id = img_base64_id[0]
                thumbnail_src = img_base64_map[img_base64_id]
            else:
                # fall back to the (lazy loading) src / data-src attributes
                thumbnail_src = eval_xpath(img_node, '@src')
                if not thumbnail_src:
                    thumbnail_src = eval_xpath(img_node, '@data-src')
                if thumbnail_src:
                    thumbnail_src = thumbnail_src[0]
                else:
                    thumbnail_src = ''

            link_node = eval_xpath(img_node, '../../../a[2]')[0]
            url = eval_xpath(link_node, '@href')[0]

            pub_nodes = eval_xpath(link_node, './div/div')
            pub_descr = img_alt
            pub_source = ''
            if pub_nodes:
                pub_descr = extract_text(pub_nodes[0])
                pub_source = extract_text(pub_nodes[1])

            results.append({
                'url': url,
                'title': img_alt,
                'content': pub_descr,
                'source': pub_source,
                'img_src': url,
                # 'img_format': img_format,
                'thumbnail_src': thumbnail_src,
                'template': 'images.html'
            })
        except Exception as e:  # pylint: disable=broad-except
            logger.error(e, exc_info=True)
            # from lxml import etree
            # logger.debug(etree.tostring(img_node, pretty_print=True))
            # import pdb
            # pdb.set_trace()
            continue

    return results