  1. # SPDX-License-Identifier: AGPL-3.0-or-later
  2. """
  3. Bing (Images)
  4. """
  5. from urllib.parse import urlencode
  6. from lxml import html
  7. from json import loads
  8. from searx.utils import match_language
  9. from searx.engines.bing import language_aliases
  10. from searx.engines.bing import _fetch_supported_languages, supported_languages_url # NOQA # pylint: disable=unused-import
  11. # about
  12. about = {
  13. "website": 'https://www.bing.com/images',
  14. "wikidata_id": 'Q182496',
  15. "official_api_documentation": 'https://www.microsoft.com/en-us/bing/apis/bing-image-search-api',
  16. "use_official_api": False,
  17. "require_api_key": False,
  18. "results": 'HTML',
  19. }
  20. # engine dependent config
  21. categories = ['images']
  22. paging = True
  23. safesearch = True
  24. time_range_support = True
  25. supported_languages_url = 'https://www.bing.com/account/general'
  26. number_of_results = 28
  27. # search-url
  28. base_url = 'https://www.bing.com/'
  29. search_string = 'images/search'\
  30. '?{query}'\
  31. '&count={count}'\
  32. '&first={first}'\
  33. '&FORM=IBASEP'
  34. time_range_string = '&qft=+filterui:age-lt{interval}'
  35. time_range_dict = {'day': '1440',
  36. 'week': '10080',
  37. 'month': '43200',
  38. 'year': '525600'}
  39. # safesearch definitions
  40. safesearch_types = {2: 'STRICT',
  41. 1: 'DEMOTE',
  42. 0: 'OFF'}
  43. # do search-request
  44. def request(query, params):
  45. offset = ((params['pageno'] - 1) * number_of_results) + 1
  46. search_path = search_string.format(
  47. query=urlencode({'q': query}),
  48. count=number_of_results,
  49. first=offset)
  50. language = match_language(params['language'], supported_languages, language_aliases).lower()
  51. params['cookies']['SRCHHPGUSR'] = \
  52. 'ADLT=' + safesearch_types.get(params['safesearch'], 'DEMOTE')
  53. params['cookies']['_EDGE_S'] = 'mkt=' + language +\
  54. '&ui=' + language + '&F=1'
  55. params['url'] = base_url + search_path
  56. if params['time_range'] in time_range_dict:
  57. params['url'] += time_range_string.format(interval=time_range_dict[params['time_range']])
  58. return params
  59. # get response from search-request
  60. def response(resp):
  61. results = []
  62. dom = html.fromstring(resp.text)
  63. # parse results
  64. for result in dom.xpath('//div[@class="imgpt"]'):
  65. try:
  66. img_format = result.xpath('./div[contains(@class, "img_info")]/span/text()')[0]
  67. # Microsoft seems to experiment with this code so don't make the path too specific,
  68. # just catch the text section for the first anchor in img_info assuming this to be
  69. # the originating site.
  70. source = result.xpath('./div[contains(@class, "img_info")]//a/text()')[0]
  71. m = loads(result.xpath('./a/@m')[0])
  72. # strip 'Unicode private use area' highlighting, they render to Tux
  73. # the Linux penguin and a standing diamond on my machine...
  74. title = m.get('t', '').replace('\ue000', '').replace('\ue001', '')
  75. results.append({'template': 'images.html',
  76. 'url': m['purl'],
  77. 'thumbnail_src': m['turl'],
  78. 'img_src': m['murl'],
  79. 'content': '',
  80. 'title': title,
  81. 'source': source,
  82. 'img_format': img_format})
  83. except:
  84. continue
  85. return results