# duckduckgo_definitions.py
  1. """
  2. DuckDuckGo (definitions)
  3. - `Instant Answer API`_
  4. - `DuckDuckGo query`_
  5. .. _Instant Answer API: https://duckduckgo.com/api
  6. .. _DuckDuckGo query: https://api.duckduckgo.com/?q=DuckDuckGo&format=json&pretty=1
  7. """
  8. import json
  9. from lxml import html
  10. from re import compile
  11. from searx.engines.xpath import extract_text
  12. from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url, language_aliases
  13. from searx.url_utils import urlencode
  14. from searx.utils import html_to_text, match_language
  15. url = 'https://api.duckduckgo.com/'\
  16. + '?{query}&format=json&pretty=0&no_redirect=1&d=1'
  17. http_regex = compile(r'^http:')
  18. def result_to_text(url, text, htmlResult):
  19. # TODO : remove result ending with "Meaning" or "Category"
  20. dom = html.fromstring(htmlResult)
  21. a = dom.xpath('//a')
  22. if len(a) >= 1:
  23. return extract_text(a[0])
  24. else:
  25. return text
  26. def request(query, params):
  27. params['url'] = url.format(query=urlencode({'q': query}))
  28. language = match_language(params['language'], supported_languages, language_aliases)
  29. language = language.split('-')[0]
  30. params['headers']['Accept-Language'] = language
  31. return params
  32. def response(resp):
  33. results = []
  34. search_res = json.loads(resp.text)
  35. content = ''
  36. heading = search_res.get('Heading', '')
  37. attributes = []
  38. urls = []
  39. infobox_id = None
  40. relatedTopics = []
  41. # add answer if there is one
  42. answer = search_res.get('Answer', '')
  43. if answer:
  44. if search_res.get('AnswerType', '') not in ['calc']:
  45. results.append({'answer': html_to_text(answer)})
  46. # add infobox
  47. if 'Definition' in search_res:
  48. content = content + search_res.get('Definition', '')
  49. if 'Abstract' in search_res:
  50. content = content + search_res.get('Abstract', '')
  51. # image
  52. image = search_res.get('Image', '')
  53. image = None if image == '' else image
  54. # attributes
  55. if 'Infobox' in search_res:
  56. infobox = search_res.get('Infobox', None)
  57. if 'content' in infobox:
  58. for info in infobox.get('content'):
  59. attributes.append({'label': info.get('label'),
  60. 'value': info.get('value')})
  61. # urls
  62. for ddg_result in search_res.get('Results', []):
  63. if 'FirstURL' in ddg_result:
  64. firstURL = ddg_result.get('FirstURL', '')
  65. text = ddg_result.get('Text', '')
  66. urls.append({'title': text, 'url': firstURL})
  67. results.append({'title': heading, 'url': firstURL})
  68. # related topics
  69. for ddg_result in search_res.get('RelatedTopics', []):
  70. if 'FirstURL' in ddg_result:
  71. suggestion = result_to_text(ddg_result.get('FirstURL', None),
  72. ddg_result.get('Text', None),
  73. ddg_result.get('Result', None))
  74. if suggestion != heading:
  75. results.append({'suggestion': suggestion})
  76. elif 'Topics' in ddg_result:
  77. suggestions = []
  78. relatedTopics.append({'name': ddg_result.get('Name', ''),
  79. 'suggestions': suggestions})
  80. for topic_result in ddg_result.get('Topics', []):
  81. suggestion = result_to_text(topic_result.get('FirstURL', None),
  82. topic_result.get('Text', None),
  83. topic_result.get('Result', None))
  84. if suggestion != heading:
  85. suggestions.append(suggestion)
  86. # abstract
  87. abstractURL = search_res.get('AbstractURL', '')
  88. if abstractURL != '':
  89. # add as result ? problem always in english
  90. infobox_id = abstractURL
  91. urls.append({'title': search_res.get('AbstractSource'),
  92. 'url': abstractURL})
  93. # definition
  94. definitionURL = search_res.get('DefinitionURL', '')
  95. if definitionURL != '':
  96. # add as result ? as answer ? problem always in english
  97. infobox_id = definitionURL
  98. urls.append({'title': search_res.get('DefinitionSource'),
  99. 'url': definitionURL})
  100. # to merge with wikidata's infobox
  101. if infobox_id:
  102. infobox_id = http_regex.sub('https:', infobox_id)
  103. # entity
  104. entity = search_res.get('Entity', None)
  105. # TODO continent / country / department / location / waterfall /
  106. # mountain range :
  107. # link to map search, get weather, near by locations
  108. # TODO musician : link to music search
  109. # TODO concert tour : ??
  110. # TODO film / actor / television / media franchise :
  111. # links to IMDB / rottentomatoes (or scrap result)
  112. # TODO music : link tu musicbrainz / last.fm
  113. # TODO book : ??
  114. # TODO artist / playwright : ??
  115. # TODO compagny : ??
  116. # TODO software / os : ??
  117. # TODO software engineer : ??
  118. # TODO prepared food : ??
  119. # TODO website : ??
  120. # TODO performing art : ??
  121. # TODO prepared food : ??
  122. # TODO programming language : ??
  123. # TODO file format : ??
  124. if len(heading) > 0:
  125. # TODO get infobox.meta.value where .label='article_title'
  126. if image is None and len(attributes) == 0 and len(urls) == 1 and\
  127. len(relatedTopics) == 0 and len(content) == 0:
  128. results.append({
  129. 'url': urls[0]['url'],
  130. 'title': heading,
  131. 'content': content
  132. })
  133. else:
  134. results.append({
  135. 'infobox': heading,
  136. 'id': infobox_id,
  137. 'entity': entity,
  138. 'content': content,
  139. 'img_src': image,
  140. 'attributes': attributes,
  141. 'urls': urls,
  142. 'relatedTopics': relatedTopics
  143. })
  144. return results