deviantart.py

# SPDX-License-Identifier: AGPL-3.0-or-later
# lint: pylint
"""Deviantart (Images)
"""

import urllib.parse
from lxml import html

from searx.utils import extract_text, eval_xpath, eval_xpath_list

# about
about = {
    "website": 'https://www.deviantart.com/',
    "wikidata_id": 'Q46523',
    "official_api_documentation": 'https://www.deviantart.com/developers/',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['images']
paging = True

# search-url
base_url = 'https://www.deviantart.com'

results_xpath = '//div[@class="_2pZkk"]/div/div/a'
url_xpath = './@href'
thumbnail_src_xpath = './div/img/@src'
img_src_xpath = './div/img/@srcset'
title_xpath = './@aria-label'
premium_xpath = '../div/div/div/text()'
premium_keytext = 'Watch the artist to view this deviation'
cursor_xpath = '(//a[@class="_1OGeq"]/@href)[last()]'


def request(query, params):
    # https://www.deviantart.com/search?q=foo

    nextpage_url = params['engine_data'].get('nextpage')
    # don't use nextpage when user selected to jump back to page 1
    if params['pageno'] > 1 and nextpage_url is not None:
        params['url'] = nextpage_url
    else:
        params['url'] = f"{base_url}/search?{urllib.parse.urlencode({'q': query})}"

    return params
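
# A minimal sketch (not part of the engine itself) of what ``request`` produces on
# the first page, assuming the usual SearXNG ``params`` dict with ``pageno`` and
# ``engine_data`` keys:
#
#   params = {'pageno': 1, 'engine_data': {}}
#   request('red fox', params)
#   # params['url'] == 'https://www.deviantart.com/search?q=red+fox'
#
# On later pages, the cursor URL that ``response`` stores under
# ``engine_data['nextpage']`` is reused verbatim.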


def response(resp):

    results = []

    dom = html.fromstring(resp.text)

    for result in eval_xpath_list(dom, results_xpath):

        # skip images that are blurred
        _text = extract_text(eval_xpath(result, premium_xpath))
        if _text and premium_keytext in _text:
            continue

        img_src = extract_text(eval_xpath(result, img_src_xpath))
        if img_src:
            img_src = img_src.split(' ')[0]
            parsed_url = urllib.parse.urlparse(img_src)
            img_src = parsed_url._replace(path=parsed_url.path.split('/v1')[0]).geturl()

        results.append(
            {
                'template': 'images.html',
                'url': extract_text(eval_xpath(result, url_xpath)),
                'img_src': img_src,
                'thumbnail_src': extract_text(eval_xpath(result, thumbnail_src_xpath)),
                'title': extract_text(eval_xpath(result, title_xpath)),
            }
        )

    nextpage_url = extract_text(eval_xpath(dom, cursor_xpath))
    if nextpage_url:
        results.append(
            {
                'engine_data': nextpage_url.replace("http://", "https://"),
                'key': 'nextpage',
            }
        )

    return results
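

# A minimal, self-contained sketch (not part of the upstream engine) showing how
# ``response`` could be exercised offline against a hand-written HTML fixture that
# mimics the XPaths above.  The ``FIXTURE`` markup and its URLs are made up for
# illustration only; running it still needs a SearXNG checkout for ``searx.utils``.
if __name__ == '__main__':
    from types import SimpleNamespace

    FIXTURE = """
    <html><body>
      <div class="_2pZkk"><div><div>
        <a href="https://www.deviantart.com/artist/art/example-1"
           aria-label="Example deviation">
          <div>
            <img src="https://images-wixmp.example/thumb.jpg"
                 srcset="https://images-wixmp.example/f/abc/example.jpg/v1/fill/w_300/x.jpg 300w"/>
          </div>
        </a>
      </div></div></div>
      <a class="_1OGeq" href="https://www.deviantart.com/search?q=foo&amp;cursor=abc">Next</a>
    </body></html>
    """

    # response() only reads ``resp.text``, so a namespace object stands in for the
    # real HTTP response here.
    fake_resp = SimpleNamespace(text=FIXTURE)
    for item in response(fake_resp):
        print(item)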