# SPDX-License-Identifier: AGPL-3.0-or-later
"""
 Seznam
"""

from urllib.parse import urlencode, urlparse

from lxml import html

from searx.poolrequests import get
from searx.exceptions import SearxEngineAccessDeniedException
from searx.utils import (
    extract_text,
    eval_xpath_list,
    eval_xpath_getindex,
    eval_xpath,
)

# about
about = {
    "website": "https://www.seznam.cz/",
    "wikidata_id": "Q3490485",
    "official_api_documentation": "https://api.sklik.cz/",
    "use_official_api": False,
    "require_api_key": False,
    "results": "HTML",
}

base_url = 'https://search.seznam.cz/'


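# request() first fetches the Seznam start page so the hidden <input> fields of
# the search form (presumably session / anti-bot tokens) and the cookies set by
# that page can be forwarded along with the actual search request.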
def request(query, params):
    response_index = get(base_url, headers=params['headers'], raise_for_httperror=True)
    dom = html.fromstring(response_index.text)

    url_params = {
        'q': query,
        'oq': query,
    }
    for e in eval_xpath_list(dom, '//input[@type="hidden"]'):
        name = e.get('name')
        value = e.get('value')
        url_params[name] = value

    params['url'] = base_url + '?' + urlencode(url_params)
    params['cookies'] = response_index.cookies

    return params


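# response() parses the returned HTML.  A redirect to a /verify path indicates
# Seznam's verification page (typically a bot check), which is reported as an
# access-denied error.  Result blocks are matched via obfuscated CSS class
# names ("bec586", "_3eded7"), so these XPath expressions are likely to break
# whenever Seznam regenerates its markup.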
def response(resp):
    resp_url = urlparse(resp.url)
    if resp_url.path.startswith('/verify'):
        raise SearxEngineAccessDeniedException()

    results = []
    dom = html.fromstring(resp.content.decode())

    for result_element in eval_xpath_list(dom, '//div[@data-dot="results"]/div'):
        result_data = eval_xpath_getindex(result_element, './/div[contains(@class, "bec586")]', 0, default=None)
        if result_data is None:
            continue
        title_element = eval_xpath_getindex(result_element, './/h3/a', 0)
        results.append({
            'url': title_element.get('href'),
            'title': extract_text(title_element),
            'content': extract_text(eval_xpath(result_data, './/div[@class="_3eded7"]')),
        })

    return results
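

# Rough manual-test sketch, not part of the engine.  It assumes this file is
# run from a searx checkout (so the searx.* imports above resolve) and that
# search.seznam.cz is reachable and does not answer with a CAPTCHA; normally
# the searx core drives request()/response() instead.
if __name__ == '__main__':
    import requests

    params = {'headers': {'User-Agent': 'Mozilla/5.0'}}
    request('seznam', params)
    resp = requests.get(params['url'], headers=params['headers'], cookies=params['cookies'])
    for item in response(resp):
        print(item['title'], '-', item['url'])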