wikidata.py

# -*- coding: utf-8 -*-
"""
 Wikidata

 @website      https://wikidata.org
 @provide-api  yes (https://wikidata.org/w/api.php)

 @using-api    partially (most things require scraping)
 @results      JSON, HTML
 @stable       no (html can change)
 @parse        url, infobox
"""
from searx import logger
from searx.poolrequests import get
from searx.engines.xpath import extract_text
from searx.engines.wikipedia import _fetch_supported_languages, supported_languages_url
from searx.url_utils import urlencode
from searx.utils import match_language, eval_xpath

from json import loads
from lxml.html import fromstring
from lxml import etree

logger = logger.getChild('wikidata')

result_count = 1

# urls
wikidata_host = 'https://www.wikidata.org'
url_search = wikidata_host \
    + '/w/index.php?{query}&ns0=1'

wikidata_api = wikidata_host + '/w/api.php'
url_detail = wikidata_api\
    + '?action=parse&format=json&{query}'\
    + '&redirects=1&prop=text%7Cdisplaytitle%7Cparsewarnings'\
    + '&disableeditsection=1&preview=1&sectionpreview=1&disabletoc=1&utf8=1&formatversion=2'

url_map = 'https://www.openstreetmap.org/'\
    + '?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'
url_image = 'https://commons.wikimedia.org/wiki/Special:FilePath/{filename}?width=500&height=400'

# xpaths
div_ids_xpath = '//div[@id]'
wikidata_ids_xpath = '//ul[@class="mw-search-results"]/li//a/@href'
title_xpath = '//*[contains(@class,"wikibase-title-label")]'
description_xpath = '//div[contains(@class,"wikibase-entitytermsview-heading-description")]'
label_xpath = './/div[contains(@class,"wikibase-statementgroupview-property-label")]/a'
url_xpath = './/a[contains(@class,"external free") or contains(@class, "wb-external-id")]'
wikilink_xpath = './/ul[contains(@class,"wikibase-sitelinklistview-listview")]'\
    + '/li[contains(@data-wb-siteid,"{wikiid}")]//a/@href'
property_row_xpath = './/div[contains(@class,"wikibase-statementview")]'
preferred_rank_xpath = './/span[contains(@class,"wikibase-rankselector-preferred")]'
value_xpath = './/div[contains(@class,"wikibase-statementview-mainsnak")]'\
    + '/*/div[contains(@class,"wikibase-snakview-value")]'
language_fallback_xpath = '//sup[contains(@class,"wb-language-fallback-indicator")]'
calendar_name_xpath = './/sup[contains(@class,"wb-calendar-name")]'
media_xpath = value_xpath + '//div[contains(@class,"commons-media-caption")]//a'
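

# map the property ids found on the entity page (div ids starting with 'P')
# to their DOM elements, so attributes and urls can be looked up by id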
def get_id_cache(result):
    id_cache = {}
    for e in eval_xpath(result, div_ids_xpath):
        id = e.get('id')
        if id.startswith('P'):
            id_cache[id] = e
    return id_cache
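

# do search-request: full-text search on wikidata.org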
def request(query, params):
    params['url'] = url_search.format(
        query=urlencode({'search': query}))
    return params
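

# get response from search-request and fetch the detail page
# of each matching wikidata entity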
def response(resp):
    results = []
    htmlparser = etree.HTMLParser()
    html = fromstring(resp.content.decode("utf-8"), parser=htmlparser)
    search_results = eval_xpath(html, wikidata_ids_xpath)

    if resp.search_params['language'].split('-')[0] == 'all':
        language = 'en'
    else:
        language = match_language(resp.search_params['language'], supported_languages, language_aliases).split('-')[0]

    # TODO: make requests asynchronous to avoid timeout when result_count > 1
    for search_result in search_results[:result_count]:
        wikidata_id = search_result.split('/')[-1]
        url = url_detail.format(query=urlencode({'page': wikidata_id, 'uselang': language}))
        htmlresponse = get(url)
        jsonresponse = loads(htmlresponse.content.decode("utf-8"))
        results += getDetail(jsonresponse, wikidata_id, language, resp.search_params['language'], htmlparser)

    return results
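

# parse one entity page (action=parse response) into an infobox result:
# title, description, image, attribute rows and related urls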
def getDetail(jsonresponse, wikidata_id, language, locale, htmlparser):
    results = []
    urls = []
    attributes = []

    title = jsonresponse.get('parse', {}).get('displaytitle', {})
    result = jsonresponse.get('parse', {}).get('text', {})

    if not title or not result:
        return results

    title = fromstring(title, parser=htmlparser)
    for elem in eval_xpath(title, language_fallback_xpath):
        elem.getparent().remove(elem)
    title = extract_text(eval_xpath(title, title_xpath))

    result = fromstring(result, parser=htmlparser)
    for elem in eval_xpath(result, language_fallback_xpath):
        elem.getparent().remove(elem)
    description = extract_text(eval_xpath(result, description_xpath))

    id_cache = get_id_cache(result)

    # URLS
    # official website
    add_url(urls, result, id_cache, 'P856', results=results)

    # wikipedia
    wikipedia_link_count = 0
    wikipedia_link = get_wikilink(result, language + 'wiki')
    if wikipedia_link:
        wikipedia_link_count += 1
        urls.append({'title': 'Wikipedia (' + language + ')',
                     'url': wikipedia_link})

    if language != 'en':
        wikipedia_en_link = get_wikilink(result, 'enwiki')
        if wikipedia_en_link:
            wikipedia_link_count += 1
            urls.append({'title': 'Wikipedia (en)',
                         'url': wikipedia_en_link})

    # TODO: get_wiki_firstlanguage
    # if wikipedia_link_count == 0:

    # more wikis
    add_url(urls, result, id_cache, default_label='Wikivoyage (' + language + ')', link_type=language + 'wikivoyage')
    add_url(urls, result, id_cache, default_label='Wikiquote (' + language + ')', link_type=language + 'wikiquote')
    add_url(urls, result, id_cache, default_label='Wikimedia Commons', link_type='commonswiki')

    add_url(urls, result, id_cache, 'P625', 'OpenStreetMap', link_type='geo')

    # musicbrainz
    add_url(urls, result, id_cache, 'P434', 'MusicBrainz', 'http://musicbrainz.org/artist/')
    add_url(urls, result, id_cache, 'P435', 'MusicBrainz', 'http://musicbrainz.org/work/')
    add_url(urls, result, id_cache, 'P436', 'MusicBrainz', 'http://musicbrainz.org/release-group/')
    add_url(urls, result, id_cache, 'P966', 'MusicBrainz', 'http://musicbrainz.org/label/')

    # IMDb
    add_url(urls, result, id_cache, 'P345', 'IMDb', 'https://www.imdb.com/', link_type='imdb')
    # source code repository
    add_url(urls, result, id_cache, 'P1324')
    # blog
    add_url(urls, result, id_cache, 'P1581')

    # social media links
    add_url(urls, result, id_cache, 'P2397', 'YouTube', 'https://www.youtube.com/channel/')
    add_url(urls, result, id_cache, 'P1651', 'YouTube', 'https://www.youtube.com/watch?v=')
    add_url(urls, result, id_cache, 'P2002', 'Twitter', 'https://twitter.com/')
    add_url(urls, result, id_cache, 'P2013', 'Facebook', 'https://facebook.com/')
    add_url(urls, result, id_cache, 'P2003', 'Instagram', 'https://instagram.com/')

    urls.append({'title': 'Wikidata',
                 'url': 'https://www.wikidata.org/wiki/'
                 + wikidata_id + '?uselang=' + language})

    # INFOBOX ATTRIBUTES (ROWS)

    # DATES
    # inception date
    add_attribute(attributes, id_cache, 'P571', date=True)
    # dissolution date
    add_attribute(attributes, id_cache, 'P576', date=True)
    # start date
    add_attribute(attributes, id_cache, 'P580', date=True)
    # end date
    add_attribute(attributes, id_cache, 'P582', date=True)
    # date of birth
    add_attribute(attributes, id_cache, 'P569', date=True)
    # date of death
    add_attribute(attributes, id_cache, 'P570', date=True)
    # date of spacecraft launch
    add_attribute(attributes, id_cache, 'P619', date=True)
    # date of spacecraft landing
    add_attribute(attributes, id_cache, 'P620', date=True)

    # nationality
    add_attribute(attributes, id_cache, 'P27')
    # country of origin
    add_attribute(attributes, id_cache, 'P495')
    # country
    add_attribute(attributes, id_cache, 'P17')
    # headquarters location
    add_attribute(attributes, id_cache, 'P159')

    # PLACES
    # capital
    add_attribute(attributes, id_cache, 'P36', trim=True)
    # head of state
    add_attribute(attributes, id_cache, 'P35', trim=True)
    # head of government
    add_attribute(attributes, id_cache, 'P6', trim=True)
    # type of government
    add_attribute(attributes, id_cache, 'P122')
    # official language
    add_attribute(attributes, id_cache, 'P37')
    # population
    add_attribute(attributes, id_cache, 'P1082', trim=True)
    # area
    add_attribute(attributes, id_cache, 'P2046')
    # currency
    add_attribute(attributes, id_cache, 'P38', trim=True)
    # height (building)
    add_attribute(attributes, id_cache, 'P2048')

    # MEDIA
    # platform (videogames)
    add_attribute(attributes, id_cache, 'P400')
    # author
    add_attribute(attributes, id_cache, 'P50')
    # creator
    add_attribute(attributes, id_cache, 'P170')
    # director
    add_attribute(attributes, id_cache, 'P57')
    # performer
    add_attribute(attributes, id_cache, 'P175')
    # developer
    add_attribute(attributes, id_cache, 'P178')
    # producer
    add_attribute(attributes, id_cache, 'P162')
    # manufacturer
    add_attribute(attributes, id_cache, 'P176')
    # screenwriter
    add_attribute(attributes, id_cache, 'P58')
    # production company
    add_attribute(attributes, id_cache, 'P272')
    # record label
    add_attribute(attributes, id_cache, 'P264')
    # publisher
    add_attribute(attributes, id_cache, 'P123')
    # original network
    add_attribute(attributes, id_cache, 'P449')
    # distributor
    add_attribute(attributes, id_cache, 'P750')
    # composer
    add_attribute(attributes, id_cache, 'P86')
    # publication date
    add_attribute(attributes, id_cache, 'P577', date=True)
    # genre
    add_attribute(attributes, id_cache, 'P136')
    # original language
    add_attribute(attributes, id_cache, 'P364')
    # isbn (ISBN-13)
    add_attribute(attributes, id_cache, 'P212')
    # software license
    add_attribute(attributes, id_cache, 'P275')
    # programming language
    add_attribute(attributes, id_cache, 'P277')
    # version
    add_attribute(attributes, id_cache, 'P348', trim=True)
    # narrative location
    add_attribute(attributes, id_cache, 'P840')

    # LANGUAGES
    # number of speakers
    add_attribute(attributes, id_cache, 'P1098')
    # writing system
    add_attribute(attributes, id_cache, 'P282')
    # regulatory body
    add_attribute(attributes, id_cache, 'P1018')
    # language code
    add_attribute(attributes, id_cache, 'P218')

    # OTHER
    # ceo
    add_attribute(attributes, id_cache, 'P169', trim=True)
    # founder
    add_attribute(attributes, id_cache, 'P112')
    # legal form (company/organization)
    add_attribute(attributes, id_cache, 'P1454')
    # operator
    add_attribute(attributes, id_cache, 'P137')
    # crew members
    add_attribute(attributes, id_cache, 'P1029')
    # taxon
    add_attribute(attributes, id_cache, 'P225')
    # chemical formula
    add_attribute(attributes, id_cache, 'P274')
    # winner (sports/contests)
    add_attribute(attributes, id_cache, 'P1346')
    # number of deaths
    add_attribute(attributes, id_cache, 'P1120')
    # currency code
    add_attribute(attributes, id_cache, 'P498')

    image = add_image(id_cache)

    if len(attributes) == 0 and len(urls) == 2 and len(description) == 0:
        results.append({
            'url': urls[0]['url'],
            'title': title,
            'content': description
        })
    else:
        results.append({
            'infobox': title,
            'id': wikipedia_link,
            'content': description,
            'img_src': image,
            'attributes': attributes,
            'urls': urls
        })

    return results
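

# pick an image for the infobox from the first image-like property found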
# only returns first match
def add_image(id_cache):
    # P15: route map, P242: locator map, P154: logo, P18: image, P41: flag, P2716: collage, P2910: icon
    property_ids = ['P15', 'P242', 'P154', 'P18', 'P41', 'P2716', 'P2910']

    for property_id in property_ids:
        image = id_cache.get(property_id, None)
        if image is not None:
            image_name = eval_xpath(image, media_xpath)
            image_src = url_image.replace('{filename}', extract_text(image_name[0]))
            return image_src
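

# add one infobox row for property_id; date=True strips the calendar name
# from the value and implies trim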
# setting trim will only return preferred-rank rows, or the first row if none is preferred
def add_attribute(attributes, id_cache, property_id, default_label=None, date=False, trim=False):
    attribute = id_cache.get(property_id, None)
    if attribute is not None:

        if default_label:
            label = default_label
        else:
            label = extract_text(eval_xpath(attribute, label_xpath))
            label = label[0].upper() + label[1:]

        if date:
            trim = True
            # remove calendar name
            calendar_name = eval_xpath(attribute, calendar_name_xpath)
            for calendar in calendar_name:
                calendar.getparent().remove(calendar)

        concat_values = ""
        values = []
        first_value = None
        for row in eval_xpath(attribute, property_row_xpath):
            if not first_value or not trim or eval_xpath(row, preferred_rank_xpath):
                value = eval_xpath(row, value_xpath)
                if not value:
                    continue
                value = extract_text(value)

                # save first value in case no ranked row is found
                if trim and not first_value:
                    first_value = value
                else:
                    # to avoid duplicate values
                    if value not in values:
                        concat_values += value + ", "
                        values.append(value)

        if trim and not values:
            attributes.append({'label': label,
                               'value': first_value})
        else:
            attributes.append({'label': label,
                               'value': concat_values[:-2]})
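

# add a link (or several) to the urls list; property values can be prefixed
# with url_prefix, and geo/imdb/wiki links get special handling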
# requires property_id unless it's a wiki link (defined in link_type)
def add_url(urls, result, id_cache, property_id=None, default_label=None, url_prefix=None, results=None,
            link_type=None):
    links = []

    # wiki links don't have property in wikidata page
    if link_type and 'wiki' in link_type:
        links.append(get_wikilink(result, link_type))
    else:
        dom_element = id_cache.get(property_id, None)
        if dom_element is not None:
            if not default_label:
                label = extract_text(eval_xpath(dom_element, label_xpath))
                label = label[0].upper() + label[1:]

            if link_type == 'geo':
                links.append(get_geolink(dom_element))

            elif link_type == 'imdb':
                links.append(get_imdblink(dom_element, url_prefix))

            else:
                url_results = eval_xpath(dom_element, url_xpath)
                for link in url_results:
                    if link is not None:
                        if url_prefix:
                            link = url_prefix + extract_text(link)
                        else:
                            link = extract_text(link)
                        links.append(link)

    # append urls
    for url in links:
        if url is not None:
            u = {'title': default_label or label, 'url': url}
            if property_id == 'P856':
                u['official'] = True
                u['domain'] = url.split('/')[2]
            urls.append(u)
            if results is not None:
                results.append(u)
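

# build an IMDb URL from the external id, dispatching on its prefix
# (tt: title, nm: name, ch: character, co: company, ev: event)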
def get_imdblink(result, url_prefix):
    imdb_id = eval_xpath(result, value_xpath)
    if imdb_id:
        imdb_id = extract_text(imdb_id)
        id_prefix = imdb_id[:2]
        if id_prefix == 'tt':
            url = url_prefix + 'title/' + imdb_id
        elif id_prefix == 'nm':
            url = url_prefix + 'name/' + imdb_id
        elif id_prefix == 'ch':
            url = url_prefix + 'character/' + imdb_id
        elif id_prefix == 'co':
            url = url_prefix + 'company/' + imdb_id
        elif id_prefix == 'ev':
            url = url_prefix + 'event/' + imdb_id
        else:
            url = None
        return url
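

# convert the displayed DMS coordinates into an OpenStreetMap link,
# e.g. 48°51'24"N, 2°21'3"E becomes lat/lon of roughly 48.86, 2.35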
def get_geolink(result):
    coordinates = eval_xpath(result, value_xpath)
    if not coordinates:
        return None
    coordinates = extract_text(coordinates[0])
    latitude, longitude = coordinates.split(',')

    # convert to decimal
    lat = int(latitude[:latitude.find(u'°')])
    if latitude.find('\'') >= 0:
        lat += int(latitude[latitude.find(u'°') + 1:latitude.find('\'')] or 0) / 60.0
    if latitude.find('"') >= 0:
        lat += float(latitude[latitude.find('\'') + 1:latitude.find('"')] or 0) / 3600.0
    if latitude.find('S') >= 0:
        lat *= -1
    lon = int(longitude[:longitude.find(u'°')])
    if longitude.find('\'') >= 0:
        lon += int(longitude[longitude.find(u'°') + 1:longitude.find('\'')] or 0) / 60.0
    if longitude.find('"') >= 0:
        lon += float(longitude[longitude.find('\'') + 1:longitude.find('"')] or 0) / 3600.0
    if longitude.find('W') >= 0:
        lon *= -1

    # TODO: get precision
    precision = 0.0002
    # there is no zoom information, deduce from precision (error prone)
    # samples :
    # 13 --> 5
    # 1 --> 6
    # 0.016666666666667 --> 9
    # 0.00027777777777778 --> 19
    # wolframalpha :
    # quadratic fit { {13, 5}, {1, 6}, {0.0166666, 9}, {0.0002777777,19}}
    # 14.1186-8.8322 x+0.625447 x^2
    if precision < 0.0003:
        zoom = 19
    else:
        zoom = int(15 - precision * 8.8322 + precision * precision * 0.625447)

    url = url_map\
        .replace('{latitude}', str(lat))\
        .replace('{longitude}', str(lon))\
        .replace('{zoom}', str(zoom))

    return url
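

# return the sitelink for the given wiki id (e.g. 'enwiki'), normalized to https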
def get_wikilink(result, wikiid):
    url = eval_xpath(result, wikilink_xpath.replace('{wikiid}', wikiid))
    if not url:
        return None
    url = url[0]
    if url.startswith('http://'):
        url = url.replace('http://', 'https://')
    elif url.startswith('//'):
        url = 'https:' + url
    return url