Diffstat (limited to 'searx/engines')
-rw-r--r--  searx/engines/__init__.py           |  12
-rw-r--r--  searx/engines/currency_convert.py   |   2
-rw-r--r--  searx/engines/deviantart.py         |  20
-rw-r--r--  searx/engines/duckduckgo.py         |   9
-rw-r--r--  searx/engines/google.py             |  12
-rw-r--r--  searx/engines/google_images.py      |   8
-rw-r--r--  searx/engines/json_engine.py        |  16
-rw-r--r--  searx/engines/startpage.py          |  10
-rw-r--r--  searx/engines/swisscows.py          |   8
-rw-r--r--  searx/engines/tokyotoshokan.py      |   2
-rw-r--r--  searx/engines/wikidata.py           | 624
-rw-r--r--  searx/engines/wikipedia.py          |   3
-rw-r--r--  searx/engines/www500px.py           |   2
-rw-r--r--  searx/engines/yahoo.py              |  34
-rw-r--r--  searx/engines/yahoo_news.py         |   2
15 files changed, 498 insertions(+), 266 deletions(-)
diff --git a/searx/engines/__init__.py b/searx/engines/__init__.py
index 099baa587..782b622b0 100644
--- a/searx/engines/__init__.py
+++ b/searx/engines/__init__.py
@@ -42,7 +42,8 @@ engine_default_args = {'paging': False,
'shortcut': '-',
'disabled': False,
'suspend_end_time': 0,
- 'continuous_errors': 0}
+ 'continuous_errors': 0,
+ 'time_range_support': False}
def load_module(filename):
@@ -57,7 +58,11 @@ def load_module(filename):
def load_engine(engine_data):
engine_name = engine_data['engine']
- engine = load_module(engine_name + '.py')
+ try:
+ engine = load_module(engine_name + '.py')
+ except:
+ logger.exception('Cannot load engine "{}"'.format(engine_name))
+ return None
for param_name in engine_data:
if param_name == 'engine':
@@ -199,4 +204,5 @@ if 'engines' not in settings or not settings['engines']:
for engine_data in settings['engines']:
engine = load_engine(engine_data)
- engines[engine.name] = engine
+ if engine is not None:
+ engines[engine.name] = engine
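The try/except around load_module() makes engine loading fail-soft: a broken engine module is logged and skipped instead of aborting searx startup. A minimal standalone sketch of the pattern (load_module and settings stand in for searx's own helpers):

    import logging

    logger = logging.getLogger('engines')

    def load_engine(engine_data):
        engine_name = engine_data['engine']
        try:
            engine = load_module(engine_name + '.py')   # may raise ImportError, SyntaxError, ...
        except:
            logger.exception('Cannot load engine "{}"'.format(engine_name))
            return None                                  # signal failure to the caller
        # ... parameter handling elided ...
        return engine

    engines = {}
    for engine_data in settings['engines']:
        engine = load_engine(engine_data)
        if engine is not None:                           # a broken engine is skipped, not fatal
            engines[engine.name] = engine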
diff --git a/searx/engines/currency_convert.py b/searx/engines/currency_convert.py
index b0ffb490a..bc839cfb5 100644
--- a/searx/engines/currency_convert.py
+++ b/searx/engines/currency_convert.py
@@ -9,7 +9,7 @@ categories = []
url = 'https://download.finance.yahoo.com/d/quotes.csv?e=.csv&f=sl1d1t1&s={query}=X'
weight = 100
-parser_re = re.compile(u'.*?(\d+(?:\.\d+)?) ([^.0-9]+) (?:in|to) ([^.0-9]+)', re.I) # noqa
+parser_re = re.compile(u'.*?(\\d+(?:\\.\\d+)?) ([^.0-9]+) (?:in|to) ([^.0-9]+)', re.I) # noqa
db = 1
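Doubling the backslashes here fixes the same problem the raw-string conversions in the other engines below fix: \d inside an ordinary (non-raw) string literal is an invalid escape sequence. Both spellings compile to the identical pattern:

    import re

    # backslash + 'd' can be written either way
    assert '\\d' == r'\d'
    number_re = re.compile('\\d+(?:\\.\\d+)?')   # same as re.compile(r'\d+(?:\.\d+)?')
    print(number_re.match('19.99').group())      # -> 19.99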
diff --git a/searx/engines/deviantart.py b/searx/engines/deviantart.py
index 135aeb324..d893fc7fe 100644
--- a/searx/engines/deviantart.py
+++ b/searx/engines/deviantart.py
@@ -13,7 +13,6 @@
"""
from urllib import urlencode
-from urlparse import urljoin
from lxml import html
import re
from searx.engines.xpath import extract_text
@@ -21,10 +20,16 @@ from searx.engines.xpath import extract_text
# engine dependent config
categories = ['images']
paging = True
+time_range_support = True
# search-url
base_url = 'https://www.deviantart.com/'
search_url = base_url + 'browse/all/?offset={offset}&{query}'
+time_range_url = '&order={range}'
+
+time_range_dict = {'day': 11,
+ 'week': 14,
+ 'month': 15}
# do search-request
@@ -33,6 +38,8 @@ def request(query, params):
params['url'] = search_url.format(offset=offset,
query=urlencode({'q': query}))
+ if params['time_range'] in time_range_dict:
+ params['url'] += time_range_url.format(range=time_range_dict[params['time_range']])
return params
@@ -47,14 +54,13 @@ def response(resp):
dom = html.fromstring(resp.text)
- regex = re.compile('\/200H\/')
+ regex = re.compile(r'\/200H\/')
# parse results
- for result in dom.xpath('//div[contains(@class, "tt-a tt-fh")]'):
- link = result.xpath('.//a[contains(@class, "thumb")]')[0]
- url = urljoin(base_url, link.attrib.get('href'))
- title_links = result.xpath('.//span[@class="details"]//a[contains(@class, "t")]')
- title = extract_text(title_links[0])
+ for result in dom.xpath('.//span[@class="thumb wide"]'):
+ link = result.xpath('.//a[@class="torpedo-thumb-link"]')[0]
+ url = link.attrib.get('href')
+ title = extract_text(result.xpath('.//span[@class="title"]'))
thumbnail_src = link.xpath('.//img')[0].attrib.get('src')
img_src = regex.sub('/', thumbnail_src)
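With time_range_support enabled, searx hands request() a params['time_range'] of 'day', 'week' or 'month', and the engine appends DeviantArt's numeric order value; the same pattern recurs in the duckduckgo, google, google_images and yahoo changes below. Assembled by hand from the values in this diff, a week-filtered query produces:

    from urllib import urlencode   # Python 2, as in the engine

    search_url = 'https://www.deviantart.com/browse/all/?offset={offset}&{query}'
    time_range_url = '&order={range}'
    time_range_dict = {'day': 11, 'week': 14, 'month': 15}

    url = search_url.format(offset=0, query=urlencode({'q': 'fractal'}))
    url += time_range_url.format(range=time_range_dict['week'])
    print(url)
    # https://www.deviantart.com/browse/all/?offset=0&q=fractal&order=14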
diff --git a/searx/engines/duckduckgo.py b/searx/engines/duckduckgo.py
index d29e4416a..2153492e9 100644
--- a/searx/engines/duckduckgo.py
+++ b/searx/engines/duckduckgo.py
@@ -22,9 +22,15 @@ from searx.languages import language_codes
categories = ['general']
paging = True
language_support = True
+time_range_support = True
# search-url
url = 'https://duckduckgo.com/html?{query}&s={offset}'
+time_range_url = '&df={range}'
+
+time_range_dict = {'day': 'd',
+ 'week': 'w',
+ 'month': 'm'}
# specific xpath variables
result_xpath = '//div[@class="result results_links results_links_deep web-result "]' # noqa
@@ -61,6 +67,9 @@ def request(query, params):
params['url'] = url.format(
query=urlencode({'q': query}), offset=offset)
+ if params['time_range'] in time_range_dict:
+ params['url'] += time_range_url.format(range=time_range_dict[params['time_range']])
+
return params
diff --git a/searx/engines/google.py b/searx/engines/google.py
index 6018ad1b2..ea93bc94f 100644
--- a/searx/engines/google.py
+++ b/searx/engines/google.py
@@ -24,6 +24,7 @@ categories = ['general']
paging = True
language_support = True
use_locale_domain = True
+time_range_support = True
# based on https://en.wikipedia.org/wiki/List_of_Google_domains and tests
default_hostname = 'www.google.com'
@@ -92,6 +93,11 @@ search_url = ('https://{hostname}' +
search_path +
'?{query}&start={offset}&gws_rd=cr&gbv=1&lr={lang}&ei=x')
+time_range_search = "&tbs=qdr:{range}"
+time_range_dict = {'day': 'd',
+ 'week': 'w',
+ 'month': 'm'}
+
# other URLs
map_hostname_start = 'maps.google.'
maps_path = '/maps'
@@ -179,6 +185,8 @@ def request(query, params):
query=urlencode({'q': query}),
hostname=google_hostname,
lang=url_lang)
+ if params['time_range'] in time_range_dict:
+ params['url'] += time_range_search.format(range=time_range_dict[params['time_range']])
params['headers']['Accept-Language'] = language
params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'
@@ -300,9 +308,9 @@ def parse_map_detail(parsed_url, result, google_hostname):
results = []
# try to parse the geoloc
- m = re.search('@([0-9\.]+),([0-9\.]+),([0-9]+)', parsed_url.path)
+ m = re.search(r'@([0-9\.]+),([0-9\.]+),([0-9]+)', parsed_url.path)
if m is None:
- m = re.search('ll\=([0-9\.]+),([0-9\.]+)\&z\=([0-9]+)', parsed_url.query)
+ m = re.search(r'll\=([0-9\.]+),([0-9\.]+)\&z\=([0-9]+)', parsed_url.query)
if m is not None:
# geoloc found (ignored)
diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py
index efe46812a..090d44704 100644
--- a/searx/engines/google_images.py
+++ b/searx/engines/google_images.py
@@ -11,7 +11,6 @@
"""
from urllib import urlencode
-from urlparse import parse_qs
from json import loads
from lxml import html
@@ -19,12 +18,17 @@ from lxml import html
categories = ['images']
paging = True
safesearch = True
+time_range_support = True
search_url = 'https://www.google.com/search'\
'?{query}'\
'&tbm=isch'\
'&ijn=1'\
'&start={offset}'
+time_range_search = "&tbs=qdr:{range}"
+time_range_dict = {'day': 'd',
+ 'week': 'w',
+ 'month': 'm'}
# do search-request
@@ -34,6 +38,8 @@ def request(query, params):
params['url'] = search_url.format(query=urlencode({'q': query}),
offset=offset,
safesearch=safesearch)
+ if params['time_range'] in time_range_dict:
+ params['url'] += time_range_search.format(range=time_range_dict[params['time_range']])
if safesearch and params['safesearch']:
params['url'] += '&' + urlencode({'safe': 'active'})
diff --git a/searx/engines/json_engine.py b/searx/engines/json_engine.py
index 5525b7f7e..cd5e3a7e1 100644
--- a/searx/engines/json_engine.py
+++ b/searx/engines/json_engine.py
@@ -8,6 +8,14 @@ content_query = None
title_query = None
# suggestion_xpath = ''
+# parameters for engines with paging support
+#
+# number of results on each page
+# (only needed if the site expects an offset rather than a page number)
+page_size = 1
+# number of the first page (usually 0 or 1)
+first_page_num = 1
+
def iterate(iterable):
if type(iterable) == dict:
@@ -69,8 +77,14 @@ def query(data, query_string):
def request(query, params):
query = urlencode({'q': query})[2:]
- params['url'] = search_url.format(query=query)
+
+ fp = {'query': query}
+ if paging and search_url.find('{pageno}') >= 0:
+ fp['pageno'] = (params['pageno'] + first_page_num - 1) * page_size
+
+ params['url'] = search_url.format(**fp)
params['query'] = query
+
return params
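The two new parameters translate searx's 1-based pageno into whatever the remote API expects via (pageno + first_page_num - 1) * page_size. With the defaults the page number passes through unchanged; an API that wants a 0-based result offset sets first_page_num to 0 and page_size to its per-page count:

    def pageno_param(pageno, first_page_num=1, page_size=1):
        # the formula used for fp['pageno'] above
        return (pageno + first_page_num - 1) * page_size

    print([pageno_param(p) for p in (1, 2, 3)])
    # [1, 2, 3]   (defaults: plain page numbers)
    print([pageno_param(p, first_page_num=0, page_size=20) for p in (1, 2, 3)])
    # [0, 20, 40] (0-based offset, 20 results per page)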
diff --git a/searx/engines/startpage.py b/searx/engines/startpage.py
index 52dd0b92f..d8b702c4d 100644
--- a/searx/engines/startpage.py
+++ b/searx/engines/startpage.py
@@ -68,15 +68,15 @@ def response(resp):
url = link.attrib.get('href')
# block google-ad url's
- if re.match("^http(s|)://(www\.)?google\.[a-z]+/aclk.*$", url):
+ if re.match(r"^http(s|)://(www\.)?google\.[a-z]+/aclk.*$", url):
continue
# block startpage search url's
- if re.match("^http(s|)://(www\.)?startpage\.com/do/search\?.*$", url):
+ if re.match(r"^http(s|)://(www\.)?startpage\.com/do/search\?.*$", url):
continue
# block ixquick search url's
- if re.match("^http(s|)://(www\.)?ixquick\.com/do/search\?.*$", url):
+ if re.match(r"^http(s|)://(www\.)?ixquick\.com/do/search\?.*$", url):
continue
title = escape(extract_text(link))
@@ -89,7 +89,7 @@ def response(resp):
published_date = None
# check if search result starts with something like: "2 Sep 2014 ... "
- if re.match("^([1-9]|[1-2][0-9]|3[0-1]) [A-Z][a-z]{2} [0-9]{4} \.\.\. ", content):
+ if re.match(r"^([1-9]|[1-2][0-9]|3[0-1]) [A-Z][a-z]{2} [0-9]{4} \.\.\. ", content):
date_pos = content.find('...') + 4
date_string = content[0:date_pos - 5]
published_date = parser.parse(date_string, dayfirst=True)
@@ -98,7 +98,7 @@ def response(resp):
content = content[date_pos:]
# check if search result starts with something like: "5 days ago ... "
- elif re.match("^[0-9]+ days? ago \.\.\. ", content):
+ elif re.match(r"^[0-9]+ days? ago \.\.\. ", content):
date_pos = content.find('...') + 4
date_string = content[0:date_pos - 5]
diff --git a/searx/engines/swisscows.py b/searx/engines/swisscows.py
index 864436a52..1a94ed64e 100644
--- a/searx/engines/swisscows.py
+++ b/searx/engines/swisscows.py
@@ -25,10 +25,10 @@ base_url = 'https://swisscows.ch/'
search_string = '?{query}&page={page}'
# regex
-regex_json = re.compile('initialData: {"Request":(.|\n)*},\s*environment')
-regex_json_remove_start = re.compile('^initialData:\s*')
-regex_json_remove_end = re.compile(',\s*environment$')
-regex_img_url_remove_start = re.compile('^https?://i\.swisscows\.ch/\?link=')
+regex_json = re.compile(r'initialData: {"Request":(.|\n)*},\s*environment')
+regex_json_remove_start = re.compile(r'^initialData:\s*')
+regex_json_remove_end = re.compile(r',\s*environment$')
+regex_img_url_remove_start = re.compile(r'^https?://i\.swisscows\.ch/\?link=')
# do search-request
diff --git a/searx/engines/tokyotoshokan.py b/searx/engines/tokyotoshokan.py
index 17e8e2191..e2990e153 100644
--- a/searx/engines/tokyotoshokan.py
+++ b/searx/engines/tokyotoshokan.py
@@ -48,7 +48,7 @@ def response(resp):
return []
# regular expression for parsing torrent size strings
- size_re = re.compile('Size:\s*([\d.]+)(TB|GB|MB|B)', re.IGNORECASE)
+ size_re = re.compile(r'Size:\s*([\d.]+)(TB|GB|MB|B)', re.IGNORECASE)
# processing the results, two rows at a time
for i in xrange(0, len(rows), 2):
diff --git a/searx/engines/wikidata.py b/searx/engines/wikidata.py
index 8aa2fcd5c..91040e218 100644
--- a/searx/engines/wikidata.py
+++ b/searx/engines/wikidata.py
@@ -1,56 +1,86 @@
-import json
+# -*- coding: utf-8 -*-
+"""
+ Wikidata
+
+ @website https://wikidata.org
+ @provide-api yes (https://wikidata.org/w/api.php)
+
+ @using-api partially (most things require scraping)
+ @results JSON, HTML
+ @stable no (html can change)
+ @parse url, infobox
+"""
from searx import logger
from searx.poolrequests import get
-from searx.utils import format_date_by_locale
+from searx.engines.xpath import extract_text
-from datetime import datetime
-from dateutil.parser import parse as dateutil_parse
+from json import loads
+from lxml.html import fromstring
from urllib import urlencode
-
logger = logger.getChild('wikidata')
result_count = 1
+
+# urls
wikidata_host = 'https://www.wikidata.org'
+url_search = wikidata_host \
+ + '/wiki/Special:ItemDisambiguation?{query}'
+
wikidata_api = wikidata_host + '/w/api.php'
-url_search = wikidata_api \
- + '?action=query&list=search&format=json'\
- + '&srnamespace=0&srprop=sectiontitle&{query}'
url_detail = wikidata_api\
- + '?action=wbgetentities&format=json'\
- + '&props=labels%7Cinfo%7Csitelinks'\
- + '%7Csitelinks%2Furls%7Cdescriptions%7Cclaims'\
- + '&{query}'
+ + '?action=parse&format=json&{query}'\
+ + '&redirects=1&prop=text%7Cdisplaytitle%7Clanglinks%7Crevid'\
+ + '&disableeditsection=1&disabletidy=1&preview=1&sectionpreview=1&disabletoc=1&utf8=1&formatversion=2'
+
url_map = 'https://www.openstreetmap.org/'\
+ '?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'
+url_image = 'https://commons.wikimedia.org/wiki/Special:FilePath/{filename}?width=500&height=400'
+
+# xpaths
+wikidata_ids_xpath = '//div/ul[@class="wikibase-disambiguation"]/li/a/@title'
+title_xpath = '//*[contains(@class,"wikibase-title-label")]'
+description_xpath = '//div[contains(@class,"wikibase-entitytermsview-heading-description")]'
+property_xpath = '//div[@id="{propertyid}"]'
+label_xpath = './/div[contains(@class,"wikibase-statementgroupview-property-label")]/a'
+url_xpath = './/a[contains(@class,"external free") or contains(@class, "wb-external-id")]'
+wikilink_xpath = './/ul[contains(@class,"wikibase-sitelinklistview-listview")]'\
+ + '/li[contains(@data-wb-siteid,"{wikiid}")]//a/@href'
+property_row_xpath = './/div[contains(@class,"wikibase-statementview")]'
+preferred_rank_xpath = './/span[contains(@class,"wikibase-rankselector-preferred")]'
+value_xpath = './/div[contains(@class,"wikibase-statementview-mainsnak")]'\
+ + '/*/div[contains(@class,"wikibase-snakview-value")]'
+language_fallback_xpath = '//sup[contains(@class,"wb-language-fallback-indicator")]'
+calendar_name_xpath = './/sup[contains(@class,"wb-calendar-name")]'
def request(query, params):
+ language = params['language'].split('_')[0]
+ if language == 'all':
+ language = 'en'
+
params['url'] = url_search.format(
- query=urlencode({'srsearch': query,
- 'srlimit': result_count}))
+ query=urlencode({'label': query,
+ 'language': language}))
return params
def response(resp):
results = []
- search_res = json.loads(resp.text)
-
- wikidata_ids = set()
- for r in search_res.get('query', {}).get('search', {}):
- wikidata_ids.add(r.get('title', ''))
+ html = fromstring(resp.content)
+ wikidata_ids = html.xpath(wikidata_ids_xpath)
language = resp.search_params['language'].split('_')[0]
if language == 'all':
language = 'en'
- url = url_detail.format(query=urlencode({'ids': '|'.join(wikidata_ids),
- 'languages': language + '|en'}))
-
- htmlresponse = get(url)
- jsonresponse = json.loads(htmlresponse.content)
- for wikidata_id in wikidata_ids:
- results = results + getDetail(jsonresponse, wikidata_id, language, resp.search_params['language'])
+ # TODO: make requests asynchronous to avoid timeout when result_count > 1
+ for wikidata_id in wikidata_ids[:result_count]:
+ url = url_detail.format(query=urlencode({'page': wikidata_id,
+ 'uselang': language}))
+ htmlresponse = get(url)
+ jsonresponse = loads(htmlresponse.content)
+ results += getDetail(jsonresponse, wikidata_id, language, resp.search_params['language'])
return results
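The rewritten flow is two-step: scrape Special:ItemDisambiguation for matching item ids, then fetch each item's rendered page through action=parse and mine the HTML for the infobox. The two URLs look roughly like this (url_detail abridged to its first parameters; Q90 is Wikidata's id for Paris):

    from urllib import urlencode

    url_search = 'https://www.wikidata.org/wiki/Special:ItemDisambiguation?{query}'
    url_detail = ('https://www.wikidata.org/w/api.php?action=parse&format=json&{query}'
                  '&redirects=1&prop=text%7Cdisplaytitle%7Clanglinks%7Crevid')

    # step 1: candidate item ids for a label
    print(url_search.format(query=urlencode({'label': 'Paris', 'language': 'en'})))
    # step 2: rendered entity page for one id
    print(url_detail.format(query=urlencode({'page': 'Q90', 'uselang': 'en'})))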
@@ -60,124 +90,206 @@ def getDetail(jsonresponse, wikidata_id, language, locale):
urls = []
attributes = []
- result = jsonresponse.get('entities', {}).get(wikidata_id, {})
+ title = jsonresponse.get('parse', {}).get('displaytitle', {})
+ result = jsonresponse.get('parse', {}).get('text', {})
- title = result.get('labels', {}).get(language, {}).get('value', None)
- if title is None:
- title = result.get('labels', {}).get('en', {}).get('value', None)
- if title is None:
+ if not title or not result:
return results
- description = result\
- .get('descriptions', {})\
- .get(language, {})\
- .get('value', None)
+ title = fromstring(title)
+ for elem in title.xpath(language_fallback_xpath):
+ elem.getparent().remove(elem)
+ title = extract_text(title.xpath(title_xpath))
- if description is None:
- description = result\
- .get('descriptions', {})\
- .get('en', {})\
- .get('value', '')
+ result = fromstring(result)
+ for elem in result.xpath(language_fallback_xpath):
+ elem.getparent().remove(elem)
- claims = result.get('claims', {})
- official_website = get_string(claims, 'P856', None)
- if official_website is not None:
- urls.append({'title': 'Official site', 'url': official_website})
- results.append({'title': title, 'url': official_website})
+ description = extract_text(result.xpath(description_xpath))
- wikipedia_link_count = 0
- wikipedia_link = get_wikilink(result, language + 'wiki')
- wikipedia_link_count += add_url(urls,
- 'Wikipedia (' + language + ')',
- wikipedia_link)
- if language != 'en':
- wikipedia_en_link = get_wikilink(result, 'enwiki')
- wikipedia_link_count += add_url(urls,
- 'Wikipedia (en)',
- wikipedia_en_link)
- if wikipedia_link_count == 0:
- misc_language = get_wiki_firstlanguage(result, 'wiki')
- if misc_language is not None:
- add_url(urls,
- 'Wikipedia (' + misc_language + ')',
- get_wikilink(result, misc_language + 'wiki'))
+ # URLS
- if language != 'en':
- add_url(urls,
- 'Wiki voyage (' + language + ')',
- get_wikilink(result, language + 'wikivoyage'))
+ # official website
+ add_url(urls, result, 'P856', results=results)
- add_url(urls,
- 'Wiki voyage (en)',
- get_wikilink(result, 'enwikivoyage'))
+ # wikipedia
+ wikipedia_link_count = 0
+ wikipedia_link = get_wikilink(result, language + 'wiki')
+ if wikipedia_link:
+ wikipedia_link_count += 1
+ urls.append({'title': 'Wikipedia (' + language + ')',
+ 'url': wikipedia_link})
if language != 'en':
- add_url(urls,
- 'Wikiquote (' + language + ')',
- get_wikilink(result, language + 'wikiquote'))
-
- add_url(urls,
- 'Wikiquote (en)',
- get_wikilink(result, 'enwikiquote'))
-
- add_url(urls,
- 'Commons wiki',
- get_wikilink(result, 'commonswiki'))
-
- add_url(urls,
- 'Location',
- get_geolink(claims, 'P625', None))
-
- add_url(urls,
- 'Wikidata',
- 'https://www.wikidata.org/wiki/'
- + wikidata_id + '?uselang=' + language)
-
- musicbrainz_work_id = get_string(claims, 'P435')
- if musicbrainz_work_id is not None:
- add_url(urls,
- 'MusicBrainz',
- 'http://musicbrainz.org/work/'
- + musicbrainz_work_id)
-
- musicbrainz_artist_id = get_string(claims, 'P434')
- if musicbrainz_artist_id is not None:
- add_url(urls,
- 'MusicBrainz',
- 'http://musicbrainz.org/artist/'
- + musicbrainz_artist_id)
-
- musicbrainz_release_group_id = get_string(claims, 'P436')
- if musicbrainz_release_group_id is not None:
- add_url(urls,
- 'MusicBrainz',
- 'http://musicbrainz.org/release-group/'
- + musicbrainz_release_group_id)
-
- musicbrainz_label_id = get_string(claims, 'P966')
- if musicbrainz_label_id is not None:
- add_url(urls,
- 'MusicBrainz',
- 'http://musicbrainz.org/label/'
- + musicbrainz_label_id)
-
- # musicbrainz_area_id = get_string(claims, 'P982')
- # P1407 MusicBrainz series ID
- # P1004 MusicBrainz place ID
- # P1330 MusicBrainz instrument ID
- # P1407 MusicBrainz series ID
-
- postal_code = get_string(claims, 'P281', None)
- if postal_code is not None:
- attributes.append({'label': 'Postal code(s)', 'value': postal_code})
-
- date_of_birth = get_time(claims, 'P569', locale, None)
- if date_of_birth is not None:
- attributes.append({'label': 'Date of birth', 'value': date_of_birth})
-
- date_of_death = get_time(claims, 'P570', locale, None)
- if date_of_death is not None:
- attributes.append({'label': 'Date of death', 'value': date_of_death})
+ wikipedia_en_link = get_wikilink(result, 'enwiki')
+ if wikipedia_en_link:
+ wikipedia_link_count += 1
+ urls.append({'title': 'Wikipedia (en)',
+ 'url': wikipedia_en_link})
+
+ # TODO: get_wiki_firstlanguage
+ # if wikipedia_link_count == 0:
+
+ # more wikis
+ add_url(urls, result, default_label='Wikivoyage (' + language + ')', link_type=language + 'wikivoyage')
+ add_url(urls, result, default_label='Wikiquote (' + language + ')', link_type=language + 'wikiquote')
+ add_url(urls, result, default_label='Wikimedia Commons', link_type='commonswiki')
+
+ add_url(urls, result, 'P625', 'OpenStreetMap', link_type='geo')
+
+ # musicbrainz
+ add_url(urls, result, 'P434', 'MusicBrainz', 'http://musicbrainz.org/artist/')
+ add_url(urls, result, 'P435', 'MusicBrainz', 'http://musicbrainz.org/work/')
+ add_url(urls, result, 'P436', 'MusicBrainz', 'http://musicbrainz.org/release-group/')
+ add_url(urls, result, 'P966', 'MusicBrainz', 'http://musicbrainz.org/label/')
+
+ # IMDb
+ add_url(urls, result, 'P345', 'IMDb', 'https://www.imdb.com/', link_type='imdb')
+ # source code repository
+ add_url(urls, result, 'P1324')
+ # blog
+ add_url(urls, result, 'P1581')
+ # social media links
+ add_url(urls, result, 'P2397', 'YouTube', 'https://www.youtube.com/channel/')
+ add_url(urls, result, 'P1651', 'YouTube', 'https://www.youtube.com/watch?v=')
+ add_url(urls, result, 'P2002', 'Twitter', 'https://twitter.com/')
+ add_url(urls, result, 'P2013', 'Facebook', 'https://facebook.com/')
+ add_url(urls, result, 'P2003', 'Instagram', 'https://instagram.com/')
+
+ urls.append({'title': 'Wikidata',
+ 'url': 'https://www.wikidata.org/wiki/'
+ + wikidata_id + '?uselang=' + language})
+
+ # INFOBOX ATTRIBUTES (ROWS)
+
+ # DATES
+ # inception date
+ add_attribute(attributes, result, 'P571', date=True)
+ # dissolution date
+ add_attribute(attributes, result, 'P576', date=True)
+ # start date
+ add_attribute(attributes, result, 'P580', date=True)
+ # end date
+ add_attribute(attributes, result, 'P582', date=True)
+ # date of birth
+ add_attribute(attributes, result, 'P569', date=True)
+ # date of death
+ add_attribute(attributes, result, 'P570', date=True)
+ # date of spacecraft launch
+ add_attribute(attributes, result, 'P619', date=True)
+ # date of spacecraft landing
+ add_attribute(attributes, result, 'P620', date=True)
+
+ # nationality
+ add_attribute(attributes, result, 'P27')
+ # country of origin
+ add_attribute(attributes, result, 'P495')
+ # country
+ add_attribute(attributes, result, 'P17')
+ # headquarters
+ add_attribute(attributes, result, 'Q180')
+
+ # PLACES
+ # capital
+ add_attribute(attributes, result, 'P36', trim=True)
+ # head of state
+ add_attribute(attributes, result, 'P35', trim=True)
+ # head of government
+ add_attribute(attributes, result, 'P6', trim=True)
+ # type of government
+ add_attribute(attributes, result, 'P122')
+ # official language
+ add_attribute(attributes, result, 'P37')
+ # population
+ add_attribute(attributes, result, 'P1082', trim=True)
+ # area
+ add_attribute(attributes, result, 'P2046')
+ # currency
+ add_attribute(attributes, result, 'P38', trim=True)
+    # height (building)
+ add_attribute(attributes, result, 'P2048')
+
+ # MEDIA
+ # platform (videogames)
+ add_attribute(attributes, result, 'P400')
+ # author
+ add_attribute(attributes, result, 'P50')
+ # creator
+ add_attribute(attributes, result, 'P170')
+ # director
+ add_attribute(attributes, result, 'P57')
+ # performer
+ add_attribute(attributes, result, 'P175')
+ # developer
+ add_attribute(attributes, result, 'P178')
+ # producer
+ add_attribute(attributes, result, 'P162')
+ # manufacturer
+ add_attribute(attributes, result, 'P176')
+ # screenwriter
+ add_attribute(attributes, result, 'P58')
+ # production company
+ add_attribute(attributes, result, 'P272')
+ # record label
+ add_attribute(attributes, result, 'P264')
+ # publisher
+ add_attribute(attributes, result, 'P123')
+ # original network
+ add_attribute(attributes, result, 'P449')
+ # distributor
+ add_attribute(attributes, result, 'P750')
+ # composer
+ add_attribute(attributes, result, 'P86')
+ # publication date
+ add_attribute(attributes, result, 'P577', date=True)
+ # genre
+ add_attribute(attributes, result, 'P136')
+ # original language
+ add_attribute(attributes, result, 'P364')
+ # isbn
+ add_attribute(attributes, result, 'Q33057')
+ # software license
+ add_attribute(attributes, result, 'P275')
+ # programming language
+ add_attribute(attributes, result, 'P277')
+ # version
+ add_attribute(attributes, result, 'P348', trim=True)
+ # narrative location
+ add_attribute(attributes, result, 'P840')
+
+ # LANGUAGES
+ # number of speakers
+ add_attribute(attributes, result, 'P1098')
+ # writing system
+ add_attribute(attributes, result, 'P282')
+ # regulatory body
+ add_attribute(attributes, result, 'P1018')
+ # language code
+ add_attribute(attributes, result, 'P218')
+
+ # OTHER
+ # ceo
+ add_attribute(attributes, result, 'P169', trim=True)
+ # founder
+ add_attribute(attributes, result, 'P112')
+ # legal form (company/organization)
+ add_attribute(attributes, result, 'P1454')
+ # operator
+ add_attribute(attributes, result, 'P137')
+    # crew members
+ add_attribute(attributes, result, 'P1029')
+ # taxon
+ add_attribute(attributes, result, 'P225')
+ # chemical formula
+ add_attribute(attributes, result, 'P274')
+ # winner (sports/contests)
+ add_attribute(attributes, result, 'P1346')
+ # number of deaths
+ add_attribute(attributes, result, 'P1120')
+ # currency code
+ add_attribute(attributes, result, 'P498')
+
+ image = add_image(result)
if len(attributes) == 0 and len(urls) == 2 and len(description) == 0:
results.append({
@@ -190,6 +302,7 @@ def getDetail(jsonresponse, wikidata_id, language, locale):
'infobox': title,
'id': wikipedia_link,
'content': description,
+ 'img_src': image,
'attributes': attributes,
'urls': urls
})
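Each add_url/add_attribute call above maps one Wikidata property id to an infobox link or row; the helpers (defined in the next hunk) locate the statement's div[@id="P..."] in the parsed HTML and, for links, prepend the given URL prefix to the stored identifier. As a sketch, assuming an item whose P2002 statement holds the Twitter username 'example':

    add_url(urls, result, 'P2002', 'Twitter', 'https://twitter.com/')
    # urls now ends with:
    #   {'title': 'Twitter', 'url': 'https://twitter.com/example'}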
@@ -197,92 +310,151 @@ def getDetail(jsonresponse, wikidata_id, language, locale):
return results
-def add_url(urls, title, url):
- if url is not None:
- urls.append({'title': title, 'url': url})
- return 1
+# only returns first match
+def add_image(result):
+ # P15: route map, P242: locator map, P154: logo, P18: image, P242: map, P41: flag, P2716: collage, P2910: icon
+ property_ids = ['P15', 'P242', 'P154', 'P18', 'P242', 'P41', 'P2716', 'P2910']
+
+ for property_id in property_ids:
+ image = result.xpath(property_xpath.replace('{propertyid}', property_id))
+ if image:
+ image_name = image[0].xpath(value_xpath)
+ image_src = url_image.replace('{filename}', extract_text(image_name[0]))
+ return image_src
+
+
+# setting trim will only return high-ranked rows OR the first row
+def add_attribute(attributes, result, property_id, default_label=None, date=False, trim=False):
+ attribute = result.xpath(property_xpath.replace('{propertyid}', property_id))
+ if attribute:
+
+ if default_label:
+ label = default_label
+ else:
+ label = extract_text(attribute[0].xpath(label_xpath))
+ label = label[0].upper() + label[1:]
+
+ if date:
+ trim = True
+ # remove calendar name
+ calendar_name = attribute[0].xpath(calendar_name_xpath)
+ for calendar in calendar_name:
+ calendar.getparent().remove(calendar)
+
+ concat_values = ""
+ values = []
+ first_value = None
+ for row in attribute[0].xpath(property_row_xpath):
+ if not first_value or not trim or row.xpath(preferred_rank_xpath):
+
+ value = row.xpath(value_xpath)
+ if not value:
+ continue
+ value = extract_text(value)
+
+ # save first value in case no ranked row is found
+ if trim and not first_value:
+ first_value = value
+ else:
+ # to avoid duplicate values
+ if value not in values:
+ concat_values += value + ", "
+ values.append(value)
+
+ if trim and not values:
+ attributes.append({'label': label,
+ 'value': first_value})
+ else:
+ attributes.append({'label': label,
+ 'value': concat_values[:-2]})
+
+
+# requires property_id unless it's a wiki link (defined in link_type)
+def add_url(urls, result, property_id=None, default_label=None, url_prefix=None, results=None, link_type=None):
+ links = []
+
+ # wiki links don't have property in wikidata page
+ if link_type and 'wiki' in link_type:
+ links.append(get_wikilink(result, link_type))
else:
- return 0
+ dom_element = result.xpath(property_xpath.replace('{propertyid}', property_id))
+ if dom_element:
+ dom_element = dom_element[0]
+ if not default_label:
+ label = extract_text(dom_element.xpath(label_xpath))
+ label = label[0].upper() + label[1:]
+
+ if link_type == 'geo':
+ links.append(get_geolink(dom_element))
+
+ elif link_type == 'imdb':
+ links.append(get_imdblink(dom_element, url_prefix))
+
+ else:
+ url_results = dom_element.xpath(url_xpath)
+ for link in url_results:
+ if link is not None:
+ if url_prefix:
+ link = url_prefix + extract_text(link)
+ else:
+ link = extract_text(link)
+ links.append(link)
+
+ # append urls
+ for url in links:
+ if url is not None:
+ urls.append({'title': default_label or label,
+ 'url': url})
+ if results is not None:
+ results.append({'title': default_label or label,
+ 'url': url})
+
+
+def get_imdblink(result, url_prefix):
+ imdb_id = result.xpath(value_xpath)
+ if imdb_id:
+ imdb_id = extract_text(imdb_id)
+ id_prefix = imdb_id[:2]
+ if id_prefix == 'tt':
+ url = url_prefix + 'title/' + imdb_id
+ elif id_prefix == 'nm':
+ url = url_prefix + 'name/' + imdb_id
+ elif id_prefix == 'ch':
+ url = url_prefix + 'character/' + imdb_id
+ elif id_prefix == 'co':
+ url = url_prefix + 'company/' + imdb_id
+ elif id_prefix == 'ev':
+ url = url_prefix + 'event/' + imdb_id
+ else:
+ url = None
+ return url
-def get_mainsnak(claims, propertyName):
- propValue = claims.get(propertyName, {})
- if len(propValue) == 0:
+def get_geolink(result):
+ coordinates = result.xpath(value_xpath)
+ if not coordinates:
return None
-
- propValue = propValue[0].get('mainsnak', None)
- return propValue
-
-
-def get_string(claims, propertyName, defaultValue=None):
- propValue = claims.get(propertyName, {})
- if len(propValue) == 0:
- return defaultValue
-
- result = []
- for e in propValue:
- mainsnak = e.get('mainsnak', {})
-
- datavalue = mainsnak.get('datavalue', {})
- if datavalue is not None:
- result.append(datavalue.get('value', ''))
-
- if len(result) == 0:
- return defaultValue
- else:
- # TODO handle multiple urls
- return result[0]
-
-
-def get_time(claims, propertyName, locale, defaultValue=None):
- propValue = claims.get(propertyName, {})
- if len(propValue) == 0:
- return defaultValue
-
- result = []
- for e in propValue:
- mainsnak = e.get('mainsnak', {})
-
- datavalue = mainsnak.get('datavalue', {})
- if datavalue is not None:
- value = datavalue.get('value', '')
- result.append(value.get('time', ''))
-
- if len(result) == 0:
- date_string = defaultValue
- else:
- date_string = ', '.join(result)
-
- try:
- parsed_date = datetime.strptime(date_string, "+%Y-%m-%dT%H:%M:%SZ")
- except:
- if date_string.startswith('-'):
- return date_string.split('T')[0]
- try:
- parsed_date = dateutil_parse(date_string, fuzzy=False, default=False)
- except:
- logger.debug('could not parse date %s', date_string)
- return date_string.split('T')[0]
-
- return format_date_by_locale(parsed_date, locale)
-
-
-def get_geolink(claims, propertyName, defaultValue=''):
- mainsnak = get_mainsnak(claims, propertyName)
-
- if mainsnak is None:
- return defaultValue
-
- datatype = mainsnak.get('datatype', '')
- datavalue = mainsnak.get('datavalue', {})
-
- if datatype != 'globe-coordinate':
- return defaultValue
-
- value = datavalue.get('value', {})
-
- precision = value.get('precision', 0.0002)
-
+ coordinates = extract_text(coordinates[0])
+ latitude, longitude = coordinates.split(',')
+
+ # convert to decimal
+ lat = int(latitude[:latitude.find(u'°')])
+ if latitude.find('\'') >= 0:
+ lat += int(latitude[latitude.find(u'°') + 1:latitude.find('\'')] or 0) / 60.0
+ if latitude.find('"') >= 0:
+ lat += float(latitude[latitude.find('\'') + 1:latitude.find('"')] or 0) / 3600.0
+ if latitude.find('S') >= 0:
+ lat *= -1
+ lon = int(longitude[:longitude.find(u'°')])
+ if longitude.find('\'') >= 0:
+ lon += int(longitude[longitude.find(u'°') + 1:longitude.find('\'')] or 0) / 60.0
+ if longitude.find('"') >= 0:
+ lon += float(longitude[longitude.find('\'') + 1:longitude.find('"')] or 0) / 3600.0
+ if longitude.find('W') >= 0:
+ lon *= -1
+
+ # TODO: get precision
+ precision = 0.0002
# there is no zoom information, deduce from precision (error prone)
# samples :
# 13 --> 5
@@ -298,26 +470,20 @@ def get_geolink(claims, propertyName, defaultValue=''):
zoom = int(15 - precision * 8.8322 + precision * precision * 0.625447)
url = url_map\
- .replace('{latitude}', str(value.get('latitude', 0)))\
- .replace('{longitude}', str(value.get('longitude', 0)))\
+ .replace('{latitude}', str(lat))\
+ .replace('{longitude}', str(lon))\
.replace('{zoom}', str(zoom))
return url
def get_wikilink(result, wikiid):
- url = result.get('sitelinks', {}).get(wikiid, {}).get('url', None)
- if url is None:
- return url
- elif url.startswith('http://'):
+ url = result.xpath(wikilink_xpath.replace('{wikiid}', wikiid))
+ if not url:
+ return None
+ url = url[0]
+ if url.startswith('http://'):
url = url.replace('http://', 'https://')
elif url.startswith('//'):
url = 'https:' + url
return url
-
-
-def get_wiki_firstlanguage(result, wikipatternid):
- for k in result.get('sitelinks', {}).keys():
- if k.endswith(wikipatternid) and len(k) == (2 + len(wikipatternid)):
- return k[0:2]
- return None
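get_geolink() now parses the rendered degree/minute/second string instead of the JSON globe-coordinate value. A worked example of the arithmetic, assuming the page renders the latitude 48°51'24"N (the find()-based slicing is a simplified sketch of the code above, without its empty-field guards):

    # -*- coding: utf-8 -*-
    latitude = u'48°51\'24"N'
    lat = int(latitude[:latitude.find(u'°')])                                    # 48
    lat += int(latitude[latitude.find(u'°') + 1:latitude.find("'")]) / 60.0      # + 51/60
    lat += float(latitude[latitude.find("'") + 1:latitude.find('"')]) / 3600.0   # + 24/3600
    # an 'S' suffix would flip the sign
    print(lat)   # -> 48.8566666...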
diff --git a/searx/engines/wikipedia.py b/searx/engines/wikipedia.py
index fed7b263f..70191d22b 100644
--- a/searx/engines/wikipedia.py
+++ b/searx/engines/wikipedia.py
@@ -99,9 +99,8 @@ def response(resp):
return []
# link to wikipedia article
- # parenthesis are not quoted to make infobox mergeable with wikidata's
wikipedia_link = url_lang(resp.search_params['language']) \
- + 'wiki/' + quote(title.replace(' ', '_').encode('utf8')).replace('%28', '(').replace('%29', ')')
+ + 'wiki/' + quote(title.replace(' ', '_').encode('utf8'))
results.append({'url': wikipedia_link, 'title': title})
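Without the replace('%28', '(') post-processing, parentheses in article titles now stay percent-encoded, which is what Python 2's urllib.quote produces by default:

    from urllib import quote

    title = 'Python (programming language)'
    print(quote(title.replace(' ', '_').encode('utf8')))
    # Python_%28programming_language%29  (previously unquoted back to '(' and ')')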
diff --git a/searx/engines/www500px.py b/searx/engines/www500px.py
index c98e19443..f1bc6c583 100644
--- a/searx/engines/www500px.py
+++ b/searx/engines/www500px.py
@@ -41,7 +41,7 @@ def response(resp):
results = []
dom = html.fromstring(resp.text)
- regex = re.compile('3\.jpg.*$')
+ regex = re.compile(r'3\.jpg.*$')
# parse results
for result in dom.xpath('//div[@class="photo"]'):
diff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py
index b8b40e4aa..8e24a283e 100644
--- a/searx/engines/yahoo.py
+++ b/searx/engines/yahoo.py
@@ -20,10 +20,12 @@ from searx.engines.xpath import extract_text, extract_url
categories = ['general']
paging = True
language_support = True
+time_range_support = True
# search-url
base_url = 'https://search.yahoo.com/'
search_url = 'search?{query}&b={offset}&fl=1&vl=lang_{lang}'
+search_url_with_time = 'search?{query}&b={offset}&fl=1&vl=lang_{lang}&age={age}&btf={btf}&fr2=time'
# specific xpath variables
results_xpath = "//div[contains(concat(' ', normalize-space(@class), ' '), ' Sr ')]"
@@ -32,6 +34,10 @@ title_xpath = './/h3/a'
content_xpath = './/div[@class="compText aAbs"]'
suggestion_xpath = "//div[contains(concat(' ', normalize-space(@class), ' '), ' AlsoTry ')]//a"
+time_range_dict = {'day': ['1d', 'd'],
+ 'week': ['1w', 'w'],
+ 'month': ['1m', 'm']}
+
# remove yahoo-specific tracking-url
def parse_url(url_string):
@@ -51,18 +57,30 @@ def parse_url(url_string):
return unquote(url_string[start:end])
+def _get_url(query, offset, language, time_range):
+ if time_range in time_range_dict:
+ return base_url + search_url_with_time.format(offset=offset,
+ query=urlencode({'p': query}),
+ lang=language,
+ age=time_range_dict[time_range][0],
+ btf=time_range_dict[time_range][1])
+ return base_url + search_url.format(offset=offset,
+ query=urlencode({'p': query}),
+ lang=language)
+
+
+def _get_language(params):
+ if params['language'] == 'all':
+ return 'en'
+ return params['language'].split('_')[0]
+
+
# do search-request
def request(query, params):
offset = (params['pageno'] - 1) * 10 + 1
+ language = _get_language(params)
- if params['language'] == 'all':
- language = 'en'
- else:
- language = params['language'].split('_')[0]
-
- params['url'] = base_url + search_url.format(offset=offset,
- query=urlencode({'p': query}),
- lang=language)
+ params['url'] = _get_url(query, offset, language, params['time_range'])
# TODO required?
params['cookies']['sB'] = 'fl=1&vl=lang_{lang}&sh=1&rw=new&v=1'\
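Pulling the URL construction into _get_url() lets the plain and time-filtered variants share one call site; the time-filtered form adds Yahoo's age, btf and fr2=time parameters. Using the helpers from this diff, a week-filtered query builds:

    print(_get_url('searx', 1, 'en', 'week'))
    # https://search.yahoo.com/search?p=searx&b=1&fl=1&vl=lang_en&age=1w&btf=w&fr2=time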
diff --git a/searx/engines/yahoo_news.py b/searx/engines/yahoo_news.py
index d4cfbeda2..e91c1d34e 100644
--- a/searx/engines/yahoo_news.py
+++ b/searx/engines/yahoo_news.py
@@ -55,7 +55,7 @@ def request(query, params):
def sanitize_url(url):
if ".yahoo.com/" in url:
- return re.sub(u"\;\_ylt\=.+$", "", url)
+ return re.sub(u"\\;\\_ylt\\=.+$", "", url)
else:
return url