Diffstat (limited to 'searx/engines')
 -rw-r--r--  searx/engines/currency_convert.py         |   4
 -rw-r--r--  searx/engines/doku.py                      |  84
 -rw-r--r--  searx/engines/duckduckgo_definitions.py    |   9
 -rw-r--r--  searx/engines/fdroid.py                    |  53
 -rw-r--r--  searx/engines/google.py                    |  12
 -rw-r--r--  searx/engines/google_images.py             |   8
 -rw-r--r--  searx/engines/nyaa.py                      | 119
 -rw-r--r--  searx/engines/reddit.py                    |  79
 -rw-r--r--  searx/engines/tokyotoshokan.py             | 102
 -rw-r--r--  searx/engines/torrentz.py                  |  93
 -rw-r--r--  searx/engines/wikidata.py                  |  16
 -rw-r--r--  searx/engines/wikipedia.py                 | 114
 -rw-r--r--  searx/engines/xpath.py                     |  16
 13 files changed, 686 insertions(+), 23 deletions(-)
diff --git a/searx/engines/currency_convert.py b/searx/engines/currency_convert.py
index 26830a167..b0ffb490a 100644
--- a/searx/engines/currency_convert.py
+++ b/searx/engines/currency_convert.py
@@ -9,13 +9,13 @@ categories = []
url = 'https://download.finance.yahoo.com/d/quotes.csv?e=.csv&f=sl1d1t1&s={query}=X'
weight = 100
-parser_re = re.compile(u'^\W*(\d+(?:\.\d+)?)\W*([^.0-9].+)\W+in?\W+([^\.]+)\W*$', re.I) # noqa
+parser_re = re.compile(u'.*?(\d+(?:\.\d+)?) ([^.0-9]+) (?:in|to) ([^.0-9]+)', re.I) # noqa
db = 1
def normalize_name(name):
- name = name.lower().replace('-', ' ')
+ name = name.lower().replace('-', ' ').rstrip('s')
name = re.sub(' +', ' ', name)
return unicodedata.normalize('NFKD', name).lower()
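
The relaxed parser above accepts free-form queries such as "convert 10.5 Euros to US-Dollars". A minimal sketch of what it extracts, re-using the two definitions from this hunk (the example query is made up):

import re
import unicodedata

parser_re = re.compile(u'.*?(\d+(?:\.\d+)?) ([^.0-9]+) (?:in|to) ([^.0-9]+)', re.I)

def normalize_name(name):
    name = name.lower().replace('-', ' ').rstrip('s')
    name = re.sub(' +', ' ', name)
    return unicodedata.normalize('NFKD', name).lower()

amount, src, dst = parser_re.match(u'convert 10.5 Euros to US-Dollars').groups()
# amount == '10.5', src == 'Euros', dst == 'US-Dollars'
# normalize_name(src) -> 'euro', normalize_name(dst) -> 'us dollar'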
diff --git a/searx/engines/doku.py b/searx/engines/doku.py
new file mode 100644
index 000000000..93867fd0d
--- /dev/null
+++ b/searx/engines/doku.py
@@ -0,0 +1,84 @@
+# Doku Wiki
+#
+# @website https://www.dokuwiki.org/
+# @provide-api yes
+# (https://www.dokuwiki.org/devel:xmlrpc)
+#
+# @using-api no
+# @results HTML
+# @stable yes
+# @parse (general) url, title, content
+
+from urllib import urlencode
+from lxml.html import fromstring
+from searx.engines.xpath import extract_text
+
+# engine dependent config
+categories = ['general'] # TODO , 'images', 'music', 'videos', 'files'
+paging = False
+language_support = False
+number_of_results = 5
+
+# search-url
+# Doku is OpenSearch compatible
+base_url = 'http://localhost:8090'
+search_url = '/?do=search'\
+ '&{query}'
+# TODO '&startRecord={offset}'\
+# TODO '&maximumRecords={limit}'\
+
+
+# do search-request
+def request(query, params):
+
+ params['url'] = base_url +\
+ search_url.format(query=urlencode({'id': query}))
+
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ doc = fromstring(resp.text)
+
+ # parse results
+ # Quickhits
+ for r in doc.xpath('//div[@class="search_quickresult"]/ul/li'):
+ try:
+ res_url = r.xpath('.//a[@class="wikilink1"]/@href')[-1]
+ except:
+ continue
+
+ if not res_url:
+ continue
+
+ title = extract_text(r.xpath('.//a[@class="wikilink1"]/@title'))
+
+ # append result
+ results.append({'title': title,
+ 'content': "",
+ 'url': base_url + res_url})
+
+ # Search results
+ for r in doc.xpath('//dl[@class="search_results"]/*'):
+ try:
+ if r.tag == "dt":
+ res_url = r.xpath('.//a[@class="wikilink1"]/@href')[-1]
+ title = extract_text(r.xpath('.//a[@class="wikilink1"]/@title'))
+ elif r.tag == "dd":
+ content = extract_text(r.xpath('.'))
+
+ # append result
+ results.append({'title': title,
+ 'content': content,
+ 'url': base_url + res_url})
+ except:
+ continue
+
+ if not res_url:
+ continue
+
+ # return results
+ return results
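
A rough sketch of the DokuWiki search markup that response() above expects: a "search_quickresult" list plus alternating dt/dd pairs. The snippet below is hypothetical, trimmed-down HTML, only meant to show what the two XPath expressions target:

from lxml.html import fromstring

snippet = '''
<div class="search_quickresult"><ul>
  <li><a class="wikilink1" href="/doku.php?id=wiki:syntax" title="wiki:syntax">wiki:syntax</a></li>
</ul></div>
<dl class="search_results">
  <dt><a class="wikilink1" href="/doku.php?id=wiki:syntax" title="wiki:syntax">wiki:syntax</a></dt>
  <dd>Formatting Syntax ... matching text ...</dd>
</dl>
'''
doc = fromstring(snippet)
doc.xpath('//div[@class="search_quickresult"]/ul/li')   # quick hits
doc.xpath('//dl[@class="search_results"]/*')            # alternating dt / dd pairs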
diff --git a/searx/engines/duckduckgo_definitions.py b/searx/engines/duckduckgo_definitions.py
index 793e97d22..208ccca28 100644
--- a/searx/engines/duckduckgo_definitions.py
+++ b/searx/engines/duckduckgo_definitions.py
@@ -1,5 +1,6 @@
import json
from urllib import urlencode
+from re import compile, sub
from lxml import html
from searx.utils import html_to_text
from searx.engines.xpath import extract_text
@@ -7,6 +8,8 @@ from searx.engines.xpath import extract_text
url = 'https://api.duckduckgo.com/'\
+ '?{query}&format=json&pretty=0&no_redirect=1&d=1'
+http_regex = compile(r'^http:')
+
def result_to_text(url, text, htmlResult):
# TODO : remove result ending with "Meaning" or "Category"
@@ -19,8 +22,8 @@ def result_to_text(url, text, htmlResult):
def request(query, params):
- # TODO add kl={locale}
params['url'] = url.format(query=urlencode({'q': query}))
+ params['headers']['Accept-Language'] = params['language']
return params
@@ -103,6 +106,10 @@ def response(resp):
urls.append({'title': search_res.get('DefinitionSource'),
'url': definitionURL})
+ # to merge with wikidata's infobox
+ if infobox_id:
+ infobox_id = http_regex.sub('https:', infobox_id)
+
# entity
entity = search_res.get('Entity', None)
# TODO continent / country / department / location / waterfall /
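
The new infobox_id rewrite exists so that the DuckDuckGo infobox can merge with the one produced by wikidata.py, which uses https ids. A tiny sketch (the URL is illustrative):

from re import compile

http_regex = compile(r'^http:')
infobox_id = 'http://en.wikipedia.org/wiki/GNU'
infobox_id = http_regex.sub('https:', infobox_id)
# -> 'https://en.wikipedia.org/wiki/GNU', matching the ids used elsewhere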
diff --git a/searx/engines/fdroid.py b/searx/engines/fdroid.py
new file mode 100644
index 000000000..0b16773e3
--- /dev/null
+++ b/searx/engines/fdroid.py
@@ -0,0 +1,53 @@
+"""
+ F-Droid (a repository of FOSS applications for Android)
+
+ @website https://f-droid.org/
+ @provide-api no
+ @using-api no
+ @results HTML
+ @stable no (HTML can change)
+ @parse url, title, content
+"""
+
+from cgi import escape
+from urllib import urlencode
+from searx.engines.xpath import extract_text
+from lxml import html
+
+# engine dependent config
+categories = ['files']
+paging = True
+
+# search-url
+base_url = 'https://f-droid.org/'
+search_url = base_url + 'repository/browse/?{query}'
+
+
+# do search-request
+def request(query, params):
+ query = urlencode({'fdfilter': query,
+ 'fdpage': params['pageno']})
+ params['url'] = search_url.format(query=query)
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ dom = html.fromstring(resp.text)
+
+ for app in dom.xpath('//div[@id="appheader"]'):
+ url = app.xpath('./ancestor::a/@href')[0]
+ title = app.xpath('./p/span/text()')[0]
+ img_src = app.xpath('.//img/@src')[0]
+
+ content = extract_text(app.xpath('./p')[0])
+ content = escape(content.replace(title, '', 1).strip())
+
+ results.append({'url': url,
+ 'title': title,
+ 'content': content,
+ 'img_src': img_src})
+
+ return results
diff --git a/searx/engines/google.py b/searx/engines/google.py
index 313932200..6018ad1b2 100644
--- a/searx/engines/google.py
+++ b/searx/engines/google.py
@@ -46,11 +46,11 @@ country_to_hostname = {
'NZ': 'www.google.co.nz', # New Zealand
'PH': 'www.google.com.ph', # Philippines
'SG': 'www.google.com.sg', # Singapore
- # 'US': 'www.google.us', # United State, redirect to .com
+ # 'US': 'www.google.us', # United States, redirect to .com
'ZA': 'www.google.co.za', # South Africa
'AR': 'www.google.com.ar', # Argentina
'CL': 'www.google.cl', # Chile
- 'ES': 'www.google.es', # Span
+ 'ES': 'www.google.es', # Spain
'MX': 'www.google.com.mx', # Mexico
'EE': 'www.google.ee', # Estonia
'FI': 'www.google.fi', # Finland
@@ -61,7 +61,7 @@ country_to_hostname = {
'HU': 'www.google.hu', # Hungary
'IT': 'www.google.it', # Italy
'JP': 'www.google.co.jp', # Japan
- 'KR': 'www.google.co.kr', # South Korean
+ 'KR': 'www.google.co.kr', # South Korea
'LT': 'www.google.lt', # Lithuania
'LV': 'www.google.lv', # Latvia
'NO': 'www.google.no', # Norway
@@ -76,9 +76,9 @@ country_to_hostname = {
'SE': 'www.google.se', # Sweden
'TH': 'www.google.co.th', # Thailand
'TR': 'www.google.com.tr', # Turkey
- 'UA': 'www.google.com.ua', # Ikraine
- # 'CN': 'www.google.cn', # China, only from china ?
- 'HK': 'www.google.com.hk', # Hong kong
+ 'UA': 'www.google.com.ua', # Ukraine
+ # 'CN': 'www.google.cn', # China, only from China ?
+ 'HK': 'www.google.com.hk', # Hong Kong
'TW': 'www.google.com.tw' # Taiwan
}
diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py
index 9d51428cc..efe46812a 100644
--- a/searx/engines/google_images.py
+++ b/searx/engines/google_images.py
@@ -49,8 +49,6 @@ def response(resp):
# parse results
for result in dom.xpath('//div[@data-ved]'):
- data_url = result.xpath('./a/@href')[0]
- data_query = {k: v[0] for k, v in parse_qs(data_url.split('?', 1)[1]).iteritems()}
metadata = loads(result.xpath('./div[@class="rg_meta"]/text()')[0])
@@ -60,11 +58,11 @@ def response(resp):
thumbnail_src = thumbnail_src.replace("http://", "https://")
# append result
- results.append({'url': data_query['imgrefurl'],
+ results.append({'url': metadata['ru'],
'title': metadata['pt'],
'content': metadata['s'],
- 'thumbnail_src': metadata['tu'],
- 'img_src': data_query['imgurl'],
+ 'thumbnail_src': thumbnail_src,
+ 'img_src': metadata['ou'],
'template': 'images.html'})
# return results
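
The parser now takes everything from the per-result "rg_meta" JSON blob instead of the redirect URL's query string. The field meanings below are inferred from the mapping in this hunk, and the values are made up:

from json import loads

metadata = loads('{"ru": "https://example.org/article.html",'
                 ' "pt": "Example title",'
                 ' "s": "Example snippet",'
                 ' "tu": "http://t0.gstatic.com/images?q=tbn:...",'
                 ' "ou": "https://example.org/image.jpg"}')

thumbnail_src = metadata['tu'].replace("http://", "https://")
result = {'url': metadata['ru'],            # page embedding the image
          'title': metadata['pt'],
          'content': metadata['s'],
          'thumbnail_src': thumbnail_src,
          'img_src': metadata['ou'],        # full-size image
          'template': 'images.html'}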
diff --git a/searx/engines/nyaa.py b/searx/engines/nyaa.py
new file mode 100644
index 000000000..cda8231f7
--- /dev/null
+++ b/searx/engines/nyaa.py
@@ -0,0 +1,119 @@
+"""
+ Nyaa.se (Anime Bittorrent tracker)
+
+ @website http://www.nyaa.se/
+ @provide-api no
+ @using-api no
+ @results HTML
+ @stable no (HTML can change)
+ @parse url, title, content, seed, leech, torrentfile
+"""
+
+from cgi import escape
+from urllib import urlencode
+from lxml import html
+from searx.engines.xpath import extract_text
+
+# engine dependent config
+categories = ['files', 'images', 'videos', 'music']
+paging = True
+
+# search-url
+base_url = 'http://www.nyaa.se/'
+search_url = base_url + '?page=search&{query}&offset={offset}'
+
+# xpath queries
+xpath_results = '//table[@class="tlist"]//tr[contains(@class, "tlistrow")]'
+xpath_category = './/td[@class="tlisticon"]/a'
+xpath_title = './/td[@class="tlistname"]/a'
+xpath_torrent_file = './/td[@class="tlistdownload"]/a'
+xpath_filesize = './/td[@class="tlistsize"]/text()'
+xpath_seeds = './/td[@class="tlistsn"]/text()'
+xpath_leeches = './/td[@class="tlistln"]/text()'
+xpath_downloads = './/td[@class="tlistdn"]/text()'
+
+
+# convert a variable to integer or return 0 if it's not a number
+def int_or_zero(num):
+ if isinstance(num, list):
+ if len(num) < 1:
+ return 0
+ num = num[0]
+ if num.isdigit():
+ return int(num)
+ return 0
+
+
+# get multiplier to convert torrent size to bytes
+def get_filesize_mul(suffix):
+ return {
+ 'KB': 1024,
+ 'MB': 1024 ** 2,
+ 'GB': 1024 ** 3,
+ 'TB': 1024 ** 4,
+
+ 'KIB': 1024,
+ 'MIB': 1024 ** 2,
+ 'GIB': 1024 ** 3,
+ 'TIB': 1024 ** 4
+ }[str(suffix).upper()]
+
+
+# do search-request
+def request(query, params):
+ query = urlencode({'term': query})
+ params['url'] = search_url.format(query=query, offset=params['pageno'])
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ dom = html.fromstring(resp.text)
+
+ for result in dom.xpath(xpath_results):
+ # category in which our torrent belongs
+ category = result.xpath(xpath_category)[0].attrib.get('title')
+
+ # torrent title
+ page_a = result.xpath(xpath_title)[0]
+ title = escape(extract_text(page_a))
+
+ # link to the page
+ href = page_a.attrib.get('href')
+
+ # link to the torrent file
+ torrent_link = result.xpath(xpath_torrent_file)[0].attrib.get('href')
+
+ # torrent size
+ try:
+ file_size, suffix = result.xpath(xpath_filesize)[0].split(' ')
+ file_size = int(float(file_size) * get_filesize_mul(suffix))
+ except Exception as e:
+ file_size = None
+
+ # seed count
+ seed = int_or_zero(result.xpath(xpath_seeds))
+
+ # leech count
+ leech = int_or_zero(result.xpath(xpath_leeches))
+
+ # torrent downloads count
+ downloads = int_or_zero(result.xpath(xpath_downloads))
+
+ # content string contains all information not included into template
+ content = 'Category: "{category}". Downloaded {downloads} times.'
+ content = content.format(category=category, downloads=downloads)
+ content = escape(content)
+
+ results.append({'url': href,
+ 'title': title,
+ 'content': content,
+ 'seed': seed,
+ 'leech': leech,
+ 'filesize': file_size,
+ 'torrentfile': torrent_link,
+ 'template': 'torrent.html'})
+
+ return results
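
The two helpers above are also imported by the tokyotoshokan and torrentz engines below. A few quick, illustrative calls (assuming searx is importable):

from searx.engines.nyaa import int_or_zero, get_filesize_mul

int_or_zero(['17'])        # -> 17
int_or_zero(['1,024'])     # -> 0, not a plain digit string
int_or_zero([])            # -> 0
get_filesize_mul('MiB')    # -> 1048576
int(float('1.228') * get_filesize_mul('GB'))   # -> roughly 1.3e9 bytes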
diff --git a/searx/engines/reddit.py b/searx/engines/reddit.py
new file mode 100644
index 000000000..3ca7e44f6
--- /dev/null
+++ b/searx/engines/reddit.py
@@ -0,0 +1,79 @@
+"""
+ Reddit
+
+ @website https://www.reddit.com/
+ @provide-api yes (https://www.reddit.com/dev/api)
+
+ @using-api yes
+ @results JSON
+ @stable yes
+ @parse url, title, content, thumbnail, publishedDate
+"""
+
+import json
+from cgi import escape
+from urllib import urlencode
+from urlparse import urlparse, urljoin
+from datetime import datetime
+
+# engine dependent config
+categories = ['general', 'images', 'news', 'social media']
+page_size = 25
+
+# search-url
+base_url = 'https://www.reddit.com/'
+search_url = base_url + 'search.json?{query}'
+
+
+# do search-request
+def request(query, params):
+ query = urlencode({'q': query,
+ 'limit': page_size})
+ params['url'] = search_url.format(query=query)
+
+ return params
+
+
+# get response from search-request
+def response(resp):
+ img_results = []
+ text_results = []
+
+ search_results = json.loads(resp.text)
+
+ # return empty array if there are no results
+ if 'data' not in search_results:
+ return []
+
+ posts = search_results.get('data', {}).get('children', [])
+
+ # process results
+ for post in posts:
+ data = post['data']
+
+ # extract post information
+ params = {
+ 'url': urljoin(base_url, data['permalink']),
+ 'title': data['title']
+ }
+
+ # if thumbnail field contains a valid URL, we need to change template
+ thumbnail = data['thumbnail']
+ url_info = urlparse(thumbnail)
+ # netloc & path
+ if url_info[1] != '' and url_info[2] != '':
+ params['img_src'] = data['url']
+ params['thumbnail_src'] = thumbnail
+ params['template'] = 'images.html'
+ img_results.append(params)
+ else:
+ created = datetime.fromtimestamp(data['created_utc'])
+ content = escape(data['selftext'])
+ if len(content) > 500:
+ content = content[:500] + '...'
+ params['content'] = content
+ params['publishedDate'] = created
+ text_results.append(params)
+
+ # show images first and text results second
+ return img_results + text_results
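
The netloc/path test above separates real preview URLs from reddit's placeholder thumbnail values such as "self" or "default"; a small sketch with illustrative values:

from urlparse import urlparse

urlparse('https://b.thumbs.redditmedia.com/abc.jpg')[1:3]
# -> ('b.thumbs.redditmedia.com', '/abc.jpg')  -> rendered with images.html
urlparse('self')[1:3]
# -> ('', 'self')                              -> rendered as a text result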
diff --git a/searx/engines/tokyotoshokan.py b/searx/engines/tokyotoshokan.py
new file mode 100644
index 000000000..17e8e2191
--- /dev/null
+++ b/searx/engines/tokyotoshokan.py
@@ -0,0 +1,102 @@
+"""
+ Tokyo Toshokan (A BitTorrent Library for Japanese Media)
+
+ @website https://www.tokyotosho.info/
+ @provide-api no
+ @using-api no
+ @results HTML
+ @stable no (HTML can change)
+ @parse url, title, publishedDate, seed, leech,
+ filesize, magnetlink, content
+"""
+
+import re
+from cgi import escape
+from urllib import urlencode
+from lxml import html
+from searx.engines.xpath import extract_text
+from datetime import datetime
+from searx.engines.nyaa import int_or_zero, get_filesize_mul
+
+# engine dependent config
+categories = ['files', 'videos', 'music']
+paging = True
+
+# search-url
+base_url = 'https://www.tokyotosho.info/'
+search_url = base_url + 'search.php?{query}'
+
+
+# do search-request
+def request(query, params):
+ query = urlencode({'page': params['pageno'],
+ 'terms': query})
+ params['url'] = search_url.format(query=query)
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ dom = html.fromstring(resp.text)
+ rows = dom.xpath('//table[@class="listing"]//tr[contains(@class, "category_0")]')
+
+ # check if there are no results or page layout was changed so we cannot parse it
+ # currently there are two rows for each result, so total count must be even
+ if len(rows) == 0 or len(rows) % 2 != 0:
+ return []
+
+ # regular expression for parsing torrent size strings
+ size_re = re.compile('Size:\s*([\d.]+)(TB|GB|MB|B)', re.IGNORECASE)
+
+ # processing the results, two rows at a time
+ for i in xrange(0, len(rows), 2):
+ # parse the first row
+ name_row = rows[i]
+
+ links = name_row.xpath('./td[@class="desc-top"]/a')
+ params = {
+ 'template': 'torrent.html',
+ 'url': links[-1].attrib.get('href'),
+ 'title': extract_text(links[-1])
+ }
+ # I have not yet seen any torrents without magnet links, but
+ # it's better to be prepared to stumble upon one some day
+ if len(links) == 2:
+ magnet = links[0].attrib.get('href')
+ if magnet.startswith('magnet'):
+ # okay, we have a valid magnet link, let's add it to the result
+ params['magnetlink'] = magnet
+
+ # no more info in the first row, start parsing the second one
+ info_row = rows[i + 1]
+ desc = extract_text(info_row.xpath('./td[@class="desc-bot"]')[0])
+ for item in desc.split('|'):
+ item = item.strip()
+ if item.startswith('Size:'):
+ try:
+ # ('1.228', 'GB')
+ groups = size_re.match(item).groups()
+ multiplier = get_filesize_mul(groups[1])
+ params['filesize'] = int(multiplier * float(groups[0]))
+ except Exception as e:
+ pass
+ elif item.startswith('Date:'):
+ try:
+ # Date: 2016-02-21 21:44 UTC
+ date = datetime.strptime(item, 'Date: %Y-%m-%d %H:%M UTC')
+ params['publishedDate'] = date
+ except Exception as e:
+ pass
+ elif item.startswith('Comment:'):
+ params['content'] = item
+ stats = info_row.xpath('./td[@class="stats"]/span')
+ # has the layout not changed yet?
+ if len(stats) == 3:
+ params['seed'] = int_or_zero(extract_text(stats[0]))
+ params['leech'] = int_or_zero(extract_text(stats[1]))
+
+ results.append(params)
+
+ return results
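
A rough check of the "desc-bot" parsing above, using a made-up description cell in the format the regular expression and the strptime call expect:

import re
from datetime import datetime

size_re = re.compile('Size:\s*([\d.]+)(TB|GB|MB|B)', re.IGNORECASE)
desc = 'Comment: English subs | Size: 1.228GB | Date: 2016-02-21 21:44 UTC'

for item in desc.split('|'):
    item = item.strip()
    if item.startswith('Size:'):
        value, suffix = size_re.match(item).groups()     # ('1.228', 'GB')
    elif item.startswith('Date:'):
        date = datetime.strptime(item, 'Date: %Y-%m-%d %H:%M UTC')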
diff --git a/searx/engines/torrentz.py b/searx/engines/torrentz.py
new file mode 100644
index 000000000..92fbe7013
--- /dev/null
+++ b/searx/engines/torrentz.py
@@ -0,0 +1,93 @@
+"""
+ Torrentz.eu (BitTorrent meta-search engine)
+
+ @website https://torrentz.eu/
+ @provide-api no
+
+ @using-api no
+ @results HTML
+ @stable no (HTML can change, although unlikely,
+ see https://torrentz.eu/torrentz.btsearch)
+ @parse url, title, publishedDate, seed, leech, filesize, magnetlink
+"""
+
+import re
+from cgi import escape
+from urllib import urlencode
+from lxml import html
+from searx.engines.xpath import extract_text
+from datetime import datetime
+from searx.engines.nyaa import int_or_zero, get_filesize_mul
+
+# engine dependent config
+categories = ['files', 'videos', 'music']
+paging = True
+
+# search-url
+# https://torrentz.eu/search?f=EXAMPLE&p=6
+base_url = 'https://torrentz.eu/'
+search_url = base_url + 'search?{query}'
+
+
+# do search-request
+def request(query, params):
+ page = params['pageno'] - 1
+ query = urlencode({'q': query, 'p': page})
+ params['url'] = search_url.format(query=query)
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ dom = html.fromstring(resp.text)
+
+ for result in dom.xpath('//div[@class="results"]/dl'):
+ name_cell = result.xpath('./dt')[0]
+ title = extract_text(name_cell)
+
+ # skip rows that do not contain a link to a torrent
+ links = name_cell.xpath('./a')
+ if len(links) != 1:
+ continue
+
+ # extract url and remove a slash in the beginning
+ link = links[0].attrib.get('href').lstrip('/')
+
+ seed = result.xpath('./dd/span[@class="u"]/text()')[0].replace(',', '')
+ leech = result.xpath('./dd/span[@class="d"]/text()')[0].replace(',', '')
+
+ params = {
+ 'url': base_url + link,
+ 'title': title,
+ 'seed': int_or_zero(seed),
+ 'leech': int_or_zero(leech),
+ 'template': 'torrent.html'
+ }
+
+ # let's try to calculate the torrent size
+ try:
+ size_str = result.xpath('./dd/span[@class="s"]/text()')[0]
+ size, suffix = size_str.split()
+ params['filesize'] = int(size) * get_filesize_mul(suffix)
+ except Exception as e:
+ pass
+
+ # does our link contain a valid SHA1 sum?
+ if re.compile('[0-9a-fA-F]{40}').match(link):
+ # add a magnet link to the result
+ params['magnetlink'] = 'magnet:?xt=urn:btih:' + link
+
+ # extract and convert creation date
+ try:
+ date_str = result.xpath('./dd/span[@class="a"]/span')[0].attrib.get('title')
+ # Fri, 25 Mar 2016 16:29:01
+ date = datetime.strptime(date_str, '%a, %d %b %Y %H:%M:%S')
+ params['publishedDate'] = date
+ except Exception as e:
+ pass
+
+ results.append(params)
+
+ return results
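
Torrentz links each result by its info hash, so any 40-character hex path can be turned directly into a magnet URI; a small sketch with an illustrative hash:

import re

link = '4184bcdcf1e36c9d2f55ef606444ee989db22b26'     # made-up info hash
if re.compile('[0-9a-fA-F]{40}').match(link):
    magnetlink = 'magnet:?xt=urn:btih:' + link
# -> 'magnet:?xt=urn:btih:4184bcdc...'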
diff --git a/searx/engines/wikidata.py b/searx/engines/wikidata.py
index 9f3496b72..8aa2fcd5c 100644
--- a/searx/engines/wikidata.py
+++ b/searx/engines/wikidata.py
@@ -86,15 +86,15 @@ def getDetail(jsonresponse, wikidata_id, language, locale):
results.append({'title': title, 'url': official_website})
wikipedia_link_count = 0
+ wikipedia_link = get_wikilink(result, language + 'wiki')
+ wikipedia_link_count += add_url(urls,
+ 'Wikipedia (' + language + ')',
+ wikipedia_link)
if language != 'en':
+ wikipedia_en_link = get_wikilink(result, 'enwiki')
wikipedia_link_count += add_url(urls,
- 'Wikipedia (' + language + ')',
- get_wikilink(result, language +
- 'wiki'))
- wikipedia_en_link = get_wikilink(result, 'enwiki')
- wikipedia_link_count += add_url(urls,
- 'Wikipedia (en)',
- wikipedia_en_link)
+ 'Wikipedia (en)',
+ wikipedia_en_link)
if wikipedia_link_count == 0:
misc_language = get_wiki_firstlanguage(result, 'wiki')
if misc_language is not None:
@@ -188,7 +188,7 @@ def getDetail(jsonresponse, wikidata_id, language, locale):
else:
results.append({
'infobox': title,
- 'id': wikipedia_en_link,
+ 'id': wikipedia_link,
'content': description,
'attributes': attributes,
'urls': urls
diff --git a/searx/engines/wikipedia.py b/searx/engines/wikipedia.py
new file mode 100644
index 000000000..fed7b263f
--- /dev/null
+++ b/searx/engines/wikipedia.py
@@ -0,0 +1,114 @@
+"""
+ Wikipedia (Web)
+
+ @website https://{language}.wikipedia.org
+ @provide-api yes
+
+ @using-api yes
+ @results JSON
+ @stable yes
+ @parse url, infobox
+"""
+
+from json import loads
+from urllib import urlencode, quote
+
+# search-url
+base_url = 'https://{language}.wikipedia.org/'
+search_postfix = 'w/api.php?'\
+ 'action=query'\
+ '&format=json'\
+ '&{query}'\
+ '&prop=extracts|pageimages'\
+ '&exintro'\
+ '&explaintext'\
+ '&pithumbsize=300'\
+ '&redirects'
+
+
+# set language in base_url
+def url_lang(lang):
+ if lang == 'all':
+ language = 'en'
+ else:
+ language = lang.split('_')[0]
+
+ return base_url.format(language=language)
+
+
+# do search-request
+def request(query, params):
+ if query.islower():
+ query += '|' + query.title()
+
+ params['url'] = url_lang(params['language']) \
+ + search_postfix.format(query=urlencode({'titles': query}))
+
+ return params
+
+
+# get first meaningful paragraph
+# this should filter out disambiguation pages and notes above first paragraph
+# "magic numbers" were obtained by fine tuning
+def extract_first_paragraph(content, title, image):
+ first_paragraph = None
+
+ failed_attempts = 0
+ for paragraph in content.split('\n'):
+
+ starts_with_title = paragraph.lower().find(title.lower(), 0, len(title) + 35)
+ length = len(paragraph)
+
+ if length >= 200 or (starts_with_title >= 0 and (image or length >= 150)):
+ first_paragraph = paragraph
+ break
+
+ failed_attempts += 1
+ if failed_attempts > 3:
+ return None
+
+ return first_paragraph
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ search_result = loads(resp.content)
+
+ # wikipedia article's unique id
+ # first valid id is assumed to be the requested article
+ for article_id in search_result['query']['pages']:
+ page = search_result['query']['pages'][article_id]
+ if int(article_id) > 0:
+ break
+
+ if int(article_id) < 0:
+ return []
+
+ title = page.get('title')
+
+ image = page.get('thumbnail')
+ if image:
+ image = image.get('source')
+
+ extract = page.get('extract')
+
+ summary = extract_first_paragraph(extract, title, image)
+ if not summary:
+ return []
+
+ # link to wikipedia article
+ # parenthesis are not quoted to make infobox mergeable with wikidata's
+ wikipedia_link = url_lang(resp.search_params['language']) \
+ + 'wiki/' + quote(title.replace(' ', '_').encode('utf8')).replace('%28', '(').replace('%29', ')')
+
+ results.append({'url': wikipedia_link, 'title': title})
+
+ results.append({'infobox': title,
+ 'id': wikipedia_link,
+ 'content': summary,
+ 'img_src': image,
+ 'urls': [{'title': 'Wikipedia', 'url': wikipedia_link}]})
+
+ return results
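
A quick sketch of the article-URL construction above, showing why '(' and ')' are restored after quoting: the id has to be byte-identical to the one wikidata.py emits so the two infoboxes merge (the title is illustrative):

from urllib import quote

title = u'Python (programming language)'
wikipedia_link = 'https://en.wikipedia.org/wiki/' + \
    quote(title.replace(' ', '_').encode('utf8')).replace('%28', '(').replace('%29', ')')
# -> 'https://en.wikipedia.org/wiki/Python_(programming_language)'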
diff --git a/searx/engines/xpath.py b/searx/engines/xpath.py
index f51634be0..e701c02bf 100644
--- a/searx/engines/xpath.py
+++ b/searx/engines/xpath.py
@@ -11,6 +11,14 @@ title_xpath = None
suggestion_xpath = ''
results_xpath = ''
+# parameters for engines with paging support
+#
+# number of results on each page
+# (only needed if the site requires not a page number, but an offset)
+page_size = 1
+# number of the first page (usually 0 or 1)
+first_page_num = 1
+
'''
if xpath_results is list, extract the text from each result and concat the list
@@ -76,8 +84,14 @@ def normalize_url(url):
def request(query, params):
query = urlencode({'q': query})[2:]
- params['url'] = search_url.format(query=query)
+
+ fp = {'query': query}
+ if paging and search_url.find('{pageno}') >= 0:
+ fp['pageno'] = (params['pageno'] + first_page_num - 1) * page_size
+
+ params['url'] = search_url.format(**fp)
params['query'] = query
+
return params
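
A worked example of the new paging parameters, for a hypothetical offset-based engine configured through settings.yml:

# search_url = 'https://example.org/find?q={query}&start={pageno}'   (hypothetical)
page_size = 20        # the site returns 20 results per request
first_page_num = 0    # the site counts offsets from 0
pageno = 3            # searx-side page number, 1-based

offset = (pageno + first_page_num - 1) * page_size   # -> 40
# with the defaults (page_size=1, first_page_num=1) this reduces to the page
# number itself: (3 + 1 - 1) * 1 -> 3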