author      Markus Heiser <markus.heiser@darmarIT.de>    2020-04-29 12:55:13 +0000
committer   GitHub <noreply@github.com>                  2020-04-29 12:55:13 +0000
commit      4bae1a9eabd33ee095002c0392d26c45e8319159 (patch)
tree        43601cb54beca64d63457f66a46b1633ffb522c6 /searx/engines
parent      ceceee546b5273d9a1ebce6638ab98c7c34ed58f (diff)
parent      7342806987aec05c50f12e149683609640ba66a0 (diff)
Merge branch 'master' into fix/manage.sh
Diffstat (limited to 'searx/engines')
-rw-r--r--  searx/engines/__init__.py             |   9
-rw-r--r--  searx/engines/bing.py                 |  28
-rw-r--r--  searx/engines/bing_images.py          |  21
-rw-r--r--  searx/engines/bing_news.py            |   4
-rw-r--r--  searx/engines/bing_videos.py          |   6
-rw-r--r--  searx/engines/dummy-offline.py        |  12
-rw-r--r--  searx/engines/etools.py               |  54
-rw-r--r--  searx/engines/faroo.py                |  96
-rw-r--r--  searx/engines/flickr_noapi.py         |  26
-rw-r--r--  searx/engines/genius.py               |   1
-rw-r--r--  searx/engines/gigablast.py            |  41
-rw-r--r--  searx/engines/google_news.py          |   2
-rw-r--r--  searx/engines/ina.py                  |   9
-rw-r--r--  searx/engines/microsoft_academic.py   |   2
-rw-r--r--  searx/engines/openstreetmap.py        |   2
-rw-r--r--  searx/engines/qwant.py                |   1
-rw-r--r--  searx/engines/scanr_structures.py     |   2
-rw-r--r--  searx/engines/spotify.py              |  14
-rw-r--r--  searx/engines/startpage.py            |   7
-rw-r--r--  searx/engines/wikipedia.py            |   9
20 files changed, 183 insertions, 163 deletions
diff --git a/searx/engines/__init__.py b/searx/engines/__init__.py
index 2393f52b6..9ccef8b54 100644
--- a/searx/engines/__init__.py
+++ b/searx/engines/__init__.py
@@ -54,7 +54,8 @@ engine_default_args = {'paging': False,
'suspend_end_time': 0,
'continuous_errors': 0,
'time_range_support': False,
- 'offline': False}
+ 'offline': False,
+ 'tokens': []}
def load_engine(engine_data):
@@ -160,7 +161,7 @@ def to_percentage(stats, maxvalue):
return stats
-def get_engines_stats():
+def get_engines_stats(preferences):
# TODO refactor
pageloads = []
engine_times = []
@@ -171,8 +172,12 @@ def get_engines_stats():
max_pageload = max_engine_times = max_results = max_score = max_errors = max_score_per_result = 0 # noqa
for engine in engines.values():
+ if not preferences.validate_token(engine):
+ continue
+
if engine.stats['search_count'] == 0:
continue
+
results_num = \
engine.stats['result_count'] / float(engine.stats['search_count'])
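
Note: the new 'tokens' default and the validate_token() gate above hide token-protected engines from users who have not supplied a matching token in their preferences. A minimal sketch of the idea — the function name and the shape of the user-token list are illustrative, not the searx implementation:

    # Hypothetical sketch: an engine with a non-empty `tokens` list is only
    # visible when the user's preferences carry one of those tokens.
    def validate_token(user_tokens, engine):
        engine_tokens = getattr(engine, 'tokens', None)
        if not engine_tokens:
            return True  # public engine, no token required
        return any(token in engine_tokens for token in user_tokens)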
diff --git a/searx/engines/bing.py b/searx/engines/bing.py
index ed0b87dbd..afb776acd 100644
--- a/searx/engines/bing.py
+++ b/searx/engines/bing.py
@@ -89,8 +89,7 @@ def response(resp):
'content': content})
try:
- result_len_container = "".join(eval_xpath(dom, '//span[@class="sb_count"]/text()'))
- result_len_container = utils.to_string(result_len_container)
+ result_len_container = "".join(eval_xpath(dom, '//span[@class="sb_count"]//text()'))
if "-" in result_len_container:
# Remove the part "from-to" for paginated request ...
result_len_container = result_len_container[result_len_container.find("-") * 2 + 2:]
@@ -102,7 +101,7 @@ def response(resp):
logger.debug('result error :\n%s', e)
pass
- if _get_offset_from_pageno(resp.search_params.get("pageno", 0)) > result_len:
+ if result_len and _get_offset_from_pageno(resp.search_params.get("pageno", 0)) > result_len:
return []
results.append({'number_of_results': result_len})
@@ -111,13 +110,18 @@ def response(resp):
# get supported languages from their site
def _fetch_supported_languages(resp):
- supported_languages = []
+ lang_tags = set()
+
+ setmkt = re.compile('setmkt=([^&]*)')
dom = html.fromstring(resp.text)
- options = eval_xpath(dom, '//div[@id="limit-languages"]//input')
- for option in options:
- code = eval_xpath(option, './@id')[0].replace('_', '-')
- if code == 'nb':
- code = 'no'
- supported_languages.append(code)
-
- return supported_languages
+ lang_links = eval_xpath(dom, "//li/a[contains(@href, 'setmkt')]")
+
+ for a in lang_links:
+ href = eval_xpath(a, './@href')[0]
+ match = setmkt.search(href)
+ l_tag = match.groups()[0]
+ _lang, _nation = l_tag.split('-', 1)
+ l_tag = _lang.lower() + '-' + _nation.upper()
+ lang_tags.add(l_tag)
+
+ return list(lang_tags)
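
Note: the rewritten _fetch_supported_languages() no longer reads the language checkboxes; it collects the setmkt= market tags from Bing's region links and normalizes their case. A standalone sketch of that normalization (the href value is made up):

    import re

    setmkt = re.compile('setmkt=([^&]*)')

    href = '/?setmkt=de-de&setlang=de'           # illustrative link target
    l_tag = setmkt.search(href).groups()[0]      # 'de-de'
    _lang, _nation = l_tag.split('-', 1)
    print(_lang.lower() + '-' + _nation.upper()) # -> 'de-DE'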
diff --git a/searx/engines/bing_images.py b/searx/engines/bing_images.py
index 44e2c3bbc..138ed11c6 100644
--- a/searx/engines/bing_images.py
+++ b/searx/engines/bing_images.py
@@ -18,6 +18,8 @@ import re
from searx.url_utils import urlencode
from searx.utils import match_language
+from searx.engines.bing import _fetch_supported_languages, supported_languages_url, language_aliases
+
# engine dependent config
categories = ['images']
paging = True
@@ -103,22 +105,3 @@ def response(resp):
continue
return results
-
-
-# get supported languages from their site
-def _fetch_supported_languages(resp):
- supported_languages = []
- dom = html.fromstring(resp.text)
-
- regions_xpath = '//div[@id="region-section-content"]' \
- + '//ul[@class="b_vList"]/li/a/@href'
-
- regions = dom.xpath(regions_xpath)
- for region in regions:
- code = re.search('setmkt=[^\&]+', region).group()[7:]
- if code == 'nb-NO':
- code = 'no-NO'
-
- supported_languages.append(code)
-
- return supported_languages
diff --git a/searx/engines/bing_news.py b/searx/engines/bing_news.py
index 669130c42..d13be777c 100644
--- a/searx/engines/bing_news.py
+++ b/searx/engines/bing_news.py
@@ -15,9 +15,10 @@ from datetime import datetime
from dateutil import parser
from lxml import etree
from searx.utils import list_get, match_language
-from searx.engines.bing import _fetch_supported_languages, supported_languages_url, language_aliases
from searx.url_utils import urlencode, urlparse, parse_qsl
+from searx.engines.bing import _fetch_supported_languages, supported_languages_url, language_aliases
+
# engine dependent config
categories = ['news']
paging = True
@@ -58,6 +59,7 @@ def _get_url(query, language, offset, time_range):
offset=offset,
interval=time_range_dict[time_range])
else:
+ # e.g. setmkt=de-de&setlang=de
search_path = search_string.format(
query=urlencode({'q': query, 'setmkt': language}),
offset=offset)
diff --git a/searx/engines/bing_videos.py b/searx/engines/bing_videos.py
index f1e636819..f048f0d8e 100644
--- a/searx/engines/bing_videos.py
+++ b/searx/engines/bing_videos.py
@@ -12,10 +12,10 @@
from json import loads
from lxml import html
-from searx.engines.bing_images import _fetch_supported_languages, supported_languages_url
from searx.url_utils import urlencode
from searx.utils import match_language
+from searx.engines.bing import _fetch_supported_languages, supported_languages_url, language_aliases
categories = ['videos']
paging = True
@@ -67,6 +67,10 @@ def request(query, params):
if params['time_range'] in time_range_dict:
params['url'] += time_range_string.format(interval=time_range_dict[params['time_range']])
+ # bing videos did not like "older" browser versions < 70.0.1 when selecting
+ # languages other than 'en' .. very strange ?!?!
+ params['headers']['User-Agent'] = 'Mozilla/5.0 (X11; Linux x86_64; rv:73.0.1) Gecko/20100101 Firefox/73.0.1'
+
return params

diff --git a/searx/engines/dummy-offline.py b/searx/engines/dummy-offline.py
new file mode 100644
index 000000000..13a9ecc01
--- /dev/null
+++ b/searx/engines/dummy-offline.py
@@ -0,0 +1,12 @@
+"""
+ Dummy Offline
+
+ @results one result
+ @stable yes
+"""
+
+
+def search(query, request_params):
+ return [{
+ 'result': 'this is what you get',
+ }]
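
Note: unlike the request()/response() pair of online engines, an offline engine exposes a single search() hook that returns results directly; no HTTP round trip is involved. A self-contained sketch of that contract:

    def search(query, request_params):
        return [{
            'result': 'this is what you get',
        }]

    # searx calls the hook directly instead of issuing a request:
    print(search('any query', {'pageno': 1}))
    # -> [{'result': 'this is what you get'}]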
diff --git a/searx/engines/etools.py b/searx/engines/etools.py
new file mode 100644
index 000000000..a9eb0980d
--- /dev/null
+++ b/searx/engines/etools.py
@@ -0,0 +1,54 @@
+"""
+ eTools (Web)
+
+ @website https://www.etools.ch
+ @provide-api no
+ @using-api no
+ @results HTML
+ @stable no (HTML can change)
+ @parse url, title, content
+"""
+
+from lxml import html
+from searx.engines.xpath import extract_text
+from searx.url_utils import quote
+from searx.utils import eval_xpath
+
+categories = ['general']
+paging = False
+language_support = False
+safesearch = True
+
+base_url = 'https://www.etools.ch'
+search_path = '/searchAdvancedSubmit.do'\
+ '?query={search_term}'\
+ '&pageResults=20'\
+ '&safeSearch={safesearch}'
+
+
+def request(query, params):
+ if params['safesearch']:
+ safesearch = 'true'
+ else:
+ safesearch = 'false'
+
+ params['url'] = base_url + search_path.format(search_term=quote(query), safesearch=safesearch)
+
+ return params
+
+
+def response(resp):
+ results = []
+
+ dom = html.fromstring(resp.text)
+
+ for result in eval_xpath(dom, '//table[@class="result"]//td[@class="record"]'):
+ url = eval_xpath(result, './a/@href')[0]
+ title = extract_text(eval_xpath(result, './a//text()'))
+ content = extract_text(eval_xpath(result, './/div[@class="text"]//text()'))
+
+ results.append({'url': url,
+ 'title': title,
+ 'content': content})
+
+ return results
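
Note: for illustration, this is the URL the request() hook above produces for a two-word query with safe search enabled (assuming the engine module's request() is in scope; the URL is reconstructed from the format string):

    params = request('free software', {'safesearch': True})
    # params['url'] ==
    # 'https://www.etools.ch/searchAdvancedSubmit.do'
    # '?query=free%20software&pageResults=20&safeSearch=true'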
diff --git a/searx/engines/faroo.py b/searx/engines/faroo.py
deleted file mode 100644
index a36ecf778..000000000
--- a/searx/engines/faroo.py
+++ /dev/null
@@ -1,96 +0,0 @@
-"""
- Faroo (Web, News)
-
- @website http://www.faroo.com
- @provide-api yes (http://www.faroo.com/hp/api/api.html), require API-key
-
- @using-api no
- @results JSON
- @stable yes
- @parse url, title, content, publishedDate, img_src
-"""
-
-from json import loads
-import datetime
-from searx.utils import searx_useragent
-from searx.url_utils import urlencode
-
-# engine dependent config
-categories = ['general', 'news']
-paging = True
-language_support = True
-number_of_results = 10
-
-# search-url
-url = 'http://www.faroo.com/'
-search_url = url + 'instant.json?{query}'\
- '&start={offset}'\
- '&length={number_of_results}'\
- '&l={language}'\
- '&src={categorie}'\
- '&i=false'\
- '&c=false'
-
-search_category = {'general': 'web',
- 'news': 'news'}
-
-
-# do search-request
-def request(query, params):
- offset = (params['pageno'] - 1) * number_of_results + 1
- categorie = search_category.get(params['category'], 'web')
-
- if params['language'] == 'all':
- language = 'en'
- else:
- language = params['language'].split('-')[0]
-
- # if language is not supported, put it in english
- if language != 'en' and\
- language != 'de' and\
- language != 'zh':
- language = 'en'
-
- params['url'] = search_url.format(offset=offset,
- number_of_results=number_of_results,
- query=urlencode({'q': query}),
- language=language,
- categorie=categorie)
-
- params['headers']['Referer'] = url
-
- return params
-
-
-# get response from search-request
-def response(resp):
- # HTTP-Code 429: rate limit exceeded
- if resp.status_code == 429:
- raise Exception("rate limit has been exceeded!")
-
- results = []
-
- search_res = loads(resp.text)
-
- # return empty array if there are no results
- if not search_res.get('results', {}):
- return []
-
- # parse results
- for result in search_res['results']:
- publishedDate = None
- result_json = {'url': result['url'], 'title': result['title'],
- 'content': result['kwic']}
- if result['news']:
- result_json['publishedDate'] = \
- datetime.datetime.fromtimestamp(result['date'] / 1000.0)
-
- # append image result if image url is set
- if result['iurl']:
- result_json['template'] = 'videos.html'
- result_json['thumbnail'] = result['iurl']
-
- results.append(result_json)
-
- # return results
- return results
diff --git a/searx/engines/flickr_noapi.py b/searx/engines/flickr_noapi.py
index 198ac2cff..c8ee34f7a 100644
--- a/searx/engines/flickr_noapi.py
+++ b/searx/engines/flickr_noapi.py
@@ -109,14 +109,22 @@ def response(resp):
else:
url = build_flickr_url(photo['ownerNsid'], photo['id'])
- results.append({'url': url,
- 'title': title,
- 'img_src': img_src,
- 'thumbnail_src': thumbnail_src,
- 'content': content,
- 'author': author,
- 'source': source,
- 'img_format': img_format,
- 'template': 'images.html'})
+ result = {
+ 'url': url,
+ 'img_src': img_src,
+ 'thumbnail_src': thumbnail_src,
+ 'source': source,
+ 'img_format': img_format,
+ 'template': 'images.html'
+ }
+ try:
+ result['author'] = author
+ result['title'] = title
+ result['content'] = content
+ except:
+ result['author'] = ''
+ result['title'] = ''
+ result['content'] = ''
+ results.append(result)
return results
diff --git a/searx/engines/genius.py b/searx/engines/genius.py
index b265e9d76..aa5afad9b 100644
--- a/searx/engines/genius.py
+++ b/searx/engines/genius.py
@@ -72,6 +72,7 @@ def parse_album(hit):
result.update({'content': 'Released: {}'.format(year)})
return result
+
parse = {'lyric': parse_lyric, 'song': parse_lyric, 'artist': parse_artist, 'album': parse_album}
diff --git a/searx/engines/gigablast.py b/searx/engines/gigablast.py
index a84f3f69d..2bb29a9fe 100644
--- a/searx/engines/gigablast.py
+++ b/searx/engines/gigablast.py
@@ -14,6 +14,7 @@ import random
from json import loads
from time import time
from lxml.html import fromstring
+from searx.poolrequests import get
from searx.url_utils import urlencode
from searx.utils import eval_xpath
@@ -31,13 +32,9 @@ search_string = 'search?{query}'\
'&c=main'\
'&s={offset}'\
'&format=json'\
- '&qh=0'\
- '&qlang={lang}'\
+ '&langcountry={lang}'\
'&ff={safesearch}'\
- '&rxiec={rxieu}'\
- '&ulse={ulse}'\
- '&rand={rxikd}'\
- '&dbez={dbez}'
+ '&rand={rxikd}'
# specific xpath variables
results_xpath = '//response//result'
url_xpath = './/url'
@@ -46,9 +43,26 @@ content_xpath = './/sum'
supported_languages_url = 'https://gigablast.com/search?&rxikd=1'
+extra_param = '' # gigablast requires a random extra parameter
+# which can be extracted from the source code of the search page
+
+
+def parse_extra_param(text):
+ global extra_param
+ param_lines = [x for x in text.splitlines() if x.startswith('var url=') or x.startswith('url=url+')]
+ extra_param = ''
+ for l in param_lines:
+ extra_param += l.split("'")[1]
+ extra_param = extra_param.split('&')[-1]
+
+
+def init(engine_settings=None):
+ parse_extra_param(get('http://gigablast.com/search?c=main&qlangcountry=en-us&q=south&s=10').text)
+
# do search-request
def request(query, params):
+ print("EXTRAPARAM:", extra_param)
offset = (params['pageno'] - 1) * number_of_results
if params['language'] == 'all':
@@ -67,14 +81,11 @@ def request(query, params):
search_path = search_string.format(query=urlencode({'q': query}),
offset=offset,
number_of_results=number_of_results,
- rxikd=int(time() * 1000),
- rxieu=random.randint(1000000000, 9999999999),
- ulse=random.randint(100000000, 999999999),
lang=language,
- safesearch=safesearch,
- dbez=random.randint(100000000, 999999999))
+ rxikd=int(time() * 1000),
+ safesearch=safesearch)
- params['url'] = base_url + search_path
+ params['url'] = base_url + search_path + '&' + extra_param
return params
@@ -84,7 +95,11 @@ def response(resp):
results = []
# parse results
- response_json = loads(resp.text)
+ try:
+ response_json = loads(resp.text)
+ except:
+ parse_extra_param(resp.text)
+ raise Exception('extra param expired, please reload')
for result in response_json['results']:
# append result
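
Note: parse_extra_param() scrapes the rotating query parameter out of the JavaScript on Gigablast's search page by concatenating the string literals from the `var url=` / `url=url+` lines and keeping the last '&'-separated piece. A worked example with made-up page source:

    sample = "var url='/search?c=main'\n" \
             "url=url+'&uxrl=0.123'\n"
    parse_extra_param(sample)
    # extra_param is now 'uxrl=0.123' (the made-up rotating parameter)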
diff --git a/searx/engines/google_news.py b/searx/engines/google_news.py
index 9c837b45b..c9cc75435 100644
--- a/searx/engines/google_news.py
+++ b/searx/engines/google_news.py
@@ -54,7 +54,7 @@ def request(query, params):
if params['language'] != 'all':
language = match_language(params['language'], supported_languages, language_aliases).split('-')[0]
if language:
- params['url'] += '&lr=lang_' + language
+ params['url'] += '&hl=' + language
return params
diff --git a/searx/engines/ina.py b/searx/engines/ina.py
index 37a05f099..ea509649f 100644
--- a/searx/engines/ina.py
+++ b/searx/engines/ina.py
@@ -32,7 +32,7 @@ base_url = 'https://www.ina.fr'
search_url = base_url + '/layout/set/ajax/recherche/result?autopromote=&hf={ps}&b={start}&type=Video&r=&{query}'
# specific xpath variables
-results_xpath = '//div[contains(@class,"search-results--list")]/div[@class="media"]'
+results_xpath = '//div[contains(@class,"search-results--list")]//div[@class="media-body"]'
url_xpath = './/a/@href'
title_xpath = './/h3[@class="h3--title media-heading"]'
thumbnail_xpath = './/img/@src'
@@ -65,8 +65,11 @@ def response(resp):
videoid = result.xpath(url_xpath)[0]
url = base_url + videoid
title = p.unescape(extract_text(result.xpath(title_xpath)))
- thumbnail = extract_text(result.xpath(thumbnail_xpath)[0])
- if thumbnail[0] == '/':
+ try:
+ thumbnail = extract_text(result.xpath(thumbnail_xpath)[0])
+ except:
+ thumbnail = ''
+ if thumbnail and thumbnail[0] == '/':
thumbnail = base_url + thumbnail
d = extract_text(result.xpath(publishedDate_xpath)[0])
d = d.split('/')
diff --git a/searx/engines/microsoft_academic.py b/searx/engines/microsoft_academic.py
index 9387b08d0..9bac0069c 100644
--- a/searx/engines/microsoft_academic.py
+++ b/searx/engines/microsoft_academic.py
@@ -45,6 +45,8 @@ def request(query, params):
def response(resp):
results = []
response_data = loads(resp.text)
+ if not response_data:
+ return results
for result in response_data['results']:
url = _get_url(result)
diff --git a/searx/engines/openstreetmap.py b/searx/engines/openstreetmap.py
index 733ba6203..cec10a3c7 100644
--- a/searx/engines/openstreetmap.py
+++ b/searx/engines/openstreetmap.py
@@ -24,7 +24,7 @@ result_base_url = 'https://openstreetmap.org/{osm_type}/{osm_id}'
# do search-request
def request(query, params):
- params['url'] = base_url + search_string.format(query=query)
+ params['url'] = base_url + search_string.format(query=query.decode('utf-8'))
return params
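
Note: the .decode('utf-8') matters on the Python 2 code path, where the raw query arrives as bytes and would otherwise be interpolated into the URL template as a byte string. A minimal illustration:

    query = b'caf\xc3\xa9'        # the raw query arrives as bytes
    print(query.decode('utf-8'))  # -> café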
diff --git a/searx/engines/qwant.py b/searx/engines/qwant.py
index de12955c6..54e9dafad 100644
--- a/searx/engines/qwant.py
+++ b/searx/engines/qwant.py
@@ -50,6 +50,7 @@ def request(query, params):
language = match_language(params['language'], supported_languages, language_aliases)
params['url'] += '&locale=' + language.replace('-', '_').lower()
+ params['headers']['User-Agent'] = 'Mozilla/5.0 (X11; Linux x86_64; rv:69.0) Gecko/20100101 Firefox/69.0'
return params
diff --git a/searx/engines/scanr_structures.py b/searx/engines/scanr_structures.py
index 72fd2b3c9..7208dcb70 100644
--- a/searx/engines/scanr_structures.py
+++ b/searx/engines/scanr_structures.py
@@ -29,7 +29,7 @@ def request(query, params):
params['url'] = search_url
params['method'] = 'POST'
params['headers']['Content-type'] = "application/json"
- params['data'] = dumps({"query": query,
+ params['data'] = dumps({"query": query.decode('utf-8'),
"searchField": "ALL",
"sortDirection": "ASC",
"sortOrder": "RELEVANCY",
diff --git a/searx/engines/spotify.py b/searx/engines/spotify.py
index aed756be3..00c395706 100644
--- a/searx/engines/spotify.py
+++ b/searx/engines/spotify.py
@@ -12,10 +12,14 @@
from json import loads
from searx.url_utils import urlencode
+import requests
+import base64
# engine dependent config
categories = ['music']
paging = True
+api_client_id = None
+api_client_secret = None
# search-url
url = 'https://api.spotify.com/'
@@ -31,6 +35,16 @@ def request(query, params):
params['url'] = search_url.format(query=urlencode({'q': query}), offset=offset)
+ r = requests.post(
+ 'https://accounts.spotify.com/api/token',
+ data={'grant_type': 'client_credentials'},
+ headers={'Authorization': 'Basic ' + base64.b64encode(
+ "{}:{}".format(api_client_id, api_client_secret).encode('utf-8')
+ ).decode('utf-8')}
+ )
+ j = loads(r.text)
+ params['headers'] = {'Authorization': 'Bearer {}'.format(j.get('access_token'))}
+
return params
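
Note: the block added to request() performs Spotify's client-credentials OAuth flow: client id and secret go into an HTTP Basic header, and the returned access token is then sent as a Bearer token on the search request. The same flow as a standalone helper (sketch; error handling omitted):

    import base64
    import requests

    def fetch_spotify_token(client_id, client_secret):
        creds = base64.b64encode(
            '{}:{}'.format(client_id, client_secret).encode('utf-8')
        ).decode('utf-8')
        r = requests.post('https://accounts.spotify.com/api/token',
                          data={'grant_type': 'client_credentials'},
                          headers={'Authorization': 'Basic ' + creds})
        return r.json().get('access_token')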
diff --git a/searx/engines/startpage.py b/searx/engines/startpage.py
index 76567396f..953734934 100644
--- a/searx/engines/startpage.py
+++ b/searx/engines/startpage.py
@@ -99,11 +99,14 @@ def response(resp):
if re.match(r"^([1-9]|[1-2][0-9]|3[0-1]) [A-Z][a-z]{2} [0-9]{4} \.\.\. ", content):
date_pos = content.find('...') + 4
date_string = content[0:date_pos - 5]
- published_date = parser.parse(date_string, dayfirst=True)
-
# fix content string
content = content[date_pos:]
+ try:
+ published_date = parser.parse(date_string, dayfirst=True)
+ except ValueError:
+ pass
+
# check if search result starts with something like: "5 days ago ... "
elif re.match(r"^[0-9]+ days? ago \.\.\. ", content):
date_pos = content.find('...') + 4
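
Note: wrapping parser.parse() in try/except ValueError keeps a result whose leading text merely looks like a date from aborting the whole response parse. For example:

    from dateutil import parser

    try:
        published_date = parser.parse('31 Feb 2020', dayfirst=True)
    except ValueError:
        published_date = None  # not a real date; keep the result anyway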
diff --git a/searx/engines/wikipedia.py b/searx/engines/wikipedia.py
index 4dae735d1..a216ba886 100644
--- a/searx/engines/wikipedia.py
+++ b/searx/engines/wikipedia.py
@@ -21,7 +21,8 @@ search_url = base_url + u'w/api.php?'\
'action=query'\
'&format=json'\
'&{query}'\
- '&prop=extracts|pageimages'\
+ '&prop=extracts|pageimages|pageprops'\
+ '&ppprop=disambiguation'\
'&exintro'\
'&explaintext'\
'&pithumbsize=300'\
@@ -79,12 +80,15 @@ def response(resp):
# wikipedia article's unique id
# first valid id is assumed to be the requested article
+ if 'pages' not in search_result['query']:
+ return results
+
for article_id in search_result['query']['pages']:
page = search_result['query']['pages'][article_id]
if int(article_id) > 0:
break
- if int(article_id) < 0:
+ if int(article_id) < 0 or 'disambiguation' in page.get('pageprops', {}):
return []
title = page.get('title')
@@ -96,6 +100,7 @@ def response(resp):
extract = page.get('extract')
summary = extract_first_paragraph(extract, title, image)
+ summary = summary.replace('() ', '')
# link to wikipedia article
wikipedia_link = base_url.format(language=url_lang(resp.search_params['language'])) \