author     Markus Heiser <markus.heiser@darmarIT.de>    2020-01-28 10:59:03 +0000
committer  GitHub <noreply@github.com>                  2020-01-28 10:59:03 +0000
commit     e64ff38217a1ba49afd4bb1c595121d94cbb2e33 (patch)
tree       d8461b0392143da9d8ec9ae598b8a12c50914104 /searx/engines
parent     0e7b6c9a032d67bf5cbdcfc062d8466c18a62abd (diff)
parent     bda189565589b0065152f5a9fba4565404f9bd9a (diff)
Merge branch 'master' into fix-infinite-scroll
Diffstat (limited to 'searx/engines')
-rw-r--r--  searx/engines/bing.py                 5
-rw-r--r--  searx/engines/flickr_noapi.py        26
-rw-r--r--  searx/engines/gigablast.py           41
-rw-r--r--  searx/engines/ina.py                  9
-rw-r--r--  searx/engines/microsoft_academic.py   2
-rw-r--r--  searx/engines/openstreetmap.py        2
-rw-r--r--  searx/engines/qwant.py                1
-rw-r--r--  searx/engines/scanr_structures.py     2
-rw-r--r--  searx/engines/spotify.py             14
-rw-r--r--  searx/engines/wikipedia.py            9
10 files changed, 79 insertions, 32 deletions
diff --git a/searx/engines/bing.py b/searx/engines/bing.py
index ed0b87dbd..b193f7c60 100644
--- a/searx/engines/bing.py
+++ b/searx/engines/bing.py
@@ -89,8 +89,7 @@ def response(resp):
'content': content})
try:
- result_len_container = "".join(eval_xpath(dom, '//span[@class="sb_count"]/text()'))
- result_len_container = utils.to_string(result_len_container)
+ result_len_container = "".join(eval_xpath(dom, '//span[@class="sb_count"]//text()'))
if "-" in result_len_container:
# Remove the part "from-to" for paginated request ...
result_len_container = result_len_container[result_len_container.find("-") * 2 + 2:]
@@ -102,7 +101,7 @@ def response(resp):
logger.debug('result error :\n%s', e)
pass
- if _get_offset_from_pageno(resp.search_params.get("pageno", 0)) > result_len:
+ if result_len and _get_offset_from_pageno(resp.search_params.get("pageno", 0)) > result_len:
return []
results.append({'number_of_results': result_len})
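The added result_len guard matters because result_len stays 0 when the count string cannot be parsed; the old comparison then held for every page past the first and valid results were thrown away. A minimal sketch of the two behaviours, with hypothetical names and page size:

    def should_discard(result_len, pageno, per_page=10):
        # offset of the first result on the requested page
        offset = (pageno - 1) * per_page
        # old check: `offset > result_len` is true for any pageno > 1
        # when result_len parsed to 0, so every later page came back empty
        return bool(result_len) and offset > result_len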
diff --git a/searx/engines/flickr_noapi.py b/searx/engines/flickr_noapi.py
index 198ac2cff..c8ee34f7a 100644
--- a/searx/engines/flickr_noapi.py
+++ b/searx/engines/flickr_noapi.py
@@ -109,14 +109,22 @@ def response(resp):
else:
url = build_flickr_url(photo['ownerNsid'], photo['id'])
- results.append({'url': url,
- 'title': title,
- 'img_src': img_src,
- 'thumbnail_src': thumbnail_src,
- 'content': content,
- 'author': author,
- 'source': source,
- 'img_format': img_format,
- 'template': 'images.html'})
+ result = {
+ 'url': url,
+ 'img_src': img_src,
+ 'thumbnail_src': thumbnail_src,
+ 'source': source,
+ 'img_format': img_format,
+ 'template': 'images.html'
+ }
+ try:
+ result['author'] = author
+ result['title'] = title
+ result['content'] = content
+ except:
+ result['author'] = ''
+ result['title'] = ''
+ result['content'] = ''
+ results.append(result)
return results
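The try/except around the three text fields uses a bare except:, which swallows any error; and since plain assignments of already-bound names cannot realistically fail here, the fallback likely belongs where the values are read. Where the fields come straight off the parsed photo dict, dict.get covers the same case without a handler; a sketch, with hypothetical key names:

    author = photo.get('username', '')
    title = photo.get('title', '')
    content = photo.get('description', '')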
diff --git a/searx/engines/gigablast.py b/searx/engines/gigablast.py
index a84f3f69d..2bb29a9fe 100644
--- a/searx/engines/gigablast.py
+++ b/searx/engines/gigablast.py
@@ -14,6 +14,7 @@ import random
from json import loads
from time import time
from lxml.html import fromstring
+from searx.poolrequests import get
from searx.url_utils import urlencode
from searx.utils import eval_xpath
@@ -31,13 +32,9 @@ search_string = 'search?{query}'\
'&c=main'\
'&s={offset}'\
'&format=json'\
- '&qh=0'\
- '&qlang={lang}'\
+ '&langcountry={lang}'\
'&ff={safesearch}'\
- '&rxiec={rxieu}'\
- '&ulse={ulse}'\
- '&rand={rxikd}'\
- '&dbez={dbez}'
+ '&rand={rxikd}'
# specific xpath variables
results_xpath = '//response//result'
url_xpath = './/url'
@@ -46,9 +43,26 @@ content_xpath = './/sum'
supported_languages_url = 'https://gigablast.com/search?&rxikd=1'
+extra_param = '' # gigablast requires a random extra parameter
+# which can be extracted from the source code of the search page
+
+
+def parse_extra_param(text):
+ global extra_param
+ param_lines = [x for x in text.splitlines() if x.startswith('var url=') or x.startswith('url=url+')]
+ extra_param = ''
+ for l in param_lines:
+ extra_param += l.split("'")[1]
+ extra_param = extra_param.split('&')[-1]
+
+
+def init(engine_settings=None):
+ parse_extra_param(get('http://gigablast.com/search?c=main&qlangcountry=en-us&q=south&s=10').text)
+
# do search-request
def request(query, params):
+ print("EXTRAPARAM:", extra_param)
offset = (params['pageno'] - 1) * number_of_results
if params['language'] == 'all':
@@ -67,14 +81,11 @@ def request(query, params):
search_path = search_string.format(query=urlencode({'q': query}),
offset=offset,
number_of_results=number_of_results,
- rxikd=int(time() * 1000),
- rxieu=random.randint(1000000000, 9999999999),
- ulse=random.randint(100000000, 999999999),
lang=language,
- safesearch=safesearch,
- dbez=random.randint(100000000, 999999999))
+ rxikd=int(time() * 1000),
+ safesearch=safesearch)
- params['url'] = base_url + search_path
+ params['url'] = base_url + search_path + '&' + extra_param
return params
@@ -84,7 +95,11 @@ def response(resp):
results = []
# parse results
- response_json = loads(resp.text)
+ try:
+ response_json = loads(resp.text)
+ except:
+ parse_extra_param(resp.text)
+ raise Exception('extra param expired, please reload')
for result in response_json['results']:
# append result
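The new extra_param machinery scrapes a rotating URL parameter out of the JavaScript on Gigablast's search page: parse_extra_param() concatenates the quoted literals from the 'var url=' and 'url=url+' lines and keeps the last &-separated token, init() primes it at startup, and a JSON parse failure in response() is treated as the parameter having rotated (the print() in request() looks like leftover debugging). A worked sketch of the parsing, on a hypothetical page fragment:

    page = ("var url='/search?c=main&q=test';\n"
            "url=url+'&nocache=1577836800';\n")
    param_lines = [x for x in page.splitlines()
                   if x.startswith("var url=") or x.startswith("url=url+")]
    extra_param = ''
    for line in param_lines:
        extra_param += line.split("'")[1]   # the quoted literal
    extra_param = extra_param.split('&')[-1]
    # extra_param == 'nocache=1577836800' (token name is made up here)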
diff --git a/searx/engines/ina.py b/searx/engines/ina.py
index 37a05f099..ea509649f 100644
--- a/searx/engines/ina.py
+++ b/searx/engines/ina.py
@@ -32,7 +32,7 @@ base_url = 'https://www.ina.fr'
search_url = base_url + '/layout/set/ajax/recherche/result?autopromote=&hf={ps}&b={start}&type=Video&r=&{query}'
# specific xpath variables
-results_xpath = '//div[contains(@class,"search-results--list")]/div[@class="media"]'
+results_xpath = '//div[contains(@class,"search-results--list")]//div[@class="media-body"]'
url_xpath = './/a/@href'
title_xpath = './/h3[@class="h3--title media-heading"]'
thumbnail_xpath = './/img/@src'
@@ -65,8 +65,11 @@ def response(resp):
videoid = result.xpath(url_xpath)[0]
url = base_url + videoid
title = p.unescape(extract_text(result.xpath(title_xpath)))
- thumbnail = extract_text(result.xpath(thumbnail_xpath)[0])
- if thumbnail[0] == '/':
+ try:
+ thumbnail = extract_text(result.xpath(thumbnail_xpath)[0])
+ except:
+ thumbnail = ''
+ if thumbnail and thumbnail[0] == '/':
thumbnail = base_url + thumbnail
d = extract_text(result.xpath(publishedDate_xpath)[0])
d = d.split('/')
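The thumbnail try/except guards against result.xpath(thumbnail_xpath) returning an empty list, where indexing [0] raises IndexError. A narrower equivalent, assuming that is the only failure mode being tolerated:

    nodes = result.xpath(thumbnail_xpath)
    thumbnail = extract_text(nodes[0]) if nodes else ''
    # ''.startswith('/') is False, so no separate truthiness guard needed
    if thumbnail.startswith('/'):
        thumbnail = base_url + thumbnail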
diff --git a/searx/engines/microsoft_academic.py b/searx/engines/microsoft_academic.py
index 9387b08d0..9bac0069c 100644
--- a/searx/engines/microsoft_academic.py
+++ b/searx/engines/microsoft_academic.py
@@ -45,6 +45,8 @@ def request(query, params):
def response(resp):
results = []
response_data = loads(resp.text)
+ if not response_data:
+ return results
for result in response_data['results']:
url = _get_url(result)
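The guard covers bodies that parse but are falsy: json.loads('null') yields None and json.loads('{}') an empty dict, and indexing either with ['results'] raises TypeError or KeyError. (A fully empty body would still raise inside loads() itself.) A quick illustration:

    from json import loads
    for body in ('null', '{}'):
        data = loads(body)        # None and {} are both falsy
        if not data:
            continue              # mirrors the early `return results`
        data['results']           # would raise TypeError / KeyError here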
diff --git a/searx/engines/openstreetmap.py b/searx/engines/openstreetmap.py
index 733ba6203..cec10a3c7 100644
--- a/searx/engines/openstreetmap.py
+++ b/searx/engines/openstreetmap.py
@@ -24,7 +24,7 @@ result_base_url = 'https://openstreetmap.org/{osm_type}/{osm_id}'
# do search-request
def request(query, params):
- params['url'] = base_url + search_string.format(query=query)
+ params['url'] = base_url + search_string.format(query=query.decode('utf-8'))
return params
diff --git a/searx/engines/qwant.py b/searx/engines/qwant.py
index de12955c6..54e9dafad 100644
--- a/searx/engines/qwant.py
+++ b/searx/engines/qwant.py
@@ -50,6 +50,7 @@ def request(query, params):
language = match_language(params['language'], supported_languages, language_aliases)
params['url'] += '&locale=' + language.replace('-', '_').lower()
+ params['headers']['User-Agent'] = 'Mozilla/5.0 (X11; Linux x86_64; rv:69.0) Gecko/20100101 Firefox/69.0'
return params
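Pinning a desktop Firefox User-Agent presumably works around Qwant's API rejecting the default python-requests agent. A standalone sketch of the same header on an outgoing request, with search_url standing in for whatever URL the engine has built:

    import requests
    FIREFOX_UA = ('Mozilla/5.0 (X11; Linux x86_64; rv:69.0) '
                  'Gecko/20100101 Firefox/69.0')
    def fetch(search_url):
        return requests.get(search_url, headers={'User-Agent': FIREFOX_UA})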
diff --git a/searx/engines/scanr_structures.py b/searx/engines/scanr_structures.py
index 72fd2b3c9..7208dcb70 100644
--- a/searx/engines/scanr_structures.py
+++ b/searx/engines/scanr_structures.py
@@ -29,7 +29,7 @@ def request(query, params):
params['url'] = search_url
params['method'] = 'POST'
params['headers']['Content-type'] = "application/json"
- params['data'] = dumps({"query": query,
+ params['data'] = dumps({"query": query.decode('utf-8'),
"searchField": "ALL",
"sortDirection": "ASC",
"sortOrder": "RELEVANCY",
diff --git a/searx/engines/spotify.py b/searx/engines/spotify.py
index aed756be3..00c395706 100644
--- a/searx/engines/spotify.py
+++ b/searx/engines/spotify.py
@@ -12,10 +12,14 @@
from json import loads
from searx.url_utils import urlencode
+import requests
+import base64
# engine dependent config
categories = ['music']
paging = True
+api_client_id = None
+api_client_secret = None
# search-url
url = 'https://api.spotify.com/'
@@ -31,6 +35,16 @@ def request(query, params):
params['url'] = search_url.format(query=urlencode({'q': query}), offset=offset)
+ r = requests.post(
+ 'https://accounts.spotify.com/api/token',
+ data={'grant_type': 'client_credentials'},
+ headers={'Authorization': 'Basic ' + base64.b64encode(
+ "{}:{}".format(api_client_id, api_client_secret).encode('utf-8')
+ ).decode('utf-8')}
+ )
+ j = loads(r.text)
+ params['headers'] = {'Authorization': 'Bearer {}'.format(j.get('access_token'))}
+
return params
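The Spotify engine now runs the OAuth2 client-credentials flow inline: client id and secret are base64-encoded into a Basic Authorization header, exchanged for a bearer token at accounts.spotify.com/api/token, and the token is attached to the search request. Two things stand out: it calls requests directly rather than searx.poolrequests, and it fetches a fresh token on every query even though the token response carries an expires_in lifetime. A cached variant, as a sketch under those assumptions:

    import time
    import base64
    import requests
    from json import loads

    _token = {'value': None, 'expires': 0}

    def get_bearer_token(client_id, client_secret):
        # reuse the token until shortly before it expires
        if _token['value'] and time.time() < _token['expires'] - 30:
            return _token['value']
        auth = base64.b64encode(
            '{}:{}'.format(client_id, client_secret).encode('utf-8')
        ).decode('utf-8')
        r = requests.post('https://accounts.spotify.com/api/token',
                          data={'grant_type': 'client_credentials'},
                          headers={'Authorization': 'Basic ' + auth})
        j = loads(r.text)
        _token['value'] = j.get('access_token')
        _token['expires'] = time.time() + j.get('expires_in', 3600)
        return _token['value']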
diff --git a/searx/engines/wikipedia.py b/searx/engines/wikipedia.py
index 4dae735d1..a216ba886 100644
--- a/searx/engines/wikipedia.py
+++ b/searx/engines/wikipedia.py
@@ -21,7 +21,8 @@ search_url = base_url + u'w/api.php?'\
'action=query'\
'&format=json'\
'&{query}'\
- '&prop=extracts|pageimages'\
+ '&prop=extracts|pageimages|pageprops'\
+ '&ppprop=disambiguation'\
'&exintro'\
'&explaintext'\
'&pithumbsize=300'\
@@ -79,12 +80,15 @@ def response(resp):
# wikipedia article's unique id
# first valid id is assumed to be the requested article
+ if 'pages' not in search_result['query']:
+ return results
+
for article_id in search_result['query']['pages']:
page = search_result['query']['pages'][article_id]
if int(article_id) > 0:
break
- if int(article_id) < 0:
+ if int(article_id) < 0 or 'disambiguation' in page.get('pageprops', {}):
return []
title = page.get('title')
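Requesting ppprop=disambiguation makes the API include a pageprops object whose disambiguation key is present, with an empty-string value, on disambiguation pages; that is why the new check is a dict membership test rather than a truthiness test. Against a hypothetical response fragment:

    page = {'title': 'Mercury', 'pageprops': {'disambiguation': ''}}
    # '' is falsy, so `if page.get('pageprops', {}).get('disambiguation'):`
    # would miss it; membership is the reliable check
    if 'disambiguation' in page.get('pageprops', {}):
        pass  # skip the article, as the engine now does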
@@ -96,6 +100,7 @@ def response(resp):
extract = page.get('extract')
summary = extract_first_paragraph(extract, title, image)
+ summary = summary.replace('() ', '')
# link to wikipedia article
wikipedia_link = base_url.format(language=url_lang(resp.search_params['language'])) \
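The summary.replace('() ', '') line scrubs the empty parenthetical the plain-text extract is left with when inline markup (typically the pronunciation span after the article title) is stripped. For example:

    extract = u'Paris () is the capital and most populous city of France.'
    summary = extract.replace('() ', '')
    # -> u'Paris is the capital and most populous city of France.'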