author    Alexandre Flament <alex@al-f.net>    2020-12-09 21:23:20 +0100
committer Alexandre Flament <alex@al-f.net>    2020-12-11 14:37:08 +0100
commit    d703119d3a313a406482b121ee94c6afee3bc307 (patch)
tree      7834dc899b99db4ea3f9f81542e8e029bf5b7d04 /searx/engines
parent    033f39bff7b3365256491014140e35aa1e974d4e (diff)
[enh] add raise_for_httperror
check HTTP response:

* detect some common CAPTCHA challenges (no solving). In this case the engine is suspended for a long time.
* otherwise raise HTTPError as before

The check is done in poolrequests.py (it was previously done in search.py).

Update qwant, wikipedia and wikidata to use raise_for_httperror instead of raise_for_status.
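Note: the central check itself lands in searx/poolrequests.py, which is outside this diffstat. A minimal sketch of the idea, assuming the requests API; apart from SearxEngineCaptchaException and the raise_for_httperror name, everything here is illustrative rather than the actual implementation:

    import requests
    from searx.exceptions import SearxEngineCaptchaException

    def raise_for_httperror(resp):
        # detect a CAPTCHA challenge (no solving); searx reacts to this
        # exception by suspending the engine for a long time
        if resp.status_code == 429:
            raise SearxEngineCaptchaException()
        # otherwise raise HTTPError exactly as raise_for_status did before
        resp.raise_for_status()

    def request(method, url, **kwargs):
        # per-request opt-out: engines such as qwant and wikipedia set
        # params['raise_for_httperror'] = False and run the check themselves
        check = kwargs.pop('raise_for_httperror', True)
        resp = requests.request(method, url, **kwargs)
        if check:
            raise_for_httperror(resp)
        return resp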
Diffstat (limited to 'searx/engines')
-rw-r--r--    searx/engines/__init__.py      8
-rw-r--r--    searx/engines/qwant.py        27
-rw-r--r--    searx/engines/wikidata.py      3
-rw-r--r--    searx/engines/wikipedia.py     4
4 files changed, 25 insertions(+), 17 deletions(-)
diff --git a/searx/engines/__init__.py b/searx/engines/__init__.py
index 0b77f2a95..b2a9b25a4 100644
--- a/searx/engines/__init__.py
+++ b/searx/engines/__init__.py
@@ -281,8 +281,12 @@ def initialize_engines(engine_list):
load_engines(engine_list)
def engine_init(engine_name, init_fn):
- init_fn(get_engine_from_settings(engine_name))
- logger.debug('%s engine: Initialized', engine_name)
+ try:
+ init_fn(get_engine_from_settings(engine_name))
+ except Exception:
+ logger.exception('%s engine: Fail to initialize', engine_name)
+ else:
+ logger.debug('%s engine: Initialized', engine_name)
for engine_name, engine in engines.items():
if hasattr(engine, 'init'):
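The __init__.py hunk turns engine start-up from fail-fast into fail-soft: an engine whose init raises no longer prevents the remaining engines from loading. A standalone illustration of the try/except/else pattern; the settings dict and the failing engine are made up for the example:

    import logging

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger('searx')

    def engine_init(engine_name, init_fn):
        try:
            init_fn({})  # the real code passes get_engine_from_settings(engine_name)
        except Exception:
            logger.exception('%s engine: Fail to initialize', engine_name)
        else:
            logger.debug('%s engine: Initialized', engine_name)

    def failing_init(settings):
        raise RuntimeError('simulated init failure')

    engine_init('broken', failing_init)       # traceback is logged, no crash
    engine_init('ok', lambda settings: None)  # 'ok engine: Initialized'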
diff --git a/searx/engines/qwant.py b/searx/engines/qwant.py
index c909ce11b..b785719d9 100644
--- a/searx/engines/qwant.py
+++ b/searx/engines/qwant.py
@@ -14,6 +14,8 @@ from datetime import datetime
from json import loads
from urllib.parse import urlencode
from searx.utils import html_to_text, match_language
+from searx.exceptions import SearxEngineAPIException, SearxEngineCaptchaException
+from searx.raise_for_httperror import raise_for_httperror
# engine dependent config
@@ -24,8 +26,7 @@ supported_languages_url = 'https://qwant.com/region'
category_to_keyword = {'general': 'web',
'images': 'images',
- 'news': 'news',
- 'social media': 'social'}
+ 'news': 'news'}
# search-url
url = 'https://api.qwant.com/api/search/{keyword}?count=10&offset={offset}&f=&{query}&t={keyword}&uiv=4'
@@ -51,6 +52,7 @@ def request(query, params):
params['url'] += '&locale=' + language.replace('-', '_').lower()
params['headers']['User-Agent'] = 'Mozilla/5.0 (X11; Linux x86_64; rv:69.0) Gecko/20100101 Firefox/69.0'
+ params['raise_for_httperror'] = False
return params
@@ -58,8 +60,20 @@ def request(query, params):
def response(resp):
results = []
+ # According to https://www.qwant.com/js/app.js
+ if resp.status_code == 429:
+ raise SearxEngineCaptchaException()
+
+ # raise for other errors
+ raise_for_httperror(resp)
+
+ # load JSON result
search_results = loads(resp.text)
+ # check for an API error
+ if search_results.get('status') != 'success':
+ raise SearxEngineAPIException('API error ' + str(search_results.get('error', '')))
+
# return empty array if there are no results
if 'data' not in search_results:
return []
@@ -90,15 +104,6 @@ def response(resp):
'thumbnail_src': thumbnail_src,
'img_src': img_src})
- elif category_to_keyword.get(categories[0], '') == 'social':
- published_date = datetime.fromtimestamp(result['date'], None)
- img_src = result.get('img', None)
- results.append({'url': res_url,
- 'title': title,
- 'publishedDate': published_date,
- 'content': content,
- 'img_src': img_src})
-
elif category_to_keyword.get(categories[0], '') == 'news':
published_date = datetime.fromtimestamp(result['date'], None)
media = result.get('media', [])
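The qwant changes stack three error checks, and their order matters: the 429 CAPTCHA case must be recognised before the generic HTTP check (which would otherwise turn it into a plain HTTPError), and the JSON payload still needs its own check because the API can report an error inside an HTTP 200 reply. A condensed sketch of that sequence; check_qwant_response is an illustrative name, not part of the commit:

    from json import loads
    from searx.exceptions import SearxEngineAPIException, SearxEngineCaptchaException
    from searx.raise_for_httperror import raise_for_httperror

    def check_qwant_response(resp):
        # 1. engine-specific: Qwant fronts its CAPTCHA with a 429 status
        if resp.status_code == 429:
            raise SearxEngineCaptchaException()
        # 2. generic: any other HTTP error status
        raise_for_httperror(resp)
        # 3. API-level: a 200 reply whose body reports a failure
        search_results = loads(resp.text)
        if search_results.get('status') != 'success':
            raise SearxEngineAPIException('API error ' + str(search_results.get('error', '')))
        return search_results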
diff --git a/searx/engines/wikidata.py b/searx/engines/wikidata.py
index 60d0dc9a0..8d787caac 100644
--- a/searx/engines/wikidata.py
+++ b/searx/engines/wikidata.py
@@ -161,9 +161,6 @@ def request(query, params):
def response(resp):
results = []
- if resp.status_code != 200:
- logger.debug('SPARQL endpoint error %s', resp.content.decode())
- resp.raise_for_status()
jsonresponse = loads(resp.content.decode())
language = resp.search_params['language'].lower()
diff --git a/searx/engines/wikipedia.py b/searx/engines/wikipedia.py
index 000e1af76..54d75108e 100644
--- a/searx/engines/wikipedia.py
+++ b/searx/engines/wikipedia.py
@@ -14,6 +14,7 @@ from urllib.parse import quote
from json import loads
from lxml.html import fromstring
from searx.utils import match_language, searx_useragent
+from searx.raise_for_httperror import raise_for_httperror
# search-url
search_url = 'https://{language}.wikipedia.org/api/rest_v1/page/summary/{title}'
@@ -37,7 +38,7 @@ def request(query, params):
language=url_lang(params['language']))
params['headers']['User-Agent'] = searx_useragent()
- params['raise_for_status'] = False
+ params['raise_for_httperror'] = False
params['soft_max_redirects'] = 2
return params
@@ -47,6 +48,7 @@ def request(query, params):
def response(resp):
if resp.status_code == 404:
return []
+ raise_for_httperror(resp)
results = []
api_result = loads(resp.text)
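wikipedia opts out of the automatic check for a different reason than qwant: the REST summary endpoint answers 404 for a missing article, which should produce an empty result list rather than an exception. A sketch of that flow, with parse_summary as an illustrative stand-in for the real response():

    from json import loads
    from searx.raise_for_httperror import raise_for_httperror

    def parse_summary(resp):
        # 404 means "no article with this title": a normal empty result
        if resp.status_code == 404:
            return []
        # every other error status (403, 429, 5xx, ...) raises here
        raise_for_httperror(resp)
        api_result = loads(resp.text)
        return [api_result]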