| author | Alexandre Flament <alex@al-f.net> | 2020-12-03 10:31:44 +0100 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2020-12-03 10:31:44 +0100 |
| commit | 89fbb85d454959be725cd4ca19c36c31d05d3289 (patch) | |
| tree | 7ef098d4630c5416aad58f0d3ce5abb27390423f /searx/engines | |
| parent | 6b5a57882242f24f867b6aa14b79b514720c6d83 (diff) | |
| parent | 64cccae99e625f3ebd879f94797decd0d824608d (diff) | |
Merge pull request #2332 from dalf/metrology-errors
[enh] record exception details per engine
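The merge touches 15 engines but applies only two recurring changes: raw `element.xpath(...)` calls (and the bare `[0]` indexing that follows them) are replaced by the `eval_xpath` / `eval_xpath_list` / `eval_xpath_getindex` helpers, and bare `Exception` / `RuntimeWarning` raises are replaced by typed exceptions from `searx.exceptions`. The helpers themselves live in `searx/utils.py` and are not part of this diff; the sketch below reconstructs their apparent contract from the call sites, not the committed implementation, and the `SearxEngineXPathException` constructor signature is an assumption. (The bing_news hunk also shows the committed helpers accepting a pre-compiled `lxml.etree.XPath` object; this sketch handles only string specs.)

```python
from searx.exceptions import SearxEngineXPathException  # signature assumed

_NOTSET = object()  # sentinel: distinguishes "no default given" from default=None


def eval_xpath_list(element, xpath_spec, min_len=None):
    # Evaluate the XPath and require a list result; a too-short result
    # raises a typed exception instead of surfacing later as IndexError.
    result = element.xpath(xpath_spec)
    if not isinstance(result, list):
        raise SearxEngineXPathException(xpath_spec, 'the result is not a list')
    if min_len is not None and len(result) < min_len:
        raise SearxEngineXPathException(xpath_spec, 'not enough results')
    return result


def eval_xpath_getindex(element, xpath_spec, index, default=_NOTSET):
    # Safe replacement for the `element.xpath(spec)[index]` pattern seen in
    # the "-" lines below: out-of-range access returns `default` when one is
    # given, otherwise raises an exception that names the offending XPath,
    # which the per-engine error recording can attribute and count.
    result = eval_xpath_list(element, xpath_spec)
    if -len(result) <= index < len(result):
        return result[index]
    if default is _NOTSET:
        raise SearxEngineXPathException(xpath_spec, 'index {} not found'.format(index))
    return default
```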
Diffstat (limited to 'searx/engines')
| -rw-r--r-- | searx/engines/1337x.py | 14 |
| -rw-r--r-- | searx/engines/__init__.py | 3 |
| -rw-r--r-- | searx/engines/acgsou.py | 27 |
| -rw-r--r-- | searx/engines/ahmia.py | 14 |
| -rw-r--r-- | searx/engines/apkmirror.py | 9 |
| -rw-r--r-- | searx/engines/archlinux.py | 6 |
| -rw-r--r-- | searx/engines/arxiv.py | 20 |
| -rw-r--r-- | searx/engines/bing_news.py | 13 |
| -rw-r--r-- | searx/engines/duckduckgo_images.py | 8 |
| -rw-r--r-- | searx/engines/elasticsearch.py | 3 |
| -rw-r--r-- | searx/engines/google.py | 52 |
| -rw-r--r-- | searx/engines/google_images.py | 6 |
| -rw-r--r-- | searx/engines/google_videos.py | 10 |
| -rw-r--r-- | searx/engines/xpath.py | 29 |
| -rw-r--r-- | searx/engines/youtube_api.py | 3 |
15 files changed, 104 insertions, 113 deletions
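The second recurring change is visible at the bottom of several hunks: engines stop raising bare `Exception` or `RuntimeWarning` and instead raise `SearxEngineAPIException` or `SearxEngineCaptchaException`, so the error recorded for an engine says *what kind* of failure occurred. A hypothetical minimal engine `response()` showing the resulting shape (the JSON layout and field names are invented for illustration):

```python
from json import loads

from searx.exceptions import SearxEngineAPIException


def response(resp):
    # Typed exceptions replace `raise Exception(...)`: the search core can
    # catch SearxEngineAPIException specifically and record the failure per
    # engine instead of treating it as an unknown crash.
    data = loads(resp.text)
    if 'error' in data:  # hypothetical error envelope
        raise SearxEngineAPIException(data['error'])
    return [{'url': item['url'], 'title': item['title'], 'content': item.get('snippet', '')}
            for item in data.get('items', [])]
```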
diff --git a/searx/engines/1337x.py b/searx/engines/1337x.py
index 9e045bc51..18478876a 100644
--- a/searx/engines/1337x.py
+++ b/searx/engines/1337x.py
@@ -1,6 +1,6 @@
 from urllib.parse import quote, urljoin
 from lxml import html
-from searx.utils import extract_text, get_torrent_size
+from searx.utils import extract_text, get_torrent_size, eval_xpath, eval_xpath_list, eval_xpath_getindex
 
 url = 'https://1337x.to/'
 
@@ -20,12 +20,12 @@ def response(resp):
 
     dom = html.fromstring(resp.text)
 
-    for result in dom.xpath('//table[contains(@class, "table-list")]/tbody//tr'):
-        href = urljoin(url, result.xpath('./td[contains(@class, "name")]/a[2]/@href')[0])
-        title = extract_text(result.xpath('./td[contains(@class, "name")]/a[2]'))
-        seed = extract_text(result.xpath('.//td[contains(@class, "seeds")]'))
-        leech = extract_text(result.xpath('.//td[contains(@class, "leeches")]'))
-        filesize_info = extract_text(result.xpath('.//td[contains(@class, "size")]/text()'))
+    for result in eval_xpath_list(dom, '//table[contains(@class, "table-list")]/tbody//tr'):
+        href = urljoin(url, eval_xpath_getindex(result, './td[contains(@class, "name")]/a[2]/@href', 0))
+        title = extract_text(eval_xpath(result, './td[contains(@class, "name")]/a[2]'))
+        seed = extract_text(eval_xpath(result, './/td[contains(@class, "seeds")]'))
+        leech = extract_text(eval_xpath(result, './/td[contains(@class, "leeches")]'))
+        filesize_info = extract_text(eval_xpath(result, './/td[contains(@class, "size")]/text()'))
         filesize, filesize_multiplier = filesize_info.split()
         filesize = get_torrent_size(filesize, filesize_multiplier)
diff --git a/searx/engines/__init__.py b/searx/engines/__init__.py
index a78c4a8c3..ddd6a7feb 100644
--- a/searx/engines/__init__.py
+++ b/searx/engines/__init__.py
@@ -132,8 +132,9 @@ def load_engine(engine_data):
             lambda: engine._fetch_supported_languages(get(engine.supported_languages_url)))
 
     engine.stats = {
+        'sent_search_count': 0,  # sent search
+        'search_count': 0,  # succesful search
         'result_count': 0,
-        'search_count': 0,
         'engine_time': 0,
         'engine_time_count': 0,
         'score_count': 0,
diff --git a/searx/engines/acgsou.py b/searx/engines/acgsou.py
index a436df283..b8b367c24 100644
--- a/searx/engines/acgsou.py
+++ b/searx/engines/acgsou.py
@@ -11,7 +11,7 @@
 
 from urllib.parse import urlencode
 from lxml import html
-from searx.utils import extract_text, get_torrent_size
+from searx.utils import extract_text, get_torrent_size, eval_xpath_list, eval_xpath_getindex
 
 # engine dependent config
 categories = ['files', 'images', 'videos', 'music']
@@ -37,29 +37,26 @@ def request(query, params):
 def response(resp):
     results = []
 
     dom = html.fromstring(resp.text)
-    for result in dom.xpath(xpath_results):
+    for result in eval_xpath_list(dom, xpath_results):
         # defaults
         filesize = 0
         magnet_link = "magnet:?xt=urn:btih:{}&tr=http://tracker.acgsou.com:2710/announce"
 
-        try:
-            category = extract_text(result.xpath(xpath_category)[0])
-        except:
-            pass
-
-        page_a = result.xpath(xpath_title)[0]
+        category = extract_text(eval_xpath_getindex(result, xpath_category, 0, default=[]))
+        page_a = eval_xpath_getindex(result, xpath_title, 0)
         title = extract_text(page_a)
         href = base_url + page_a.attrib.get('href')
 
         magnet_link = magnet_link.format(page_a.attrib.get('href')[5:-5])
 
-        try:
-            filesize_info = result.xpath(xpath_filesize)[0]
-            filesize = filesize_info[:-2]
-            filesize_multiplier = filesize_info[-2:]
-            filesize = get_torrent_size(filesize, filesize_multiplier)
-        except:
-            pass
+        filesize_info = eval_xpath_getindex(result, xpath_filesize, 0, default=None)
+        if filesize_info:
+            try:
+                filesize = filesize_info[:-2]
+                filesize_multiplier = filesize_info[-2:]
+                filesize = get_torrent_size(filesize, filesize_multiplier)
+            except:
+                pass
 
         # I didn't add download/seed/leech count since as I figured out they are generated randomly everytime
         content = 'Category: "{category}".'
         content = content.format(category=category)
diff --git a/searx/engines/ahmia.py b/searx/engines/ahmia.py
index d9fcc6ca7..7a2ae0075 100644
--- a/searx/engines/ahmia.py
+++ b/searx/engines/ahmia.py
@@ -12,7 +12,7 @@
 
 from urllib.parse import urlencode, urlparse, parse_qs
 from lxml.html import fromstring
-from searx.engines.xpath import extract_url, extract_text
+from searx.engines.xpath import extract_url, extract_text, eval_xpath_list, eval_xpath
 
 # engine config
 categories = ['onions']
@@ -50,17 +50,17 @@ def response(resp):
 
     # trim results so there's not way too many at once
     first_result_index = page_size * (resp.search_params.get('pageno', 1) - 1)
-    all_results = dom.xpath(results_xpath)
+    all_results = eval_xpath_list(dom, results_xpath)
     trimmed_results = all_results[first_result_index:first_result_index + page_size]
 
     # get results
     for result in trimmed_results:
         # remove ahmia url and extract the actual url for the result
-        raw_url = extract_url(result.xpath(url_xpath), search_url)
+        raw_url = extract_url(eval_xpath_list(result, url_xpath, min_len=1), search_url)
         cleaned_url = parse_qs(urlparse(raw_url).query).get('redirect_url', [''])[0]
 
-        title = extract_text(result.xpath(title_xpath))
-        content = extract_text(result.xpath(content_xpath))
+        title = extract_text(eval_xpath(result, title_xpath))
+        content = extract_text(eval_xpath(result, content_xpath))
 
         results.append({'url': cleaned_url,
                         'title': title,
@@ -68,11 +68,11 @@ def response(resp):
                         'is_onion': True})
 
     # get spelling corrections
-    for correction in dom.xpath(correction_xpath):
+    for correction in eval_xpath_list(dom, correction_xpath):
         results.append({'correction': extract_text(correction)})
 
     # get number of results
-    number_of_results = dom.xpath(number_of_results_xpath)
+    number_of_results = eval_xpath(dom, number_of_results_xpath)
     if number_of_results:
         try:
             results.append({'number_of_results': int(extract_text(number_of_results))})
diff --git a/searx/engines/apkmirror.py b/searx/engines/apkmirror.py
index a8ff499af..3a948dcb4 100644
--- a/searx/engines/apkmirror.py
+++ b/searx/engines/apkmirror.py
@@ -11,7 +11,7 @@
 
 from urllib.parse import urlencode
 from lxml import html
-from searx.utils import extract_text
+from searx.utils import extract_text, eval_xpath_list, eval_xpath_getindex
 
 # engine dependent config
@@ -42,12 +42,13 @@ def response(resp):
     dom = html.fromstring(resp.text)
 
     # parse results
-    for result in dom.xpath('.//div[@id="content"]/div[@class="listWidget"]/div[@class="appRow"]'):
+    for result in eval_xpath_list(dom, './/div[@id="content"]/div[@class="listWidget"]/div[@class="appRow"]'):
 
-        link = result.xpath('.//h5/a')[0]
+        link = eval_xpath_getindex(result, './/h5/a', 0)
 
         url = base_url + link.attrib.get('href') + '#downloads'
         title = extract_text(link)
-        thumbnail_src = base_url + result.xpath('.//img')[0].attrib.get('src').replace('&w=32&h=32', '&w=64&h=64')
+        thumbnail_src = base_url\
+            + eval_xpath_getindex(result, './/img', 0).attrib.get('src').replace('&w=32&h=32', '&w=64&h=64')
 
         res = {
             'url': url,
diff --git a/searx/engines/archlinux.py b/searx/engines/archlinux.py
index 8f93f4f38..04117c07d 100644
--- a/searx/engines/archlinux.py
+++ b/searx/engines/archlinux.py
@@ -13,7 +13,7 @@
 
 from urllib.parse import urlencode, urljoin
 from lxml import html
-from searx.utils import extract_text
+from searx.utils import extract_text, eval_xpath_list, eval_xpath_getindex
 
 # engine dependent config
 categories = ['it']
@@ -131,8 +131,8 @@ def response(resp):
     dom = html.fromstring(resp.text)
 
     # parse results
-    for result in dom.xpath(xpath_results):
-        link = result.xpath(xpath_link)[0]
+    for result in eval_xpath_list(dom, xpath_results):
+        link = eval_xpath_getindex(result, xpath_link, 0)
         href = urljoin(base_url, link.attrib.get('href'))
         title = extract_text(link)
diff --git a/searx/engines/arxiv.py b/searx/engines/arxiv.py
index 6e231c382..c702c5987 100644
--- a/searx/engines/arxiv.py
+++ b/searx/engines/arxiv.py
@@ -13,6 +13,7 @@
 
 from lxml import html
 from datetime import datetime
+from searx.utils import eval_xpath_list, eval_xpath_getindex
 
 categories = ['science']
 
@@ -42,29 +43,26 @@ def response(resp):
     results = []
 
     dom = html.fromstring(resp.content)
-    search_results = dom.xpath('//entry')
-    for entry in search_results:
-        title = entry.xpath('.//title')[0].text
+    for entry in eval_xpath_list(dom, '//entry'):
+        title = eval_xpath_getindex(entry, './/title', 0).text
 
-        url = entry.xpath('.//id')[0].text
+        url = eval_xpath_getindex(entry, './/id', 0).text
 
         content_string = '{doi_content}{abstract_content}'
 
-        abstract = entry.xpath('.//summary')[0].text
+        abstract = eval_xpath_getindex(entry, './/summary', 0).text
 
         # If a doi is available, add it to the snipppet
-        try:
-            doi_content = entry.xpath('.//link[@title="doi"]')[0].text
-            content = content_string.format(doi_content=doi_content, abstract_content=abstract)
-        except:
-            content = content_string.format(doi_content="", abstract_content=abstract)
+        doi_element = eval_xpath_getindex(entry, './/link[@title="doi"]', 0, default=None)
+        doi_content = doi_element.text if doi_element is not None else ''
+        content = content_string.format(doi_content=doi_content, abstract_content=abstract)
 
         if len(content) > 300:
             content = content[0:300] + "..."
         # TODO: center snippet on query term
 
-        publishedDate = datetime.strptime(entry.xpath('.//published')[0].text, '%Y-%m-%dT%H:%M:%SZ')
+        publishedDate = datetime.strptime(eval_xpath_getindex(entry, './/published', 0).text, '%Y-%m-%dT%H:%M:%SZ')
 
         res_dict = {'url': url,
                     'title': title,
diff --git a/searx/engines/bing_news.py b/searx/engines/bing_news.py
index 5489c7549..b95def48b 100644
--- a/searx/engines/bing_news.py
+++ b/searx/engines/bing_news.py
@@ -15,7 +15,8 @@ from datetime import datetime
 from dateutil import parser
 from urllib.parse import urlencode, urlparse, parse_qsl
 from lxml import etree
-from searx.utils import list_get, match_language
+from lxml.etree import XPath
+from searx.utils import match_language, eval_xpath_getindex
 from searx.engines.bing import language_aliases
 from searx.engines.bing import _fetch_supported_languages, supported_languages_url  # NOQA # pylint: disable=unused-import
 
@@ -94,12 +95,12 @@ def response(resp):
     # parse results
     for item in rss.xpath('./channel/item'):
         # url / title / content
-        url = url_cleanup(item.xpath('./link/text()')[0])
-        title = list_get(item.xpath('./title/text()'), 0, url)
-        content = list_get(item.xpath('./description/text()'), 0, '')
+        url = url_cleanup(eval_xpath_getindex(item, './link/text()', 0, default=None))
+        title = eval_xpath_getindex(item, './title/text()', 0, default=url)
+        content = eval_xpath_getindex(item, './description/text()', 0, default='')
 
         # publishedDate
-        publishedDate = list_get(item.xpath('./pubDate/text()'), 0)
+        publishedDate = eval_xpath_getindex(item, './pubDate/text()', 0, default=None)
         try:
             publishedDate = parser.parse(publishedDate, dayfirst=False)
         except TypeError:
@@ -108,7 +109,7 @@ def response(resp):
             publishedDate = datetime.now()
 
         # thumbnail
-        thumbnail = list_get(item.xpath('./News:Image/text()', namespaces=ns), 0)
+        thumbnail = eval_xpath_getindex(item, XPath('./News:Image/text()', namespaces=ns), 0, default=None)
         if thumbnail is not None:
             thumbnail = image_url_cleanup(thumbnail)
diff --git a/searx/engines/duckduckgo_images.py b/searx/engines/duckduckgo_images.py
index 438a8d54c..009f81cca 100644
--- a/searx/engines/duckduckgo_images.py
+++ b/searx/engines/duckduckgo_images.py
@@ -15,6 +15,7 @@
 
 from json import loads
 from urllib.parse import urlencode
+from searx.exceptions import SearxEngineAPIException
 from searx.engines.duckduckgo import get_region_code
 from searx.engines.duckduckgo import _fetch_supported_languages, supported_languages_url  # NOQA # pylint: disable=unused-import
 from searx.poolrequests import get
@@ -37,7 +38,7 @@ def get_vqd(query, headers):
     res = get(query_url, headers=headers)
     content = res.text
     if content.find('vqd=\'') == -1:
-        raise Exception('Request failed')
+        raise SearxEngineAPIException('Request failed')
     vqd = content[content.find('vqd=\'') + 5:]
     vqd = vqd[:vqd.find('\'')]
     return vqd
@@ -71,10 +72,7 @@ def response(resp):
     results = []
 
     content = resp.text
-    try:
-        res_json = loads(content)
-    except:
-        raise Exception('Cannot parse results')
+    res_json = loads(content)
 
     # parse results
     for result in res_json['results']:
diff --git a/searx/engines/elasticsearch.py b/searx/engines/elasticsearch.py
index 99e93d876..081736c1c 100644
--- a/searx/engines/elasticsearch.py
+++ b/searx/engines/elasticsearch.py
@@ -1,5 +1,6 @@
 from json import loads, dumps
 from requests.auth import HTTPBasicAuth
+from searx.exceptions import SearxEngineAPIException
 
 base_url = 'http://localhost:9200'
 
@@ -107,7 +108,7 @@ def response(resp):
 
     resp_json = loads(resp.text)
     if 'error' in resp_json:
-        raise Exception(resp_json['error'])
+        raise SearxEngineAPIException(resp_json['error'])
 
     for result in resp_json['hits']['hits']:
         r = {key: str(value) if not key.startswith('_') else value for key, value in result['_source'].items()}
diff --git a/searx/engines/google.py b/searx/engines/google.py
index 83b18a9a0..17ab21f6a 100644
--- a/searx/engines/google.py
+++ b/searx/engines/google.py
@@ -20,9 +20,10 @@ Definitions`_.
 
 from urllib.parse import urlencode, urlparse
 from lxml import html
-from flask_babel import gettext
 from searx import logger
-from searx.utils import match_language, extract_text, eval_xpath
+from searx.utils import match_language, extract_text, eval_xpath, eval_xpath_list, eval_xpath_getindex
+from searx.exceptions import SearxEngineCaptchaException
+
 
 logger = logger.getChild('google engine')
 
@@ -131,14 +132,6 @@ suggestion_xpath = '//div[contains(@class, "card-section")]//a'
 spelling_suggestion_xpath = '//div[@class="med"]/p/a'
 
 
-def extract_text_from_dom(result, xpath):
-    """returns extract_text on the first result selected by the xpath or None"""
-    r = eval_xpath(result, xpath)
-    if len(r) > 0:
-        return extract_text(r[0])
-    return None
-
-
 def get_lang_country(params, lang_list, custom_aliases):
     """Returns a tuple with *langauage* on its first and *country* on its
     second position."""
@@ -210,10 +203,10 @@ def response(resp):
     # detect google sorry
     resp_url = urlparse(resp.url)
     if resp_url.netloc == 'sorry.google.com' or resp_url.path == '/sorry/IndexRedirect':
-        raise RuntimeWarning('sorry.google.com')
+        raise SearxEngineCaptchaException()
 
     if resp_url.path.startswith('/sorry'):
-        raise RuntimeWarning(gettext('CAPTCHA required'))
+        raise SearxEngineCaptchaException()
 
     # which subdomain ?
     # subdomain = resp.search_params.get('google_subdomain')
@@ -229,18 +222,17 @@ def response(resp):
         logger.debug("did not found 'answer'")
 
     # results --> number_of_results
-    try:
-        _txt = eval_xpath(dom, '//div[@id="result-stats"]//text()')[0]
-        _digit = ''.join([n for n in _txt if n.isdigit()])
-        number_of_results = int(_digit)
-        results.append({'number_of_results': number_of_results})
-
-    except Exception as e:  # pylint: disable=broad-except
-        logger.debug("did not 'number_of_results'")
-        logger.error(e, exc_info=True)
+    try:
+        _txt = eval_xpath_getindex(dom, '//div[@id="result-stats"]//text()', 0)
+        _digit = ''.join([n for n in _txt if n.isdigit()])
+        number_of_results = int(_digit)
+        results.append({'number_of_results': number_of_results})
+    except Exception as e:  # pylint: disable=broad-except
+        logger.debug("did not 'number_of_results'")
+        logger.error(e, exc_info=True)
 
     # parse results
-    for result in eval_xpath(dom, results_xpath):
+    for result in eval_xpath_list(dom, results_xpath):
 
         # google *sections*
         if extract_text(eval_xpath(result, g_section_with_header)):
             continue
 
         try:
-            title_tag = eval_xpath(result, title_xpath)
-            if not title_tag:
+            title_tag = eval_xpath_getindex(result, title_xpath, 0, default=None)
+            if title_tag is None:
                 # this not one of the common google results *section*
                 logger.debug('ingoring <div class="g" ../> section: missing title')
                 continue
-            title = extract_text(title_tag[0])
-            url = eval_xpath(result, href_xpath)[0]
-            content = extract_text_from_dom(result, content_xpath)
+            title = extract_text(title_tag)
+            url = eval_xpath_getindex(result, href_xpath, 0)
+            content = extract_text(eval_xpath_getindex(result, content_xpath, 0, default=None), allow_none=True)
             results.append({
                 'url': url,
                 'title': title,
@@ -270,11 +262,11 @@ def response(resp):
             continue
 
     # parse suggestion
-    for suggestion in eval_xpath(dom, suggestion_xpath):
+    for suggestion in eval_xpath_list(dom, suggestion_xpath):
         # append suggestion
         results.append({'suggestion': extract_text(suggestion)})
 
-    for correction in eval_xpath(dom, spelling_suggestion_xpath):
+    for correction in eval_xpath_list(dom, spelling_suggestion_xpath):
         results.append({'correction': extract_text(correction)})
 
     # return results
@@ -286,7 +278,7 @@ def _fetch_supported_languages(resp):
     ret_val = {}
     dom = html.fromstring(resp.text)
 
-    radio_buttons = eval_xpath(dom, '//*[@id="langSec"]//input[@name="lr"]')
+    radio_buttons = eval_xpath_list(dom, '//*[@id="langSec"]//input[@name="lr"]')
 
     for x in radio_buttons:
         name = x.get("data-name")
diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py
index a3daf6070..9ef1be753 100644
--- a/searx/engines/google_images.py
+++ b/searx/engines/google_images.py
@@ -26,8 +26,8 @@ Definitions`_.
 
 from urllib.parse import urlencode, urlparse, unquote
 from lxml import html
-from flask_babel import gettext
 from searx import logger
+from searx.exceptions import SearxEngineCaptchaException
 from searx.utils import extract_text, eval_xpath
 from searx.engines.google import _fetch_supported_languages, supported_languages_url  # NOQA # pylint: disable=unused-import
 
@@ -128,10 +128,10 @@ def response(resp):
     # detect google sorry
     resp_url = urlparse(resp.url)
     if resp_url.netloc == 'sorry.google.com' or resp_url.path == '/sorry/IndexRedirect':
-        raise RuntimeWarning('sorry.google.com')
+        raise SearxEngineCaptchaException()
 
     if resp_url.path.startswith('/sorry'):
-        raise RuntimeWarning(gettext('CAPTCHA required'))
+        raise SearxEngineCaptchaException()
 
     # which subdomain ?
     # subdomain = resp.search_params.get('google_subdomain')
diff --git a/searx/engines/google_videos.py b/searx/engines/google_videos.py
index 1e6c8b3ee..eedefbf45 100644
--- a/searx/engines/google_videos.py
+++ b/searx/engines/google_videos.py
@@ -13,7 +13,7 @@
 from datetime import date, timedelta
 from urllib.parse import urlencode
 from lxml import html
-from searx.utils import extract_text
+from searx.utils import extract_text, eval_xpath, eval_xpath_list, eval_xpath_getindex
 import re
 
 # engine dependent config
@@ -66,11 +66,11 @@ def response(resp):
     dom = html.fromstring(resp.text)
 
     # parse results
-    for result in dom.xpath('//div[@class="g"]'):
+    for result in eval_xpath_list(dom, '//div[@class="g"]'):
 
-        title = extract_text(result.xpath('.//h3'))
-        url = result.xpath('.//div[@class="r"]/a/@href')[0]
-        content = extract_text(result.xpath('.//span[@class="st"]'))
+        title = extract_text(eval_xpath(result, './/h3'))
+        url = eval_xpath_getindex(result, './/div[@class="r"]/a/@href', 0)
+        content = extract_text(eval_xpath(result, './/span[@class="st"]'))
 
         # get thumbnails
         script = str(dom.xpath('//script[contains(., "_setImagesSrc")]')[0].text)
diff --git a/searx/engines/xpath.py b/searx/engines/xpath.py
index a569d9160..d420e250a 100644
--- a/searx/engines/xpath.py
+++ b/searx/engines/xpath.py
@@ -1,6 +1,6 @@
 from lxml import html
 from urllib.parse import urlencode
-from searx.utils import extract_text, extract_url, eval_xpath
+from searx.utils import extract_text, extract_url, eval_xpath, eval_xpath_list
 
 search_url = None
 url_xpath = None
@@ -42,21 +42,22 @@ def response(resp):
     is_onion = True if 'onions' in categories else False
 
     if results_xpath:
-        for result in eval_xpath(dom, results_xpath):
-            url = extract_url(eval_xpath(result, url_xpath), search_url)
-            title = extract_text(eval_xpath(result, title_xpath))
-            content = extract_text(eval_xpath(result, content_xpath))
+        for result in eval_xpath_list(dom, results_xpath):
+            url = extract_url(eval_xpath_list(result, url_xpath, min_len=1), search_url)
+            title = extract_text(eval_xpath_list(result, title_xpath, min_len=1))
+            content = extract_text(eval_xpath_list(result, content_xpath, min_len=1))
             tmp_result = {'url': url, 'title': title, 'content': content}
 
             # add thumbnail if available
             if thumbnail_xpath:
-                thumbnail_xpath_result = eval_xpath(result, thumbnail_xpath)
+                thumbnail_xpath_result = eval_xpath_list(result, thumbnail_xpath)
                 if len(thumbnail_xpath_result) > 0:
                     tmp_result['img_src'] = extract_url(thumbnail_xpath_result, search_url)
 
             # add alternative cached url if available
             if cached_xpath:
-                tmp_result['cached_url'] = cached_url + extract_text(result.xpath(cached_xpath))
+                tmp_result['cached_url'] = cached_url\
+                    + extract_text(eval_xpath_list(result, cached_xpath, min_len=1))
 
             if is_onion:
                 tmp_result['is_onion'] = True
@@ -66,19 +67,19 @@ def response(resp):
         if cached_xpath:
             for url, title, content, cached in zip(
                 (extract_url(x, search_url) for
-                 x in dom.xpath(url_xpath)),
-                map(extract_text, dom.xpath(title_xpath)),
-                map(extract_text, dom.xpath(content_xpath)),
-                map(extract_text, dom.xpath(cached_xpath))
+                 x in eval_xpath_list(dom, url_xpath)),
+                map(extract_text, eval_xpath_list(dom, title_xpath)),
+                map(extract_text, eval_xpath_list(dom, content_xpath)),
+                map(extract_text, eval_xpath_list(dom, cached_xpath))
             ):
                 results.append({'url': url, 'title': title, 'content': content,
                                 'cached_url': cached_url + cached, 'is_onion': is_onion})
         else:
             for url, title, content in zip(
                 (extract_url(x, search_url) for
-                 x in dom.xpath(url_xpath)),
-                map(extract_text, dom.xpath(title_xpath)),
-                map(extract_text, dom.xpath(content_xpath))
+                 x in eval_xpath_list(dom, url_xpath)),
+                map(extract_text, eval_xpath_list(dom, title_xpath)),
+                map(extract_text, eval_xpath_list(dom, content_xpath))
             ):
                 results.append({'url': url, 'title': title, 'content': content, 'is_onion': is_onion})
diff --git a/searx/engines/youtube_api.py b/searx/engines/youtube_api.py
index 4a205ed6c..8c12ac4d2 100644
--- a/searx/engines/youtube_api.py
+++ b/searx/engines/youtube_api.py
@@ -11,6 +11,7 @@
 from json import loads
 from dateutil import parser
 from urllib.parse import urlencode
+from searx.exceptions import SearxEngineAPIException
 
 # engine dependent config
 categories = ['videos', 'music']
@@ -48,7 +49,7 @@ def response(resp):
     search_results = loads(resp.text)
 
     if 'error' in search_results and 'message' in search_results['error']:
-        raise Exception(search_results['error']['message'])
+        raise SearxEngineAPIException(search_results['error']['message'])
 
     # return empty array if there are no results
     if 'items' not in search_results:
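Taken together, the migration of a typical call site looks roughly like this (the XPath and helper function here are hypothetical, shown only to contrast the two styles):

```python
from lxml import html

from searx.utils import eval_xpath_getindex  # helper adopted throughout this merge


def first_title(result_html):
    # Before this merge an engine would write:
    #     title = dom.xpath('.//h3/a')[0].text_content()
    # and a page-layout change surfaced as a bare IndexError. With the helper,
    # absence is either a typed, per-engine-recordable exception or, when
    # default=None is passed, ordinary control flow:
    dom = html.fromstring(result_html)
    title_tag = eval_xpath_getindex(dom, './/h3/a', 0, default=None)
    return title_tag.text_content() if title_tag is not None else None
```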