summary refs log tree commit diff
path: root/searx/engines
diff options
context:
space:
mode:
Diffstat (limited to 'searx/engines')
-rw-r--r--  searx/engines/peertube.py   2
-rw-r--r--  searx/engines/qwant.py     23
-rw-r--r--  searx/engines/seznam.py     4
3 files changed, 19 insertions(+), 10 deletions(-)
diff --git a/searx/engines/peertube.py b/searx/engines/peertube.py
index 86cd04f2d..058065c03 100644
--- a/searx/engines/peertube.py
+++ b/searx/engines/peertube.py
@@ -97,6 +97,6 @@ def _fetch_supported_languages(resp):
import re
# https://docs.python.org/3/howto/regex.html#greedy-versus-non-greedy
- videolanguages = re.search(r"videoLanguages \(\) \{(.*?)\]", resp.text, re.DOTALL)
+ videolanguages = re.search(r"videoLanguages \(\)[^\n]+(.*?)\]", resp.text, re.DOTALL)
peertube_languages = [m.group(1) for m in re.finditer(r"\{ id: '([a-z]+)', label:", videolanguages.group(1))]
return peertube_languages
diff --git a/searx/engines/qwant.py b/searx/engines/qwant.py
index 97e461177..8d03d8324 100644
--- a/searx/engines/qwant.py
+++ b/searx/engines/qwant.py
@@ -84,14 +84,16 @@ def request(query, params):
)
# add language tag
- if params['language'] != 'all':
+ if params['language'] == 'all':
+ params['url'] += '&locale=en_us'
+ else:
language = match_language(
params['language'],
# pylint: disable=undefined-variable
supported_languages,
language_aliases,
)
- params['url'] += '&locale=' + language.replace('-', '_')
+ params['url'] += '&locale=' + language.replace('-', '_').lower()
params['raise_for_httperror'] = False
return params
@@ -144,8 +146,8 @@ def response(resp):
mainline_items = row.get('items', [])
for item in mainline_items:
- title = item['title']
- res_url = item['url']
+ title = item.get('title', None)
+ res_url = item.get('url', None)
if mainline_type == 'web':
content = item['desc']
@@ -156,7 +158,10 @@ def response(resp):
})
elif mainline_type == 'news':
- pub_date = datetime.fromtimestamp(item['date'], None)
+
+ pub_date = item['date']
+ if pub_date is not None:
+ pub_date = datetime.fromtimestamp(pub_date)
news_media = item.get('media', [])
img_src = None
if news_media:
@@ -192,8 +197,12 @@ def response(resp):
if c:
content_parts.append("%s: %s " % (gettext("Channel"), c))
content = ' // '.join(content_parts)
- length = timedelta(seconds=item['duration'])
- pub_date = datetime.fromtimestamp(item['date'])
+ length = item['duration']
+ if length is not None:
+ length = timedelta(milliseconds=length)
+ pub_date = item['date']
+ if pub_date is not None:
+ pub_date = datetime.fromtimestamp(pub_date)
thumbnail = item['thumbnail']
# from some locations (DE and others?) the s2 link do
# response a 'Please wait ..' but does not deliver the thumbnail
diff --git a/searx/engines/seznam.py b/searx/engines/seznam.py
index 042088dbe..9cd50dfc0 100644
--- a/searx/engines/seznam.py
+++ b/searx/engines/seznam.py
@@ -53,14 +53,14 @@ def response(resp):
dom = html.fromstring(resp.content.decode())
for result_element in eval_xpath_list(dom, '//div[@data-dot="results"]/div'):
- result_data = eval_xpath_getindex(result_element, './/div[contains(@class, "Result")]', 0, default=None)
+ result_data = eval_xpath_getindex(result_element, './/div[contains(@class, "bec586")]', 0, default=None)
if result_data is None:
continue
title_element = eval_xpath_getindex(result_element, './/h3/a', 0)
results.append({
'url': title_element.get('href'),
'title': extract_text(title_element),
- 'content': extract_text(eval_xpath(result_data, './/p[@class="Result-description"]')),
+ 'content': extract_text(eval_xpath(result_data, './/div[@class="_3eded7"]')),
})
return results