summaryrefslogtreecommitdiff
path: root/searx/engines
diff options
context:
space:
mode:
Diffstat (limited to 'searx/engines')
-rw-r--r--searx/engines/9gag.py77
-rw-r--r--searx/engines/bing_videos.py2
-rw-r--r--searx/engines/duden.py7
-rw-r--r--searx/engines/qwant.py113
-rw-r--r--searx/engines/twitter.py75
5 files changed, 231 insertions, 43 deletions
diff --git a/searx/engines/9gag.py b/searx/engines/9gag.py
new file mode 100644
index 000000000..d1846725c
--- /dev/null
+++ b/searx/engines/9gag.py
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: AGPL-3.0-or-later
+# lint: pylint
+# pylint: disable=invalid-name
+"""9GAG (social media)"""
+
+from json import loads
+from datetime import datetime
+from urllib.parse import urlencode
+
+about = {
+ "website": 'https://9gag.com/',
+ "wikidata_id": 'Q277421',
+ "official_api_documentation": None,
+ "use_official_api": True,
+ "require_api_key": False,
+ "results": 'JSON',
+}
+
+categories = ['social media']
+paging = True
+
+search_url = "https://9gag.com/v1/search-posts?{query}"
+page_size = 10
+
+
+def request(query, params):
+ query = urlencode({'query': query, 'c': (params['pageno'] - 1) * page_size})
+
+ params['url'] = search_url.format(query=query)
+
+ return params
+
+
+def response(resp):
+ results = []
+
+ json_results = loads(resp.text)['data']
+
+ for result in json_results['posts']:
+ result_type = result['type']
+
+ # Use the uncropped version of the image as thumbnail when the image height is not too large
+ if result['images']['image700']['height'] > 400:
+ thumbnail = result['images']['imageFbThumbnail']['url']
+ else:
+ thumbnail = result['images']['image700']['url']
+
+ if result_type == 'Photo':
+ results.append(
+ {
+ 'template': 'images.html',
+ 'url': result['url'],
+ 'title': result['title'],
+ 'content': result['description'],
+ 'publishedDate': datetime.utcfromtimestamp(result['creationTs']),
+ 'img_src': result['images']['image700']['url'],
+ 'thumbnail_src': thumbnail,
+ }
+ )
+ elif result_type == 'Animated':
+ results.append(
+ {
+ 'template': 'videos.html',
+ 'url': result['url'],
+ 'title': result['title'],
+ 'content': result['description'],
+ 'publishedDate': datetime.utcfromtimestamp(result['creationTs']),
+ 'thumbnail': thumbnail,
+ 'iframe_src': result['images'].get('image460sv', {}).get('url'),
+ }
+ )
+
+ if 'tags' in json_results:
+ for suggestion in json_results['tags']:
+ results.append({'suggestion': suggestion['key']})
+
+ return results
diff --git a/searx/engines/bing_videos.py b/searx/engines/bing_videos.py
index 9be8eeaef..85071de21 100644
--- a/searx/engines/bing_videos.py
+++ b/searx/engines/bing_videos.py
@@ -80,7 +80,7 @@ def response(resp):
dom = html.fromstring(resp.text)
- for result in dom.xpath('//div[@class="dg_u"]'):
+ for result in dom.xpath('//div[@class="dg_u"]/div[contains(@class, "mc_vtvc")]'):
metadata = loads(result.xpath('.//div[@class="vrhdata"]/@vrhm')[0])
info = ' - '.join(result.xpath('.//div[@class="mc_vtvc_meta_block"]//span/text()')).strip()
content = '{0} - {1}'.format(metadata['du'], info)
diff --git a/searx/engines/duden.py b/searx/engines/duden.py
index da4c4f7da..dca566404 100644
--- a/searx/engines/duden.py
+++ b/searx/engines/duden.py
@@ -7,6 +7,7 @@ import re
from urllib.parse import quote, urljoin
from lxml import html
from searx.utils import extract_text, eval_xpath, eval_xpath_list, eval_xpath_getindex
+from searx.network import raise_for_httperror
# about
about = {
@@ -47,6 +48,7 @@ def request(query, params):
# after the last page of results, spelling corrections are returned after a HTTP redirect
# whatever the page number is
params['soft_max_redirects'] = 1
+ params['raise_for_httperror'] = False
return params
@@ -56,6 +58,11 @@ def response(resp):
'''
results = []
+ if resp.status_code == 404:
+ return results
+
+ raise_for_httperror(resp)
+
dom = html.fromstring(resp.text)
number_of_results_element = eval_xpath_getindex(
diff --git a/searx/engines/qwant.py b/searx/engines/qwant.py
index a1799491a..6de2176d0 100644
--- a/searx/engines/qwant.py
+++ b/searx/engines/qwant.py
@@ -9,16 +9,16 @@ https://www.qwant.com/ queries.
This implementation is used by different qwant engines in the settings.yml::
- name: qwant
- categories: general
+ qwant_categ: web
...
- name: qwant news
- categories: news
+ qwant_categ: news
...
- name: qwant images
- categories: images
+ qwant_categ: images
...
- name: qwant videos
- categories: videos
+ qwant_categ: videos
...
"""
@@ -30,11 +30,11 @@ from datetime import (
from json import loads
from urllib.parse import urlencode
from flask_babel import gettext
+import babel
-from searx.utils import match_language
from searx.exceptions import SearxEngineAPIException
from searx.network import raise_for_httperror
-
+from searx.locales import get_engine_locale
# about
about = {
@@ -50,13 +50,20 @@ about = {
categories = []
paging = True
supported_languages_url = about['website']
+qwant_categ = None # web|news|images|videos
-category_to_keyword = {
- 'general': 'web',
- 'news': 'news',
- 'images': 'images',
- 'videos': 'videos',
-}
+safesearch = True
+safe_search_map = {0: '&safesearch=0', 1: '&safesearch=1', 2: '&safesearch=2'}
+
+# fmt: off
+qwant_news_locales = [
+ 'ca_ad', 'ca_es', 'ca_fr', 'co_fr', 'de_at', 'de_ch', 'de_de', 'en_au',
+ 'en_ca', 'en_gb', 'en_ie', 'en_my', 'en_nz', 'en_us', 'es_ad', 'es_ar',
+ 'es_cl', 'es_co', 'es_es', 'es_mx', 'es_pe', 'eu_es', 'eu_fr', 'fc_ca',
+ 'fr_ad', 'fr_be', 'fr_ca', 'fr_ch', 'fr_fr', 'it_ch', 'it_it', 'nl_be',
+ 'nl_nl', 'pt_ad', 'pt_pt',
+]
+# fmt: on
# search-url
url = 'https://api.qwant.com/v3/search/{keyword}?{query}&count={count}&offset={offset}'
@@ -64,10 +71,13 @@ url = 'https://api.qwant.com/v3/search/{keyword}?{query}&count={count}&offset={o
def request(query, params):
"""Qwant search request"""
- keyword = category_to_keyword[categories[0]]
+
+ if not query:
+ return None
+
count = 10 # web: count must be equal to 10
- if keyword == 'images':
+ if qwant_categ == 'images':
count = 50
offset = (params['pageno'] - 1) * count
# count + offset must be lower than 250
@@ -78,22 +88,18 @@ def request(query, params):
offset = min(offset, 40)
params['url'] = url.format(
- keyword=keyword,
+ keyword=qwant_categ,
query=urlencode({'q': query}),
offset=offset,
count=count,
)
- # add language tag
- if params['language'] == 'all':
- params['url'] += '&locale=en_US'
- else:
- language = match_language(
- params['language'],
- supported_languages,
- language_aliases,
- )
- params['url'] += '&locale=' + language.replace('-', '_')
+ # add qwant's locale
+ q_locale = get_engine_locale(params['language'], supported_languages, default='en_US')
+ params['url'] += '&locale=' + q_locale
+
+ # add safesearch option
+ params['url'] += safe_search_map.get(params['safesearch'], '')
params['raise_for_httperror'] = False
return params
@@ -103,7 +109,6 @@ def response(resp):
"""Get response from Qwant's search request"""
# pylint: disable=too-many-locals, too-many-branches, too-many-statements
- keyword = category_to_keyword[categories[0]]
results = []
# load JSON result
@@ -125,7 +130,7 @@ def response(resp):
# raise for other errors
raise_for_httperror(resp)
- if keyword == 'web':
+ if qwant_categ == 'web':
# The WEB query contains a list named 'mainline'. This list can contain
# different result types (e.g. mainline[0]['type'] returns type of the
# result items in mainline[0]['items']
@@ -136,7 +141,7 @@ def response(resp):
# result['items'].
mainline = data.get('result', {}).get('items', [])
mainline = [
- {'type': keyword, 'items': mainline},
+ {'type': qwant_categ, 'items': mainline},
]
# return empty array if there are no results
@@ -146,7 +151,7 @@ def response(resp):
for row in mainline:
mainline_type = row.get('type', 'web')
- if mainline_type != keyword:
+ if mainline_type != qwant_categ:
continue
if mainline_type == 'ads':
@@ -238,19 +243,43 @@ def response(resp):
return results
-# get supported languages from their site
def _fetch_supported_languages(resp):
- # list of regions is embedded in page as a js object
- response_text = resp.text
- response_text = response_text[response_text.find('INITIAL_PROPS') :]
- response_text = response_text[response_text.find('{') : response_text.find('</script>')]
-
- regions_json = loads(response_text)
-
- supported_languages = []
- for country, langs in regions_json['locales'].items():
- for lang in langs['langs']:
- lang_code = "{lang}-{country}".format(lang=lang, country=country)
- supported_languages.append(lang_code)
+
+ text = resp.text
+ text = text[text.find('INITIAL_PROPS') :]
+ text = text[text.find('{') : text.find('</script>')]
+
+ q_initial_props = loads(text)
+ q_locales = q_initial_props.get('locales')
+ q_valid_locales = []
+
+ for country, v in q_locales.items():
+ for lang in v['langs']:
+ _locale = "{lang}_{country}".format(lang=lang, country=country)
+
+ if qwant_categ == 'news' and _locale.lower() not in qwant_news_locales:
+ # qwant-news does not support all locales from qwant-web:
+ continue
+
+ q_valid_locales.append(_locale)
+
+ supported_languages = {}
+
+ for q_locale in q_valid_locales:
+ try:
+ locale = babel.Locale.parse(q_locale, sep='_')
+ except babel.core.UnknownLocaleError:
+ print("ERROR: can't determine babel locale of quant's locale %s" % q_locale)
+ continue
+
+ # note: supported_languages (dict)
+ #
+ # dict's key is a string build up from a babel.Locale object / the
+ # notation 'xx-XX' (and 'xx') conforms to SearXNG's locale (and
+ # language) notation and dict's values are the locale strings used by
+ # the engine.
+
+ searxng_locale = locale.language + '-' + locale.territory # --> params['language']
+ supported_languages[searxng_locale] = q_locale
return supported_languages
diff --git a/searx/engines/twitter.py b/searx/engines/twitter.py
new file mode 100644
index 000000000..3ebe34be1
--- /dev/null
+++ b/searx/engines/twitter.py
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: AGPL-3.0-or-later
+# lint: pylint
+"""Twitter (microblogging platform)"""
+
+from json import loads
+from urllib.parse import urlencode
+from datetime import datetime
+
+about = {
+ "website": 'https://twitter.com',
+ "wikidata_id": None,
+ "official_api_documentation": 'https://developer.twitter.com/en/docs/twitter-api',
+ "use_official_api": True,
+ "require_api_key": False,
+ "results": 'JSON',
+}
+
+categories = ['social media']
+
+url = "https://api.twitter.com"
+search_url = (
+ "{url}/2/search/adaptive.json?{query}&tweet_mode=extended&query_source=typed_query&pc=1&spelling_corrections=1"
+)
+
+
+def request(query, params):
+ params['url'] = search_url.format(url=url, query=urlencode({'q': query}))
+
+ params['headers'] = {
+ # This token is used in the Twitter web interface (twitter.com). Without this header, the API doesn't work.
+ # The value of the token has never changed (or maybe once a long time ago).
+ # https://github.com/zedeus/nitter/blob/5f31e86e0e8578377fa7d5aeb9631bbb2d35ef1e/src/consts.nim#L5
+ 'Authorization': (
+ "Bearer AAAAAAAAAAAAAAAAAAAAAPYXBAAAAAAACLXUNDekMxqa8h%2F40K4moUkGsoc%3DTYfbDKb"
+ "T3jJPCEVnMYqilB28NHfOPqkca3qaAxGfsyKCs0wRbw"
+ )
+ }
+
+ return params
+
+
+def response(resp):
+ results = []
+
+ json_res = loads(resp.text)['globalObjects']
+
+ for tweet in json_res['tweets'].values():
+ text = tweet['full_text']
+ display = tweet['display_text_range']
+
+ img_src = tweet.get('extended_entities', {}).get('media', [{}])[0].get('media_url_https')
+ if img_src:
+ img_src += "?name=thumb"
+
+ results.append(
+ {
+ 'url': 'https://twitter.com/i/web/status/' + tweet['id_str'],
+ 'title': (text[:40] + '...') if len(text) > 40 else text,
+ 'content': text[display[0] : display[1]],
+ 'img_src': img_src,
+ 'publishedDate': datetime.strptime(tweet['created_at'], '%a %b %d %H:%M:%S %z %Y'),
+ }
+ )
+
+ for user in json_res['users'].values():
+ results.append(
+ {
+ 'title': user['name'],
+ 'content': user['description'],
+ 'url': 'https://twitter.com/' + user['screen_name'],
+ 'img_src': user['profile_image_url_https'],
+ }
+ )
+
+ return results