summaryrefslogtreecommitdiff
path: root/searx/engines
diff options
context:
space:
mode:
Diffstat (limited to 'searx/engines')
-rw-r--r--searx/engines/9gag.py77
-rw-r--r--searx/engines/__init__.py1
-rw-r--r--searx/engines/apple_app_store.py57
-rw-r--r--searx/engines/apple_maps.py113
-rw-r--r--searx/engines/bing_videos.py2
-rw-r--r--searx/engines/deepl.py62
-rw-r--r--searx/engines/duden.py7
-rw-r--r--searx/engines/google_news.py24
-rw-r--r--searx/engines/qwant.py113
-rw-r--r--searx/engines/twitter.py75
-rw-r--r--searx/engines/xpath.py18
11 files changed, 487 insertions, 62 deletions
diff --git a/searx/engines/9gag.py b/searx/engines/9gag.py
new file mode 100644
index 000000000..d1846725c
--- /dev/null
+++ b/searx/engines/9gag.py
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: AGPL-3.0-or-later
+# lint: pylint
+# pylint: disable=invalid-name
+"""9GAG (social media)"""
+
+from json import loads
+from datetime import datetime
+from urllib.parse import urlencode
+
+about = {
+ "website": 'https://9gag.com/',
+ "wikidata_id": 'Q277421',
+ "official_api_documentation": None,
+ "use_official_api": True,
+ "require_api_key": False,
+ "results": 'JSON',
+}
+
+categories = ['social media']
+paging = True
+
+search_url = "https://9gag.com/v1/search-posts?{query}"
+page_size = 10
+
+
+def request(query, params):
+ query = urlencode({'query': query, 'c': (params['pageno'] - 1) * page_size})
+
+ params['url'] = search_url.format(query=query)
+
+ return params
+
+
+def response(resp):
+ results = []
+
+ json_results = loads(resp.text)['data']
+
+ for result in json_results['posts']:
+ result_type = result['type']
+
+        # Use the uncropped thumbnail variant when the image is not too tall
+ if result['images']['image700']['height'] > 400:
+ thumbnail = result['images']['imageFbThumbnail']['url']
+ else:
+ thumbnail = result['images']['image700']['url']
+
+ if result_type == 'Photo':
+ results.append(
+ {
+ 'template': 'images.html',
+ 'url': result['url'],
+ 'title': result['title'],
+ 'content': result['description'],
+ 'publishedDate': datetime.utcfromtimestamp(result['creationTs']),
+ 'img_src': result['images']['image700']['url'],
+ 'thumbnail_src': thumbnail,
+ }
+ )
+ elif result_type == 'Animated':
+ results.append(
+ {
+ 'template': 'videos.html',
+ 'url': result['url'],
+ 'title': result['title'],
+ 'content': result['description'],
+ 'publishedDate': datetime.utcfromtimestamp(result['creationTs']),
+ 'thumbnail': thumbnail,
+ 'iframe_src': result['images'].get('image460sv', {}).get('url'),
+ }
+ )
+
+ if 'tags' in json_results:
+ for suggestion in json_results['tags']:
+ results.append({'suggestion': suggestion['key']})
+
+ return results
diff --git a/searx/engines/__init__.py b/searx/engines/__init__.py
index 07d5b226c..57b090add 100644
--- a/searx/engines/__init__.py
+++ b/searx/engines/__init__.py
@@ -81,6 +81,7 @@ engine_shortcuts = {}
engine_shortcuts[engine.shortcut] = engine.name
+:meta hide-value:
"""
diff --git a/searx/engines/apple_app_store.py b/searx/engines/apple_app_store.py
new file mode 100644
index 000000000..f75a1a657
--- /dev/null
+++ b/searx/engines/apple_app_store.py
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: AGPL-3.0-or-later
+# lint: pylint
+"""
+ Apple App Store
+"""
+
+from json import loads
+from urllib.parse import urlencode
+from dateutil.parser import parse
+
+about = {
+ "website": 'https://www.apple.com/app-store/',
+ "wikidata_id": 'Q368215',
+ "official_api_documentation": (
+ 'https://developer.apple.com/library/archive/documentation/AudioVideo/Conceptual/'
+ 'iTuneSearchAPI/UnderstandingSearchResults.html#//apple_ref/doc/uid/TP40017632-CH8-SW1'
+ ),
+ "use_official_api": True,
+ "require_api_key": False,
+ "results": 'JSON',
+}
+
+categories = ['files', 'apps']
+safesearch = True
+
+search_url = 'https://itunes.apple.com/search?{query}'
+
+
+def request(query, params):
+ explicit = "Yes"
+
+ if params['safesearch'] > 0:
+ explicit = "No"
+
+ params['url'] = search_url.format(query=urlencode({'term': query, 'media': 'software', 'explicit': explicit}))
+
+ return params
+
+
+def response(resp):
+ results = []
+
+ json_result = loads(resp.text)
+
+ for result in json_result['results']:
+ results.append(
+ {
+ 'url': result['trackViewUrl'],
+ 'title': result['trackName'],
+ 'content': result['description'],
+ 'img_src': result['artworkUrl100'],
+ 'publishedDate': parse(result['currentVersionReleaseDate']),
+ 'author': result['sellerName'],
+ }
+ )
+
+ return results
diff --git a/searx/engines/apple_maps.py b/searx/engines/apple_maps.py
new file mode 100644
index 000000000..eb4af422e
--- /dev/null
+++ b/searx/engines/apple_maps.py
@@ -0,0 +1,113 @@
+# SPDX-License-Identifier: AGPL-3.0-or-later
+# lint: pylint
+"""Apple Maps"""
+
+from json import loads
+from time import time
+from urllib.parse import urlencode
+
+from searx.network import get as http_get
+from searx.engines.openstreetmap import get_key_label
+
+about = {
+ "website": 'https://www.apple.com/maps/',
+ "wikidata_id": 'Q276101',
+ "official_api_documentation": None,
+ "use_official_api": True,
+ "require_api_key": False,
+ "results": 'JSON',
+}
+
+token = {'value': '', 'last_updated': None}
+
+categories = ['map']
+paging = False
+
+search_url = "https://api.apple-mapkit.com/v1/search?{query}&mkjsVersion=5.72.53"
+
+
+def obtain_token():
+ update_time = time() - (time() % 1800)
+ try:
+ # use duckduckgo's mapkit token
+ token_response = http_get('https://duckduckgo.com/local.js?get_mk_token=1', timeout=2.0)
+ actual_token = http_get(
+ 'https://cdn.apple-mapkit.com/ma/bootstrap?apiVersion=2&mkjsVersion=5.72.53&poi=1',
+ timeout=2.0,
+ headers={'Authorization': 'Bearer ' + token_response.text},
+ )
+ token['value'] = loads(actual_token.text)['authInfo']['access_token']
+ token['last_updated'] = update_time
+ # pylint: disable=bare-except
+ except:
+ pass
+ return token
+
+
+def request(query, params):
+ if time() - (token['last_updated'] or 0) > 1800:
+ obtain_token()
+
+ params['url'] = search_url.format(query=urlencode({'q': query, 'lang': params['language']}))
+
+ params['headers'] = {'Authorization': 'Bearer ' + token['value']}
+
+ return params
+
+
+def response(resp):
+ results = []
+
+ resp_json = loads(resp.text)
+
+ user_language = resp.search_params['language']
+
+ for result in resp_json['results']:
+ boundingbox = None
+ if 'displayMapRegion' in result:
+ box = result['displayMapRegion']
+ boundingbox = [box['southLat'], box['northLat'], box['westLng'], box['eastLng']]
+
+ links = []
+ if 'telephone' in result:
+ telephone = result['telephone']
+ links.append(
+ {
+ 'label': get_key_label('phone', user_language),
+ 'url': 'tel:' + telephone,
+ 'url_label': telephone,
+ }
+ )
+ if result.get('urls'):
+ url = result['urls'][0]
+ links.append(
+ {
+ 'label': get_key_label('website', user_language),
+ 'url': url,
+ 'url_label': url,
+ }
+ )
+
+ results.append(
+ {
+ 'template': 'map.html',
+ 'type': result.get('poiCategory'),
+ 'title': result['name'],
+ 'links': links,
+ 'latitude': result['center']['lat'],
+ 'longitude': result['center']['lng'],
+ 'url': result['placecardUrl'],
+ 'boundingbox': boundingbox,
+ 'geojson': {'type': 'Point', 'coordinates': [result['center']['lng'], result['center']['lat']]},
+ 'address': {
+ 'name': result['name'],
+ 'house_number': result.get('subThoroughfare'),
+ 'road': result.get('thoroughfare'),
+ 'locality': result.get('locality'),
+ 'postcode': result.get('postCode'),
+ 'country': result.get('country'),
+ },
+ }
+ )
+
+ return results
diff --git a/searx/engines/bing_videos.py b/searx/engines/bing_videos.py
index 9be8eeaef..85071de21 100644
--- a/searx/engines/bing_videos.py
+++ b/searx/engines/bing_videos.py
@@ -80,7 +80,7 @@ def response(resp):
dom = html.fromstring(resp.text)
- for result in dom.xpath('//div[@class="dg_u"]'):
+ for result in dom.xpath('//div[@class="dg_u"]/div[contains(@class, "mc_vtvc")]'):
metadata = loads(result.xpath('.//div[@class="vrhdata"]/@vrhm')[0])
info = ' - '.join(result.xpath('.//div[@class="mc_vtvc_meta_block"]//span/text()')).strip()
content = '{0} - {1}'.format(metadata['du'], info)
diff --git a/searx/engines/deepl.py b/searx/engines/deepl.py
new file mode 100644
index 000000000..85072710f
--- /dev/null
+++ b/searx/engines/deepl.py
@@ -0,0 +1,62 @@
+# SPDX-License-Identifier: AGPL-3.0-or-later
+# lint: pylint
+"""Deepl translation engine"""
+
+from json import loads
+
+about = {
+ "website": 'https://deepl.com',
+ "wikidata_id": 'Q43968444',
+ "official_api_documentation": 'https://www.deepl.com/docs-api',
+ "use_official_api": True,
+ "require_api_key": True,
+ "results": 'JSON',
+}
+
+engine_type = 'online_dictionary'
+categories = ['general']
+
+url = 'https://api-free.deepl.com/v2/translate'
+api_key = None
+
+
+def request(_query, params):
+ '''pre-request callback
+
+ params<dict>:
+
+ - ``method`` : POST/GET
+ - ``headers``: {}
+ - ``data``: {} # if method == POST
+ - ``url``: ''
+ - ``category``: 'search category'
+ - ``pageno``: 1 # number of the requested page
+ '''
+
+ params['url'] = url
+ params['method'] = 'POST'
+ params['data'] = {'auth_key': api_key, 'text': params['query'], 'target_lang': params['to_lang'][1]}
+
+ return params
+
+
+def response(resp):
+ results = []
+ result = loads(resp.text)
+ translations = result['translations']
+
+ infobox = "<dl>"
+
+ for translation in translations:
+ infobox += f"<dd>{translation['text']}</dd>"
+
+ infobox += "</dl>"
+
+ results.append(
+ {
+ 'infobox': 'Deepl',
+ 'content': infobox,
+ }
+ )
+
+ return results
diff --git a/searx/engines/duden.py b/searx/engines/duden.py
index da4c4f7da..dca566404 100644
--- a/searx/engines/duden.py
+++ b/searx/engines/duden.py
@@ -7,6 +7,7 @@ import re
from urllib.parse import quote, urljoin
from lxml import html
from searx.utils import extract_text, eval_xpath, eval_xpath_list, eval_xpath_getindex
+from searx.network import raise_for_httperror
# about
about = {
@@ -47,6 +48,7 @@ def request(query, params):
# after the last page of results, spelling corrections are returned after a HTTP redirect
# whatever the page number is
params['soft_max_redirects'] = 1
+ params['raise_for_httperror'] = False
return params
@@ -56,6 +58,11 @@ def response(resp):
'''
results = []
+ if resp.status_code == 404:
+ return results
+
+ raise_for_httperror(resp)
+
dom = html.fromstring(resp.text)
number_of_results_element = eval_xpath_getindex(
diff --git a/searx/engines/google_news.py b/searx/engines/google_news.py
index 8f5a4b104..87867d65a 100644
--- a/searx/engines/google_news.py
+++ b/searx/engines/google_news.py
@@ -150,24 +150,12 @@ def response(resp):
# the first <h3> tag in the <article> contains the title of the link
title = extract_text(eval_xpath(result, './article/h3[1]'))
- # the first <div> tag in the <article> contains the content of the link
- content = extract_text(eval_xpath(result, './article/div[1]'))
-
- # the second <div> tag contains origin publisher and the publishing date
-
- pub_date = extract_text(eval_xpath(result, './article/div[2]//time'))
- pub_origin = extract_text(eval_xpath(result, './article/div[2]//a'))
-
- pub_info = []
- if pub_origin:
- pub_info.append(pub_origin)
- if pub_date:
- # The pub_date is mostly a string like 'yesertday', not a real
- # timezone date or time. Therefore we can't use publishedDate.
- pub_info.append(pub_date)
- pub_info = ', '.join(pub_info)
- if pub_info:
- content = pub_info + ': ' + content
+        # The pub_date is mostly a string like 'yesterday', not a real
+ # timezone date or time. Therefore we can't use publishedDate.
+ pub_date = extract_text(eval_xpath(result, './article/div[1]/div[1]/time'))
+ pub_origin = extract_text(eval_xpath(result, './article/div[1]/div[1]/a'))
+
+ content = ' / '.join([x for x in [pub_origin, pub_date] if x])
# The image URL is located in a preceding sibling <img> tag, e.g.:
# "https://lh3.googleusercontent.com/DjhQh7DMszk.....z=-p-h100-w100"
diff --git a/searx/engines/qwant.py b/searx/engines/qwant.py
index a1799491a..6de2176d0 100644
--- a/searx/engines/qwant.py
+++ b/searx/engines/qwant.py
@@ -9,16 +9,16 @@ https://www.qwant.com/ queries.
This implementation is used by different qwant engines in the settings.yml::
- name: qwant
- categories: general
+ qwant_categ: web
...
- name: qwant news
- categories: news
+ qwant_categ: news
...
- name: qwant images
- categories: images
+ qwant_categ: images
...
- name: qwant videos
- categories: videos
+ qwant_categ: videos
...
"""
@@ -30,11 +30,11 @@ from datetime import (
from json import loads
from urllib.parse import urlencode
from flask_babel import gettext
+import babel
-from searx.utils import match_language
from searx.exceptions import SearxEngineAPIException
from searx.network import raise_for_httperror
-
+from searx.locales import get_engine_locale
# about
about = {
@@ -50,13 +50,20 @@ about = {
categories = []
paging = True
supported_languages_url = about['website']
+qwant_categ = None  # web|news|images|videos
-category_to_keyword = {
- 'general': 'web',
- 'news': 'news',
- 'images': 'images',
- 'videos': 'videos',
-}
+safesearch = True
+safe_search_map = {0: '&safesearch=0', 1: '&safesearch=1', 2: '&safesearch=2'}
+
+# fmt: off
+qwant_news_locales = [
+ 'ca_ad', 'ca_es', 'ca_fr', 'co_fr', 'de_at', 'de_ch', 'de_de', 'en_au',
+ 'en_ca', 'en_gb', 'en_ie', 'en_my', 'en_nz', 'en_us', 'es_ad', 'es_ar',
+ 'es_cl', 'es_co', 'es_es', 'es_mx', 'es_pe', 'eu_es', 'eu_fr', 'fc_ca',
+ 'fr_ad', 'fr_be', 'fr_ca', 'fr_ch', 'fr_fr', 'it_ch', 'it_it', 'nl_be',
+ 'nl_nl', 'pt_ad', 'pt_pt',
+]
+# fmt: on
# search-url
url = 'https://api.qwant.com/v3/search/{keyword}?{query}&count={count}&offset={offset}'
@@ -64,10 +71,13 @@ url = 'https://api.qwant.com/v3/search/{keyword}?{query}&count={count}&offset={o
def request(query, params):
"""Qwant search request"""
- keyword = category_to_keyword[categories[0]]
+
+ if not query:
+ return None
+
count = 10 # web: count must be equal to 10
- if keyword == 'images':
+ if qwant_categ == 'images':
count = 50
offset = (params['pageno'] - 1) * count
# count + offset must be lower than 250
@@ -78,22 +88,18 @@ def request(query, params):
offset = min(offset, 40)
params['url'] = url.format(
- keyword=keyword,
+ keyword=qwant_categ,
query=urlencode({'q': query}),
offset=offset,
count=count,
)
- # add language tag
- if params['language'] == 'all':
- params['url'] += '&locale=en_US'
- else:
- language = match_language(
- params['language'],
- supported_languages,
- language_aliases,
- )
- params['url'] += '&locale=' + language.replace('-', '_')
+    # add qwant's locale
+ q_locale = get_engine_locale(params['language'], supported_languages, default='en_US')
+ params['url'] += '&locale=' + q_locale
+
+ # add safesearch option
+ params['url'] += safe_search_map.get(params['safesearch'], '')
params['raise_for_httperror'] = False
return params
@@ -103,7 +109,6 @@ def response(resp):
"""Get response from Qwant's search request"""
# pylint: disable=too-many-locals, too-many-branches, too-many-statements
- keyword = category_to_keyword[categories[0]]
results = []
# load JSON result
@@ -125,7 +130,7 @@ def response(resp):
# raise for other errors
raise_for_httperror(resp)
- if keyword == 'web':
+ if qwant_categ == 'web':
# The WEB query contains a list named 'mainline'. This list can contain
# different result types (e.g. mainline[0]['type'] returns type of the
# result items in mainline[0]['items']
@@ -136,7 +141,7 @@ def response(resp):
# result['items'].
mainline = data.get('result', {}).get('items', [])
mainline = [
- {'type': keyword, 'items': mainline},
+ {'type': qwant_categ, 'items': mainline},
]
# return empty array if there are no results
@@ -146,7 +151,7 @@ def response(resp):
for row in mainline:
mainline_type = row.get('type', 'web')
- if mainline_type != keyword:
+ if mainline_type != qwant_categ:
continue
if mainline_type == 'ads':
@@ -238,19 +243,43 @@ def response(resp):
return results
-# get supported languages from their site
def _fetch_supported_languages(resp):
- # list of regions is embedded in page as a js object
- response_text = resp.text
- response_text = response_text[response_text.find('INITIAL_PROPS') :]
- response_text = response_text[response_text.find('{') : response_text.find('</script>')]
-
- regions_json = loads(response_text)
-
- supported_languages = []
- for country, langs in regions_json['locales'].items():
- for lang in langs['langs']:
- lang_code = "{lang}-{country}".format(lang=lang, country=country)
- supported_languages.append(lang_code)
+
+ text = resp.text
+ text = text[text.find('INITIAL_PROPS') :]
+ text = text[text.find('{') : text.find('</script>')]
+
+ q_initial_props = loads(text)
+ q_locales = q_initial_props.get('locales')
+ q_valid_locales = []
+
+ for country, v in q_locales.items():
+ for lang in v['langs']:
+ _locale = "{lang}_{country}".format(lang=lang, country=country)
+
+ if qwant_categ == 'news' and _locale.lower() not in qwant_news_locales:
+ # qwant-news does not support all locales from qwant-web:
+ continue
+
+ q_valid_locales.append(_locale)
+
+ supported_languages = {}
+
+ for q_locale in q_valid_locales:
+ try:
+ locale = babel.Locale.parse(q_locale, sep='_')
+ except babel.core.UnknownLocaleError:
+ print("ERROR: can't determine babel locale of quant's locale %s" % q_locale)
+ continue
+
+ # note: supported_languages (dict)
+ #
+ # dict's key is a string build up from a babel.Locale object / the
+ # notation 'xx-XX' (and 'xx') conforms to SearXNG's locale (and
+ # language) notation and dict's values are the locale strings used by
+ # the engine.
+
+ searxng_locale = locale.language + '-' + locale.territory # --> params['language']
+ supported_languages[searxng_locale] = q_locale
return supported_languages
diff --git a/searx/engines/twitter.py b/searx/engines/twitter.py
new file mode 100644
index 000000000..3ebe34be1
--- /dev/null
+++ b/searx/engines/twitter.py
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: AGPL-3.0-or-later
+# lint: pylint
+"""Twitter (microblogging platform)"""
+
+from json import loads
+from urllib.parse import urlencode
+from datetime import datetime
+
+about = {
+ "website": 'https://twitter.com',
+ "wikidata_id": None,
+ "official_api_documentation": 'https://developer.twitter.com/en/docs/twitter-api',
+ "use_official_api": True,
+ "require_api_key": False,
+ "results": 'JSON',
+}
+
+categories = ['social media']
+
+url = "https://api.twitter.com"
+search_url = (
+ "{url}/2/search/adaptive.json?{query}&tweet_mode=extended&query_source=typed_query&pc=1&spelling_corrections=1"
+)
+
+
+def request(query, params):
+ params['url'] = search_url.format(url=url, query=urlencode({'q': query}))
+
+ params['headers'] = {
+ # This token is used in the Twitter web interface (twitter.com). Without this header, the API doesn't work.
+ # The value of the token has never changed (or maybe once a long time ago).
+ # https://github.com/zedeus/nitter/blob/5f31e86e0e8578377fa7d5aeb9631bbb2d35ef1e/src/consts.nim#L5
+ 'Authorization': (
+ "Bearer AAAAAAAAAAAAAAAAAAAAAPYXBAAAAAAACLXUNDekMxqa8h%2F40K4moUkGsoc%3DTYfbDKb"
+ "T3jJPCEVnMYqilB28NHfOPqkca3qaAxGfsyKCs0wRbw"
+ )
+ }
+
+ return params
+
+
+def response(resp):
+ results = []
+
+ json_res = loads(resp.text)['globalObjects']
+
+ for tweet in json_res['tweets'].values():
+ text = tweet['full_text']
+ display = tweet['display_text_range']
+
+ img_src = tweet.get('extended_entities', {}).get('media', [{}])[0].get('media_url_https')
+ if img_src:
+ img_src += "?name=thumb"
+
+ results.append(
+ {
+ 'url': 'https://twitter.com/i/web/status/' + tweet['id_str'],
+ 'title': (text[:40] + '...') if len(text) > 40 else text,
+ 'content': text[display[0] : display[1]],
+ 'img_src': img_src,
+ 'publishedDate': datetime.strptime(tweet['created_at'], '%a %b %d %H:%M:%S %z %Y'),
+ }
+ )
+
+ for user in json_res['users'].values():
+ results.append(
+ {
+ 'title': user['name'],
+ 'content': user['description'],
+ 'url': 'https://twitter.com/' + user['screen_name'],
+ 'img_src': user['profile_image_url_https'],
+ }
+ )
+
+ return results
diff --git a/searx/engines/xpath.py b/searx/engines/xpath.py
index 705a5211d..f9528e92d 100644
--- a/searx/engines/xpath.py
+++ b/searx/engines/xpath.py
@@ -22,6 +22,7 @@ from urllib.parse import urlencode
from lxml import html
from searx.utils import extract_text, extract_url, eval_xpath, eval_xpath_list
+from searx.network import raise_for_httperror
search_url = None
"""
@@ -61,6 +62,14 @@ lang_all = 'en'
selected.
'''
+no_result_for_http_status = []
+'''Return empty result for these HTTP status codes instead of throwing an error.
+
+.. code:: yaml
+
+ no_result_for_http_status: []
+'''
+
soft_max_redirects = 0
'''Maximum redirects, soft limit. Record an error but don't stop the engine'''
@@ -177,11 +186,18 @@ def request(query, params):
params['url'] = search_url.format(**fargs)
params['soft_max_redirects'] = soft_max_redirects
+ params['raise_for_httperror'] = False
+
return params
-def response(resp):
+def response(resp): # pylint: disable=too-many-branches
'''Scrap *results* from the response (see :ref:`engine results`).'''
+ if no_result_for_http_status and resp.status_code in no_result_for_http_status:
+ return []
+
+ raise_for_httperror(resp)
+
results = []
dom = html.fromstring(resp.text)
is_onion = 'onions' in categories