summaryrefslogtreecommitdiff
path: root/searx/engines
diff options
context:
space:
mode:
authorAllen <64094914+allendema@users.noreply.github.com>2024-05-23 23:21:58 +0000
committerMarkus Heiser <markus.heiser@darmarIT.de>2024-05-29 17:56:17 +0200
commit0fa81fc782feb104bbd6616d87a6f441aad7d5bd (patch)
tree4fd2f697240307d616f8b49614787852e5923fc4 /searx/engines
parent0fb3f0e4aeecf62612cb6568910cf0f97c98cab9 (diff)
[enh] add re-usable func to filter text
Diffstat (limited to 'searx/engines')
-rw-r--r--searx/engines/brave.py7
-rw-r--r--searx/engines/duckduckgo.py17
-rw-r--r--searx/engines/qwant.py7
-rw-r--r--searx/engines/vimeo.py9
-rw-r--r--searx/engines/youtube_noapi.py6
5 files changed, 21 insertions, 25 deletions
diff --git a/searx/engines/brave.py b/searx/engines/brave.py
index 04c2931f9..c5780a02c 100644
--- a/searx/engines/brave.py
+++ b/searx/engines/brave.py
@@ -132,6 +132,7 @@ from lxml import html
from searx import locales
from searx.utils import (
extract_text,
+ extr,
eval_xpath,
eval_xpath_list,
eval_xpath_getindex,
@@ -252,11 +253,7 @@ def response(resp):
if brave_category in ('search', 'goggles'):
return _parse_search(resp)
- datastr = ""
- for line in resp.text.split("\n"):
- if "const data = " in line:
- datastr = line.replace("const data = ", "").strip()[:-1]
- break
+ datastr = extr(resp.text, "const data = ", ";\n").strip()
json_data = js_variable_to_python(datastr)
json_resp = json_data[1]['data']['body']['response']
diff --git a/searx/engines/duckduckgo.py b/searx/engines/duckduckgo.py
index b874ca2f8..fced014c1 100644
--- a/searx/engines/duckduckgo.py
+++ b/searx/engines/duckduckgo.py
@@ -392,7 +392,9 @@ def fetch_traits(engine_traits: EngineTraits):
SearXNG's locale.
"""
- # pylint: disable=too-many-branches, too-many-statements
+    # pylint: disable=too-many-branches, too-many-statements, import-outside-toplevel
+ from searx.utils import extr, js_variable_to_python
+
# fetch regions
engine_traits.all_locale = 'wt-wt'
@@ -403,11 +405,9 @@ def fetch_traits(engine_traits: EngineTraits):
if not resp.ok: # type: ignore
print("ERROR: response from DuckDuckGo is not OK.")
- pos = resp.text.find('regions:{') + 8 # type: ignore
- js_code = resp.text[pos:] # type: ignore
- pos = js_code.find('}') + 1
- regions = json.loads(js_code[:pos])
+ js_code = extr(resp.text, 'regions:', ',snippetLengths')
+ regions = json.loads(js_code)
for eng_tag, name in regions.items():
if eng_tag == 'wt-wt':
@@ -439,12 +439,9 @@ def fetch_traits(engine_traits: EngineTraits):
engine_traits.custom['lang_region'] = {}
- pos = resp.text.find('languages:{') + 10 # type: ignore
- js_code = resp.text[pos:] # type: ignore
- pos = js_code.find('}') + 1
- js_code = '{"' + js_code[1:pos].replace(':', '":').replace(',', ',"')
- languages = json.loads(js_code)
+ js_code = extr(resp.text, 'languages:', ',regions')
+ languages = js_variable_to_python(js_code)
for eng_lang, name in languages.items():
if eng_lang == 'wt_WT':
diff --git a/searx/engines/qwant.py b/searx/engines/qwant.py
index facd47bb9..989fe1445 100644
--- a/searx/engines/qwant.py
+++ b/searx/engines/qwant.py
@@ -312,13 +312,12 @@ def fetch_traits(engine_traits: EngineTraits):
# pylint: disable=import-outside-toplevel
from searx import network
from searx.locales import region_tag
+ from searx.utils import extr
resp = network.get(about['website'])
- text = resp.text
- text = text[text.find('INITIAL_PROPS') :]
- text = text[text.find('{') : text.find('</script>')]
+ json_string = extr(resp.text, 'INITIAL_PROPS = ', '</script>')
- q_initial_props = loads(text)
+ q_initial_props = loads(json_string)
q_locales = q_initial_props.get('locales')
eng_tag_list = set()
diff --git a/searx/engines/vimeo.py b/searx/engines/vimeo.py
index 2449345e6..d46468d8d 100644
--- a/searx/engines/vimeo.py
+++ b/searx/engines/vimeo.py
@@ -7,6 +7,8 @@ from urllib.parse import urlencode
from json import loads
from dateutil import parser
+from searx.utils import extr
+
# about
about = {
"website": 'https://vimeo.com/',
@@ -23,7 +25,7 @@ paging = True
# search-url
base_url = 'https://vimeo.com/'
-search_url = base_url + '/search/page:{pageno}?{query}'
+search_url = base_url + 'search/page:{pageno}?{query}'
# do search-request
@@ -36,9 +38,8 @@ def request(query, params):
# get response from search-request
def response(resp):
results = []
- data_start_pos = resp.text.find('{"filtered"')
- data_end_pos = resp.text.find(';\n', data_start_pos + 1)
- data = loads(resp.text[data_start_pos:data_end_pos])
+
+ data = loads(extr(resp.text, 'var data = ', ';\n'))
# parse results
for result in data['filtered']['data']:
diff --git a/searx/engines/youtube_noapi.py b/searx/engines/youtube_noapi.py
index b015dff8d..c2136c3ca 100644
--- a/searx/engines/youtube_noapi.py
+++ b/searx/engines/youtube_noapi.py
@@ -7,6 +7,8 @@ from functools import reduce
from json import loads, dumps
from urllib.parse import quote_plus
+from searx.utils import extr
+
# about
about = {
"website": 'https://www.youtube.com/',
@@ -109,8 +111,8 @@ def parse_next_page_response(response_text):
def parse_first_page_response(response_text):
results = []
- results_data = response_text[response_text.find('ytInitialData') :]
- results_data = results_data[results_data.find('{') : results_data.find(';</script>')]
+ results_data = extr(response_text, 'ytInitialData = ', ';</script>')
+
results_json = loads(results_data) if results_data else {}
sections = (
results_json.get('contents', {})