diff options
| author | Zhijie He <hezhijie0327@hotmail.com> | 2025-02-23 13:47:03 +0800 |
|---|---|---|
| committer | Bnyro <bnyro@tutanota.com> | 2025-03-02 13:31:31 +0100 |
| commit | 97aa5a779b3910efb2cc8f7497969fbe0d126910 (patch) | |
| tree | 2ece13de46369a42d7441ec531ebabaae97ac919 /searx/engines | |
| parent | 71d1504e572074327f851688a4caac90a5e41fe8 (diff) | |
[feat] add Sogou engine for searxng
Co-authored-by: Bnyro <bnyro@tutanota.com>
Diffstat (limited to 'searx/engines')
| -rw-r--r-- | searx/engines/sogou.py | 68 | ||||
| -rw-r--r-- | searx/engines/sogou_images.py | 49 | ||||
| -rw-r--r-- | searx/engines/sogou_videos.py | 72 |
3 files changed, 189 insertions, 0 deletions
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Sogou search engine for searxng"""

from urllib.parse import urlencode
from lxml import html

from searx.utils import extract_text

# Metadata
about = {
    "website": "https://www.sogou.com/",
    "wikidata_id": "Q7554565",
    "use_official_api": False,
    "require_api_key": False,
    "results": "HTML",
}

# Engine Configuration
categories = ["general"]
paging = True
max_page = 10
time_range_support = True

# Maps searxng time_range values to Sogou's "s_from" filter parameter.
time_range_dict = {'day': 'inttime_day', 'week': 'inttime_week', 'month': 'inttime_month', 'year': 'inttime_year'}

# Base URL
base_url = "https://www.sogou.com"


def request(query, params):
    """Build the Sogou web-search request URL.

    Adds paging and, when a supported time range is selected, Sogou's
    time-filter parameters (``s_from`` plus ``tsn=1``).
    """
    query_params = {
        "query": query,
        "page": params["pageno"],
    }

    # Single lookup instead of two; params.get() also tolerates a missing key.
    time_range = time_range_dict.get(params.get('time_range'))
    if time_range:
        query_params["s_from"] = time_range
        query_params["tsn"] = 1

    params["url"] = f"{base_url}/web?{urlencode(query_params)}"
    return params


def response(resp):
    """Parse the Sogou result page into a list of searxng result dicts."""
    dom = html.fromstring(resp.text)
    results = []

    for item in dom.xpath('//div[contains(@class, "vrwrap")]'):
        # Query the title link once and reuse it for both title and href
        # (the original ran the same xpath twice and, via extract_text on
        # @href, would concatenate hrefs if several links matched).
        link = item.xpath('.//h3[contains(@class, "vr-title")]/a')
        title = extract_text(link)
        url = link[0].get("href", "") if link else ""

        # Sogou serves redirect links relative to its own host.
        if url.startswith("/link?url="):
            url = f"{base_url}{url}"

        content = extract_text(item.xpath('.//div[contains(@class, "text-layout")]//p[contains(@class, "star-wiki")]'))
        if not content:
            content = extract_text(item.xpath('.//div[contains(@class, "fz-mid space-txt")]'))

        if title and url:
            results.append(
                {
                    "title": title,
                    "url": url,
                    "content": content,
                }
            )

    return results
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Sogou-Images: A search engine for retrieving images from Sogou."""

import json
import re
from urllib.parse import quote_plus

# about
about = {
    "website": "https://pic.sogou.com/",
    "wikidata_id": "Q7554565",
    "use_official_api": False,
    "require_api_key": False,
    "results": "HTML",
}

# engine dependent config
categories = ["images"]

base_url = "https://pic.sogou.com"


def request(query, params):
    """Build the Sogou image-search request URL."""
    params["url"] = f"{base_url}/pics?query={quote_plus(query)}"
    return params


def response(resp):
    """Extract image results from the inline JSON state of the result page.

    The page embeds its data as ``window.__INITIAL_STATE__ = {...};`` inside
    a <script> tag; pull that object out and read the image list from it.
    """
    results = []
    # NOTE(review): the non-greedy match stops at the first "};", which could
    # truncate the object if a string value inside it happens to contain
    # "};" — confirm against live markup.
    match = re.search(r'window\.__INITIAL_STATE__\s*=\s*({.*?});', resp.text, re.S)
    if not match:
        return results

    data = json.loads(match.group(1))
    # Defensive chaining: "searchList" may be absent or JSON null; the
    # original membership test ("searchList" in data["searchList"]) raised
    # TypeError on a null value.
    for item in (data.get("searchList") or {}).get("searchList") or []:
        results.append(
            {
                "template": "images.html",
                "url": item.get("url", ""),
                "thumbnail_src": item.get("picUrl", ""),
                "img_src": item.get("picUrl", ""),
                "content": item.get("content_major", ""),
                "title": item.get("title", ""),
                "source": item.get("ch_site_name", ""),
            }
        )

    return results
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Sogou-Videos: A search engine for retrieving videos from Sogou."""

from urllib.parse import urlencode
from datetime import datetime

from searx.exceptions import SearxEngineAPIException

about = {
    "website": "https://v.sogou.com/",
    "use_official_api": False,
    "require_api_key": False,
    "results": "JSON",
}

categories = ["videos"]
paging = True
results_per_page = 10

# Base URL
base_url = "https://v.sogou.com"


def request(query, params):
    """Build the request URL for Sogou's short-video JSON API."""
    query_params = {
        "page": params["pageno"],
        "pagesize": results_per_page,  # keep in sync with the engine setting instead of hard-coding 10
        "query": query,
    }

    params["url"] = f"{base_url}/api/video/shortVideoV2?{urlencode(query_params)}"
    return params


def response(resp):
    """Parse the JSON video list into searxng result dicts.

    Raises:
        SearxEngineAPIException: when the body is not valid JSON or the
            expected ``data.list`` array is missing/empty.
    """
    try:
        data = resp.json()
    except Exception as e:
        raise SearxEngineAPIException(f"Invalid response: {e}") from e
    results = []

    if not data.get("data", {}).get("list"):
        raise SearxEngineAPIException("Invalid response")

    for entry in data["data"]["list"]:
        if not entry.get("titleEsc") or not entry.get("url"):
            continue

        # Relative player links need the host prepended.
        video_url = entry["url"]
        if video_url.startswith("/vc/np"):
            video_url = f"{base_url}{video_url}"

        published_date = None
        # NOTE(review): "duration" looks like the clip length (mm:ss), not a
        # time of day; combining it with the date as %H:%M is questionable —
        # confirm against the live API before relying on publishedDate.
        if entry.get("date") and entry.get("duration"):
            try:
                published_date = datetime.strptime(f"{entry['date']} {entry['duration']}", "%Y-%m-%d %H:%M")
            except (ValueError, TypeError):
                published_date = None

        results.append(
            {
                'url': video_url,
                'title': entry["titleEsc"],
                # .get() here: direct entry['site'] / entry['duration'] /
                # entry["picurl"] indexing raised KeyError on a single
                # malformed entry and dropped the whole result set.
                'content': f"{entry.get('site', '')} | {entry.get('duration', '')}",
                'template': 'videos.html',
                'publishedDate': published_date,
                'thumbnail': entry.get("picurl", ""),
            }
        )

    return results