diff options
| author | Zhijie He <hezhijie0327@hotmail.com> | 2025-02-23 13:35:23 +0800 |
|---|---|---|
| committer | Bnyro <bnyro@tutanota.com> | 2025-03-02 13:25:35 +0100 |
| commit | 71d1504e572074327f851688a4caac90a5e41fe8 (patch) | |
| tree | b5a7510936b1452c4c3d2b9b570d999ed58459e8 /searx/engines | |
| parent | 80f5fad16e830a9fea9e73c0eaa5b36c27566d9c (diff) | |
[feat] add 360search engine for searxng
Co-authored-by: Bnyro <bnyro@tutanota.com>
Diffstat (limited to 'searx/engines')
| -rw-r--r-- | searx/engines/360search.py | 67 | ||||
| -rw-r--r-- | searx/engines/360search_videos.py | 64 |
2 files changed, 131 insertions, 0 deletions
# SPDX-License-Identifier: AGPL-3.0-or-later
# pylint: disable=invalid-name
"""360Search search engine for searxng"""

from urllib.parse import urlencode
from lxml import html

from searx.utils import extract_text

# Metadata
about = {
    "website": "https://www.so.com/",
    "wikidata_id": "Q10846064",
    "use_official_api": False,
    "require_api_key": False,
    "results": "HTML",
}

# Engine Configuration
categories = ["general"]
paging = True
time_range_support = True

# Maps searxng time-range names onto so.com's "adv_t" query parameter values.
time_range_dict = {'day': 'd', 'week': 'w', 'month': 'm', 'year': 'y'}

# Base URL
base_url = "https://www.so.com"


def request(query, params):
    """Build the so.com request URL for *query*.

    Sets ``params["url"]`` to ``https://www.so.com/s?...`` with the page
    number (``pn``), the query (``q``) and — when a supported time range is
    selected — the ``adv_t`` filter.  Returns the mutated ``params`` dict.
    """
    query_params = {
        "pn": params["pageno"],
        "q": query,
    }

    # Single lookup instead of the original double dict access; params.get()
    # is defensive in case 'time_range' is absent from params.
    time_range = time_range_dict.get(params.get('time_range'))
    if time_range:
        query_params["adv_t"] = time_range

    params["url"] = f"{base_url}/s?{urlencode(query_params)}"
    return params


def response(resp):
    """Parse so.com's HTML result page into a list of result dicts."""
    dom = html.fromstring(resp.text)
    results = []

    for item in dom.xpath('//li[contains(@class, "res-list")]'):
        title = extract_text(item.xpath('.//h3[contains(@class, "res-title")]/a'))

        # Prefer the direct target URL in data-mdurl; fall back to the
        # (possibly redirecting) href when it is missing.
        url = extract_text(item.xpath('.//h3[contains(@class, "res-title")]/a/@data-mdurl'))
        if not url:
            url = extract_text(item.xpath('.//h3[contains(@class, "res-title")]/a/@href'))

        # Snippet text lives in either res-desc or res-list-summary
        # depending on the result layout.
        content = extract_text(item.xpath('.//p[@class="res-desc"]'))
        if not content:
            content = extract_text(item.xpath('.//span[@class="res-list-summary"]'))

        # Skip entries without both a title and a URL.
        if title and url:
            results.append(
                {
                    "title": title,
                    "url": url,
                    "content": content,
                }
            )

    return results
# SPDX-License-Identifier: AGPL-3.0-or-later
# pylint: disable=invalid-name
"""360Search-Videos: A search engine for retrieving videos from 360Search."""

from urllib.parse import urlencode
from datetime import datetime

from searx.exceptions import SearxEngineAPIException
from searx.utils import html_to_text

about = {
    "website": "https://tv.360kan.com/",
    "use_official_api": False,
    "require_api_key": False,
    "results": "JSON",
}

paging = True
results_per_page = 10
categories = ["videos"]

base_url = "https://tv.360kan.com"


def request(query, params):
    """Build the 360kan video-list API URL for *query*.

    Sets ``params["url"]`` and returns the mutated ``params`` dict.
    """
    # NOTE(review): searxng's pageno is 1-based, so the first request starts
    # at offset 10 — confirm against the API whether "start" is a 0-based
    # offset and (pageno - 1) * 10 is intended.
    query_params = {"count": 10, "q": query, "start": params["pageno"] * 10}

    params["url"] = f"{base_url}/v1/video/list?{urlencode(query_params)}"
    return params


def response(resp):
    """Parse the JSON video-list response into searxng video results.

    Raises SearxEngineAPIException when the body is not JSON or lacks the
    expected ``data.result`` structure.
    """
    try:
        data = resp.json()
    except Exception as e:
        raise SearxEngineAPIException(f"Invalid response: {e}") from e

    results = []

    if "data" not in data or "result" not in data["data"]:
        raise SearxEngineAPIException("Invalid response")

    for entry in data["data"]["result"]:
        # Only title and play_url are mandatory; entries lacking them are
        # skipped rather than aborting the page.
        if not entry.get("title") or not entry.get("play_url"):
            continue

        published_date = None
        if entry.get("publish_time"):
            try:
                published_date = datetime.fromtimestamp(int(entry["publish_time"]))
            except (ValueError, TypeError):
                published_date = None

        results.append(
            {
                'url': entry["play_url"],
                'title': html_to_text(entry["title"]),
                # .get() with fallbacks: only title/play_url were validated
                # above, so a direct subscript on "description"/"cover_img"
                # would raise KeyError (and html_to_text(None) would fail)
                # for a single malformed entry, killing the whole page.
                'content': html_to_text(entry.get("description") or ""),
                'template': 'videos.html',
                'publishedDate': published_date,
                'thumbnail': entry.get("cover_img") or '',
            }
        )

    return results