diff options
| author | Markus Heiser <markus.heiser@darmarit.de> | 2025-10-13 09:28:42 +0200 |
|---|---|---|
| committer | Markus Heiser <markus.heiser@darmarIT.de> | 2025-10-20 10:18:33 +0200 |
| commit | 937165853185ca16b0da6f72bc42bd1487ea0dcb (patch) | |
| tree | 70da1dce7cb51d0f59cd36392103e8f33318f379 /searx/engines | |
| parent | ee6d4f322f4bda18759ffb99380a06923424695b (diff) | |
[mod] typification of SearXNG: add new result type File
This PR adds a new result type: File
Python class: searx/result_types/file.py
Jinja template: searx/templates/simple/result_templates/file.html
CSS (less) client/simple/src/less/result_types/file.less
Class 'File' (singular) replaces template 'files.html' (plural). The renaming
was carried out because there is only one file (singular) in a result. Not to be
confused with the category 'files', in which multiple results can exist.
As mentioned in issue [1], the class '.category-files' was removed from the CSS
and the stylesheet was adapted in result_types/file.less (there it is based on the
templates and no longer on the category).
[1] https://github.com/searxng/searxng/issues/5198
Signed-off-by: Markus Heiser <markus.heiser@darmarit.de>
Diffstat (limited to 'searx/engines')
| -rw-r--r-- | searx/engines/recoll.py | 172 | ||||
| -rw-r--r-- | searx/engines/wikicommons.py | 258 |
2 files changed, 273 insertions, 157 deletions
diff --git a/searx/engines/recoll.py b/searx/engines/recoll.py index b7499b5a5..ee97f330d 100644 --- a/searx/engines/recoll.py +++ b/searx/engines/recoll.py @@ -13,23 +13,12 @@ Configuration You must configure the following settings: -``base_url``: - Location where recoll-webui can be reached. +- :py:obj:`base_url` +- :py:obj:`mount_prefix` +- :py:obj:`dl_prefix` +- :py:obj:`search_dir` -``mount_prefix``: - Location where the file hierarchy is mounted on your *local* filesystem. - -``dl_prefix``: - Location where the file hierarchy as indexed by recoll can be reached. - -``search_dir``: - Part of the indexed file hierarchy to be search, if empty the full domain is - searched. - -Example -======= - -Scenario: +Example scenario: #. Recoll indexes a local filesystem mounted in ``/export/documents/reference``, #. the Recoll search interface can be reached at https://recoll.example.org/ and @@ -37,107 +26,128 @@ Scenario: .. code:: yaml - base_url: https://recoll.example.org/ + base_url: https://recoll.example.org mount_prefix: /export/documents dl_prefix: https://download.example.org - search_dir: '' + search_dir: "" Implementations =============== """ +import typing as t from datetime import date, timedelta -from json import loads from urllib.parse import urlencode, quote -# about +from searx.result_types import EngineResults + +if t.TYPE_CHECKING: + from searx.extended_types import SXNG_Response + from searx.search.processors import OnlineParams + + about = { "website": None, - "wikidata_id": 'Q15735774', - "official_api_documentation": 'https://www.lesbonscomptes.com/recoll/', + "wikidata_id": "Q15735774", + "official_api_documentation": "https://www.lesbonscomptes.com/recoll/", "use_official_api": True, "require_api_key": False, - "results": 'JSON', + "results": "JSON", } -# engine dependent config paging = True time_range_support = True -# parameters from settings.yml -base_url = None -search_dir = '' -mount_prefix = None -dl_prefix = None - -# embedded 
-embedded_url = '<{ttype} controls height="166px" ' + 'src="{url}" type="{mtype}"></{ttype}>' +base_url: str = "" +"""Location where recoll-webui can be reached.""" +mount_prefix: str = "" +"""Location where the file hierarchy is mounted on your *local* filesystem.""" -# helper functions -def get_time_range(time_range): - sw = {'day': 1, 'week': 7, 'month': 30, 'year': 365} # pylint: disable=invalid-name - - offset = sw.get(time_range, 0) - if not offset: - return '' - - return (date.today() - timedelta(days=offset)).isoformat() +dl_prefix: str = "" +"""Location where the file hierarchy as indexed by recoll can be reached.""" +search_dir: str = "" +"""Part of the indexed file hierarchy to be search, if empty the full domain is +searched.""" -# do search-request -def request(query, params): - search_after = get_time_range(params['time_range']) - search_url = base_url + 'json?{query}&highlight=0' - params['url'] = search_url.format( - query=urlencode({'query': query, 'page': params['pageno'], 'after': search_after, 'dir': search_dir}) - ) +_s2i: dict[str | None, int] = {"day": 1, "week": 7, "month": 30, "year": 365} - return params +def setup(engine_settings: dict[str, t.Any]) -> bool: + """Initialization of the Recoll engine, checks if the mandatory values are + configured. 
+ """ + missing: list[str] = [] + for cfg_name in ["base_url", "mount_prefix", "dl_prefix"]: + if not engine_settings.get(cfg_name): + missing.append(cfg_name) + if missing: + logger.error("missing recoll configuration: %s", missing) + return False -# get response from search-request -def response(resp): - results = [] + if engine_settings["base_url"].endswith("/"): + engine_settings["base_url"] = engine_settings["base_url"][:-1] + return True - response_json = loads(resp.text) - if not response_json: - return [] +def search_after(time_range: str | None) -> str: + offset = _s2i.get(time_range, 0) + if not offset: + return "" + return (date.today() - timedelta(days=offset)).isoformat() - for result in response_json.get('results', []): - title = result['label'] - url = result['url'].replace('file://' + mount_prefix, dl_prefix) - content = '{}'.format(result['snippet']) - # append result - item = {'url': url, 'title': title, 'content': content, 'template': 'files.html'} +def request(query: str, params: "OnlineParams") -> None: + args = { + "query": query, + "page": params["pageno"], + "after": search_after(params["time_range"]), + "dir": search_dir, + "highlight": 0, + } + params["url"] = f"{base_url}/json?{urlencode(args)}" - if result['size']: - item['size'] = int(result['size']) - for parameter in ['filename', 'abstract', 'author', 'mtype', 'time']: - if result[parameter]: - item[parameter] = result[parameter] +def response(resp: "SXNG_Response") -> EngineResults: - # facilitate preview support for known mime types - if 'mtype' in result and '/' in result['mtype']: - (mtype, subtype) = result['mtype'].split('/') - item['mtype'] = mtype - item['subtype'] = subtype + res = EngineResults() + json_data = resp.json() - if mtype in ['audio', 'video']: - item['embedded'] = embedded_url.format( - ttype=mtype, url=quote(url.encode('utf8'), '/:'), mtype=result['mtype'] - ) + if not json_data: + return res - if mtype in ['image'] and subtype in ['bmp', 'gif', 'jpeg', 'png']: 
- item['thumbnail'] = url + for result in json_data.get("results", []): - results.append(item) + url = result.get("url", "").replace("file://" + mount_prefix, dl_prefix) - if 'nres' in response_json: - results.append({'number_of_results': response_json['nres']}) + mtype = subtype = result.get("mime", "") + if mtype: + mtype, subtype = (mtype.split("/", 1) + [""])[:2] - return results + # facilitate preview support for known mime types + thumbnail = embedded = "" + if mtype in ["audio", "video"]: + embedded_url = '<{ttype} controls height="166px" ' + 'src="{url}" type="{mtype}"></{ttype}>' + embedded = embedded_url.format(ttype=mtype, url=quote(url.encode("utf8"), "/:"), mtype=result["mtype"]) + if mtype in ["image"] and subtype in ["bmp", "gif", "jpeg", "png"]: + thumbnail = url + + res.add( + res.types.File( + title=result.get("label", ""), + url=url, + content=result.get("snippet", ""), + size=result.get("size", ""), + filename=result.get("filename", ""), + abstract=result.get("abstract", ""), + author=result.get("author", ""), + mtype=mtype, + subtype=subtype, + time=result.get("time", ""), + embedded=embedded, + thumbnail=thumbnail, + ) + ) + return res diff --git a/searx/engines/wikicommons.py b/searx/engines/wikicommons.py index 718cd45d5..1cb2cb577 100644 --- a/searx/engines/wikicommons.py +++ b/searx/engines/wikicommons.py @@ -1,102 +1,208 @@ # SPDX-License-Identifier: AGPL-3.0-or-later -"""Wikimedia Commons (images)""" +"""`Wikimedia Commons`_ is a collection of more than 120 millions freely usable +media files to which anyone can contribute. -import datetime +This engine uses the `MediaWiki query API`_, with which engines can be configured +for searching images, videos, audio, and other files in the Wikimedia. + +.. _MediaWiki query API: https://commons.wikimedia.org/w/api.php?action=help&modules=query +.. _Wikimedia Commons: https://commons.wikimedia.org/ + + +Configuration +============= + +The engine has the following additional settings: + +.. 
code:: yaml + + - name: wikicommons.images + engine: wikicommons + wc_search_type: image + + - name: wikicommons.videos + engine: wikicommons + wc_search_type: video + + - name: wikicommons.audio + engine: wikicommons + wc_search_type: audio + + - name: wikicommons.files + engine: wikicommons + wc_search_type: file -from urllib.parse import urlencode + +Implementations +=============== + +""" + +import typing as t + +import datetime +import pathlib +from urllib.parse import urlencode, unquote from searx.utils import html_to_text, humanize_bytes +from searx.result_types import EngineResults + +if t.TYPE_CHECKING: + from searx.extended_types import SXNG_Response + from searx.search.processors import OnlineParams -# about about = { - "website": 'https://commons.wikimedia.org/', - "wikidata_id": 'Q565', - "official_api_documentation": 'https://commons.wikimedia.org/w/api.php', + "website": "https://commons.wikimedia.org/", + "wikidata_id": "Q565", + "official_api_documentation": "https://commons.wikimedia.org/w/api.php", "use_official_api": True, "require_api_key": False, - "results": 'JSON', + "results": "JSON", } -categories = ['images'] -search_type = 'images' - -base_url = "https://commons.wikimedia.org" -search_prefix = ( - '?action=query' - '&format=json' - '&generator=search' - '&gsrnamespace=6' - '&gsrprop=snippet' - '&prop=info|imageinfo' - '&iiprop=url|size|mime' - '&iiurlheight=180' # needed for the thumb url -) + +categories: list[str] = [] paging = True number_of_results = 10 -search_types = { - 'images': 'bitmap|drawing', - 'videos': 'video', - 'audio': 'audio', - 'files': 'multimedia|office|archive|3d', +wc_api_url = "https://commons.wikimedia.org/w/api.php" +wc_search_type: str = "" + +SEARCH_TYPES: dict[str, str] = { + "image": "bitmap|drawing", + "video": "video", + "audio": "audio", + "file": "multimedia|office|archive|3d", } +# FileType = t.Literal["bitmap", "drawing", "video", "audio", "multimedia", "office", "archive", "3d"] +# FILE_TYPES = 
list(t.get_args(FileType)) -def request(query, params): - language = 'en' - if params['language'] != 'all': - language = params['language'].split('-')[0] +def setup(engine_settings: dict[str, t.Any]) -> bool: + """Initialization of the Wikimedia engine, checks if the value configured in + :py:obj:`wc_search_type` is valid.""" - if search_type not in search_types: - raise ValueError(f"Unsupported search type: {search_type}") + if engine_settings.get("wc_search_type") not in SEARCH_TYPES: + logger.error( + "wc_search_type: %s isn't a valid file type (%s)", + engine_settings.get("wc_search_type"), + ",".join(SEARCH_TYPES.keys()), + ) + return False + return True - filetype = search_types[search_type] +def request(query: str, params: "OnlineParams") -> None: + uselang: str = "en" + if params["searxng_locale"] != "all": + uselang = params["searxng_locale"].split("-")[0] + filetype = SEARCH_TYPES[wc_search_type] args = { - 'uselang': language, - 'gsrlimit': number_of_results, - 'gsroffset': number_of_results * (params["pageno"] - 1), - 'gsrsearch': f"filetype:{filetype} {query}", + # https://commons.wikimedia.org/w/api.php + "format": "json", + "uselang": uselang, + "action": "query", + # https://commons.wikimedia.org/w/api.php?action=help&modules=query + "prop": "info|imageinfo", + # generator (gsr optins) https://commons.wikimedia.org/w/api.php?action=help&modules=query%2Bsearch + "generator": "search", + "gsrnamespace": "6", # https://www.mediawiki.org/wiki/Help:Namespaces#Renaming_namespaces + "gsrprop": "snippet", + "gsrlimit": number_of_results, + "gsroffset": number_of_results * (params["pageno"] - 1), + "gsrsearch": f"filetype:{filetype} {query}", + # imageinfo: https://commons.wikimedia.org/w/api.php?action=help&modules=query%2Bimageinfo + "iiprop": "url|size|mime", + "iiurlheight": "180", # needed for the thumb url } + params["url"] = f"{wc_api_url}?{urlencode(args, safe=':|')}" - params["url"] = f"{base_url}/w/api.php{search_prefix}&{urlencode(args, 
safe=':|')}" - return params +def response(resp: "SXNG_Response") -> EngineResults: -def response(resp): - results = [] - json = resp.json() + res = EngineResults() + json_data = resp.json() + pages = json_data.get("query", {}).get("pages", {}).values() - if not json.get("query", {}).get("pages"): - return results - for item in json["query"]["pages"].values(): + for item in pages: + + if not item.get("imageinfo", []): + continue imageinfo = item["imageinfo"][0] - title = item["title"].replace("File:", "").rsplit('.', 1)[0] - result = { - 'url': imageinfo["descriptionurl"], - 'title': title, - 'content': html_to_text(item["snippet"]), - } - - if search_type == "images": - result['template'] = 'images.html' - result['img_src'] = imageinfo["url"] - result['thumbnail_src'] = imageinfo["thumburl"] - result['resolution'] = f'{imageinfo["width"]} x {imageinfo["height"]}' - else: - result['thumbnail'] = imageinfo["thumburl"] - - if search_type == "videos": - result['template'] = 'videos.html' - if imageinfo.get('duration'): - result['length'] = datetime.timedelta(seconds=int(imageinfo['duration'])) - result['iframe_src'] = imageinfo['url'] - elif search_type == "files": - result['template'] = 'files.html' - result['metadata'] = imageinfo['mime'] - result['size'] = humanize_bytes(imageinfo['size']) - elif search_type == "audio": - result['iframe_src'] = imageinfo['url'] - - results.append(result) - - return results + + title: str = item["title"].replace("File:", "").rsplit(".", 1)[0] + content = html_to_text(item["snippet"]) + + url: str = imageinfo["descriptionurl"] + media_url: str = imageinfo["url"] + mimetype: str = imageinfo["mime"] + thumbnail: str = imageinfo["thumburl"] + size = imageinfo.get("size") + if size: + size = humanize_bytes(size) + + duration = None + seconds: str = imageinfo.get("duration") + if seconds: + try: + duration = datetime.timedelta(seconds=int(seconds)) + except OverflowError: + pass + + if wc_search_type == "file": + res.add( + 
res.types.File( + title=title, + url=url, + content=content, + size=size, + mimetype=mimetype, + filename=unquote(pathlib.Path(media_url).name), + embedded=media_url, + thumbnail=thumbnail, + ) + ) + continue + + if wc_search_type == "image": + res.add( + res.types.LegacyResult( + template="images.html", + title=title, + url=url, + content=content, + img_src=imageinfo["url"], + thumbnail_src=thumbnail, + resolution=f"{imageinfo['width']} x {imageinfo['height']}", + img_format=imageinfo["mime"], + filesize=size, + ) + ) + continue + + if wc_search_type == "video": + res.add( + res.types.LegacyResult( + template="videos.html", + title=title, + url=url, + content=content, + iframe_src=media_url, + length=duration, + ) + ) + continue + + if wc_search_type == "audio": + res.add( + res.types.MainResult( + template="default.html", + title=title, + url=url, + content=content, + audio_src=media_url, + length=duration, + ) + ) + continue + + return res |