summaryrefslogtreecommitdiff
path: root/searx
diff options
context:
space:
mode:
authorMarkus Heiser <markus.heiser@darmarit.de>2025-09-10 16:37:23 +0200
committerMarkus Heiser <markus.heiser@darmarIT.de>2025-09-20 10:56:46 +0200
commit078c9fcb68fe0e1b75e5aa64040d892faa83c063 (patch)
tree91fe02a7005511316c8583247e9b09d920527053 /searx
parent3ec6d65f9b74c36cd9a7b692b0075557550e870f (diff)
[mod] Crossref engine: revision of the engine (Paper result)
Revision of the engine: use of the result type Paper, as well as other type annotations. Signed-off-by: Markus Heiser <markus.heiser@darmarit.de>
Diffstat (limited to 'searx')
-rw-r--r--searx/engines/crossref.py102
1 file changed, 68 insertions, 34 deletions
diff --git a/searx/engines/crossref.py b/searx/engines/crossref.py
index d8dfc568d..14075a581 100644
--- a/searx/engines/crossref.py
+++ b/searx/engines/crossref.py
@@ -1,14 +1,27 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
-"""CrossRef"""
+"""Crossref_ is the sustainable source of community-owned scholarly metadata and
+is relied upon by thousands of systems across the research ecosystem and the
+globe.
+
+.. _Crossref: https://www.crossref.org/documentation/retrieve-metadata/
+
+"""
+
+import typing as t
from urllib.parse import urlencode
from datetime import datetime
+from searx.result_types import EngineResults
+
+if t.TYPE_CHECKING:
+ from searx.extended_types import SXNG_Response
+ from searx.search.processors import OnlineParams
about = {
"website": "https://www.crossref.org/",
"wikidata_id": "Q5188229",
- "official_api_documentation": "https://api.crossref.org",
- "use_official_api": False,
+ "official_api_documentation": "https://api.crossref.org/swagger-ui/",
+ "use_official_api": True,
"require_api_key": False,
"results": "JSON",
}
@@ -16,48 +29,69 @@ about = {
categories = ["science", "scientific publications"]
paging = True
search_url = "https://api.crossref.org/works"
+"""Returns a list of all works (journal articles, conference proceedings, books,
+components, etc), 20 per page (`Works/get_works`_).
+
+.. _Works/get_works: https://api.crossref.org/swagger-ui/index.html#/Works/get_works
+"""
+
+
+def request(query: str, params: "OnlineParams") -> None:
+ args = {
+ "query": query,
+ "offset": 20 * (params["pageno"] - 1),
+ }
+ params["url"] = f"{search_url}?{urlencode(args)}"
-def request(query, params):
- params["url"] = search_url + "?" + urlencode({"query": query, "offset": 20 * (params["pageno"] - 1)})
- return params
+def response(resp: "SXNG_Response") -> EngineResults:
+ res = EngineResults()
+ json_data = resp.json()
+ def field(k: str) -> str:
+ return str(record.get(k, ""))
-def response(resp):
- results = []
- for record in resp.json()["message"]["items"]:
+ for record in json_data["message"]["items"]:
if record["type"] == "component":
- # These seem to be files published along with papers. Not something you'd search for
+ # These seem to be files published along with papers. Not something
+ # you'd search for.
continue
- result = {
- "template": "paper.html",
- "content": record.get("abstract", ""),
- "doi": record.get("DOI"),
- "pages": record.get("page"),
- "publisher": record.get("publisher"),
- "tags": record.get("subject"),
- "type": record.get("type"),
- "url": record.get("URL"),
- "volume": record.get("volume"),
- }
+ title: str = ""
+ journal: str = ""
+
if record["type"] == "book-chapter":
- result["title"] = record["container-title"][0]
- if record["title"][0].lower().strip() != result["title"].lower().strip():
- result["title"] += f" ({record['title'][0]})"
+ title = record["container-title"][0]
+ if record["title"][0].lower().strip() != title.lower().strip():
+ title += f" ({record['title'][0]})"
else:
- result["title"] = record["title"][0] if "title" in record else record.get("container-title", [None])[0]
- result["journal"] = record.get("container-title", [None])[0] if "title" in record else None
+ title = record["title"][0] if "title" in record else record.get("container-title", [None])[0]
+ journal = record.get("container-title", [None])[0] if "title" in record else ""
+
+ item = res.types.Paper(
+ title=title,
+ journal=journal,
+ content=field("abstract"),
+ doi=field("DOI"),
+ pages=field("page"),
+ publisher=field("publisher"),
+ tags=record.get("subject"),
+ type=field("type"),
+ url=field("URL"),
+ volume=field("volume"),
+ )
+ res.add(item)
if "resource" in record and "primary" in record["resource"] and "URL" in record["resource"]["primary"]:
- result["url"] = record["resource"]["primary"]["URL"]
+ item.url = record["resource"]["primary"]["URL"]
+
if "published" in record and "date-parts" in record["published"]:
- result["publishedDate"] = datetime(*(record["published"]["date-parts"][0] + [1, 1][:3]))
- result["authors"] = [a.get("given", "") + " " + a.get("family", "") for a in record.get("author", [])]
- result["isbn"] = record.get("isbn") or [i["value"] for i in record.get("isbn-type", [])]
- # All the links are not PDFs, even if the URL ends with ".pdf"
- # result["pdf_url"] = record.get("link", [{"URL": None}])[0]["URL"]
+ item.publishedDate = datetime(*(record["published"]["date-parts"][0] + [1, 1][:3]))
+
+ item.authors = [a.get("given", "") + " " + a.get("family", "") for a in record.get("author", [])]
+ item.isbn = record.get("isbn") or [i["value"] for i in record.get("isbn-type", [])]
- results.append(result)
+ # All the links are not PDFs, even if the URL ends with ".pdf"
+ # item.pdf_url = record.get("link", [{"URL": None}])[0]["URL"]
- return results
+ return res