path: root/searx/engines/core.py
# SPDX-License-Identifier: AGPL-3.0-or-later
"""CORE_ (COnnecting REpositories) provides a comprehensive bibliographic
database of the world’s scholarly literature, collecting and indexing
research from repositories and journals.

.. _CORE: https://core.ac.uk/about

.. note::

   The CORE engine requires an :py:obj:`API key <api_key>`.

.. _core engine config:

Configuration
=============

The engine has the following additional settings:

- :py:obj:`api_key`

.. code:: yaml

  - name: core.ac.uk
    api_key: "..."
    inactive: false

Implementations
===============

"""

import typing as t

from datetime import datetime
from urllib.parse import urlencode

from searx.result_types import EngineResults

if t.TYPE_CHECKING:
    import logging

    from searx.extended_types import SXNG_Response
    from searx.search.processors import OnlineParams

    # ``logger`` is injected into the engine's namespace by the engine loader
    # at runtime; this assignment only informs the type checker.
    logger = logging.getLogger()


about = {
    "website": "https://core.ac.uk",
    "wikidata_id": "Q22661180",
    "official_api_documentation": "https://api.core.ac.uk/docs/v3",
    "use_official_api": True,
    "require_api_key": True,
    "results": "JSON",
}

api_key = ""
"""For an API key register at https://core.ac.uk/services/api and insert
the API key in the engine :ref:`core engine config`."""

categories = ["science", "scientific publications"]
paging = True
nb_per_page = 10
base_url = "https://api.core.ac.uk/v3/search/works/"


def setup(engine_settings: dict[str, t.Any]) -> bool:
    """Initialization of the CORE_ engine, checks whether the :py:obj:`api_key`
    is set, otherwise the engine is inactive.
    """

    key: str = engine_settings.get("api_key", "")
    if key and key not in ("unset", "unknown", "..."):
        return True
    logger.error("CORE's API key is not set or invalid.")
    return False


def request(query: str, params: "OnlineParams") -> None:

    # Paging is offset based; the query parameters follow CORE's API v3
    # search endpoint.
    search_params = {
        "q": query,
        "offset": (params["pageno"] - 1) * nb_per_page,
        "limit": nb_per_page,
        "sort": "relevance",
    }
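
    # For example, page 2 of a query for "searx" is requested as (a sketch
    # derived from the parameters above, not a recorded request):
    #
    #   https://api.core.ac.uk/v3/search/works/?q=searx&offset=10&limit=10&sort=relevance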

    params["url"] = base_url + "?" + urlencode(search_params)
    params["headers"] = {"Authorization": f"Bearer {api_key}"}


def response(resp: "SXNG_Response") -> EngineResults:
    # pylint: disable=too-many-branches
    res = EngineResults()
    json_data = resp.json()
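
    # Sketch of the payload shape this parser relies on - only the fields
    # consumed below are shown; everything else CORE returns is ignored:
    #
    #   {
    #     "results": [
    #       {
    #         "title": "...",
    #         "doi": "10.../...",
    #         "id": 123,
    #         "downloadUrl": "https://...",
    #         "sourceFulltextUrls": "https://...",
    #         "publishedDate": "2020-01-01T00:00:00Z",
    #         "depositedDate": "...",
    #         "journals": [{"title": "..."}],
    #         "publisher": "...",
    #         "authors": [{"name": "Doe, Jane"}],
    #         "fullText": "...",
    #         "fieldOfStudy": ["..."],
    #         "documentType": "...",
    #         "contributors": ["..."]
    #       }
    #     ]
    #   }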

    for result in json_data.get("results", []):
        # skip results without a title
        if not result.get("title"):
            continue

        # Get URL - a DOI reference is preferred; fall back to the CORE work
        # page, the download URL or the source fulltext URL, in that order.
        url: str | None = None
        if result.get("doi"):
            url = "https://doi.org/" + str(result["doi"])
        elif result.get("id"):
            url = "https://core.ac.uk/works/" + str(result["id"])
        elif result.get("downloadUrl"):
            url = result["downloadUrl"]
        elif result.get("sourceFulltextUrls"):
            url = result["sourceFulltextUrls"]
        else:
            continue

        # Published date - fall back to the deposited date; the "Z" suffix is
        # replaced since datetime.fromisoformat() accepts it only in Python 3.11+
        published_date = None

        raw_date = result.get("publishedDate") or result.get("depositedDate")
        if raw_date:
            try:
                published_date = datetime.fromisoformat(raw_date.replace("Z", "+00:00"))
            except (ValueError, AttributeError):
                pass

        # Handle journals
        journals = []
        if result.get("journals"):
            journals = [j.get("title") for j in result["journals"] if j.get("title")]

        # Handle publisher - the name is sometimes wrapped in single quotes
        publisher = (result.get("publisher") or "").strip("'")

        # Handle authors
        authors: set[str] = set()
        for i in result.get("authors", []):
            name: str | None = i.get("name")
            if name:
                authors.add(name)

        res.add(
            res.types.Paper(
                title=result.get("title"),
                url=url,
                content=result.get("fullText", "") or "",
                tags=result.get("fieldOfStudy", []),
                publishedDate=published_date,
                type=result.get("documentType", "") or "",
                authors=authors,
                editor=", ".join(result.get("contributors", [])),
                publisher=publisher,
                journal=", ".join(journals),
                doi=result.get("doi"),
                pdf_url=result.get("downloadUrl") or result.get("sourceFulltextUrls") or "",
            )
        )

    return res