# SPDX-License-Identifier: AGPL-3.0-or-later
"""`Open Library`_ is an open, editable library catalog, building towards a web
page for every book ever published.

.. _Open Library: https://openlibrary.org

Configuration
=============

The service sometimes takes a very long time to respond, the ``timeout`` may
need to be adjusted.

.. code:: yaml

  - name: openlibrary
    engine: openlibrary
    shortcut: ol
    timeout: 10


Implementations
===============

"""

from datetime import datetime
import typing as t

from urllib.parse import urlencode
from dateutil import parser

from searx.result_types import EngineResults

if t.TYPE_CHECKING:
    from searx.extended_types import SXNG_Response
    from searx.search.processors import OnlineParams

# Engine metadata consumed by searx's engine framework (shown on the
# preferences/about pages).
about = {
    "website": "https://openlibrary.org",
    "wikidata_id": "Q1201876",
    "require_api_key": False,
    "use_official_api": False,
    "official_api_documentation": "https://openlibrary.org/developers/api",
}

# The search.json endpoint supports a ``page`` argument (see request()).
paging = True
categories = ["general", "books"]

# Base URL used to build result links from each doc's ``key`` field.
base_url = "https://openlibrary.org"
search_api = "https://openlibrary.org/search.json"
"""The engine uses the API at the endpoint search.json_.

.. _search.json: https://openlibrary.org/dev/docs/api/search
"""
# Number of results requested per page (``limit`` query argument).
results_per_page = 10


def request(query: str, params: "OnlineParams") -> None:
    """Assemble the search.json URL for *query* and the requested page.

    Requests all fields (``fields=*``) so :py:func:`response` can pick what
    it needs from each doc.
    """
    query_string = urlencode(
        {
            "q": query,
            "page": params["pageno"],
            "limit": results_per_page,
            "fields": "*",
        }
    )
    params["url"] = f"{search_api}?{query_string}"
    logger.debug("REST API: %s", params["url"])


def response(resp: "SXNG_Response") -> EngineResults:
    """Build result items from a search.json response.

    Each doc becomes a ``Paper`` result.  The publication date is the
    earliest parsable entry of the free-form ``publish_date`` list, falling
    back to ``first_publish_year`` when none of the entries can be parsed.
    """
    res = EngineResults()
    json_data = resp.json()

    for item in json_data.get("docs", []):
        # Cover images are served by archive.org for lendable items only.
        cover = ""
        if "lending_identifier_s" in item:
            cover = f"https://archive.org/services/img/{item['lending_identifier_s']}"

        # "publish_date" is a list of free-form date strings; keep the
        # earliest one dateutil can parse.  Previously, when the list
        # contained only unparsable strings, the raw list itself leaked
        # through as publishedDate -- now we fall back properly.
        published = None
        parsed_dates = [d for d in map(_parse_date, item.get("publish_date", [])) if d]
        if parsed_dates:
            published = min(parsed_dates)
        elif item.get("first_publish_year") is not None:
            published = _parse_date(str(item["first_publish_year"]))

        content = " / ".join(item.get("first_sentence", []))
        res.add(
            res.types.Paper(
                # "key" already starts with a slash ("/works/OL..."); strip it
                # to avoid a double slash in the result URL.
                url=f"{base_url}/{item['key'].lstrip('/')}",
                title=item["title"],
                content=content,
                isbn=item.get("isbn", [])[:5],
                authors=item.get("author_name", []),
                thumbnail=cover,
                publishedDate=published,
                tags=item.get("subject", [])[:10] + item.get("place", [])[:10],
            )
        )
    return res


def _parse_date(date: str) -> datetime | None:
    """Parse a free-form date string, returning ``None`` when it is empty or
    cannot be parsed.

    :param date: free-form date string as found in the API's ``publish_date``
        list (e.g. ``"May 1988"``).
    """
    if not date:
        return None
    try:
        return parser.parse(date)
    # dateutil raises OverflowError (in addition to ParserError) for dates
    # outside the supported range -- both just mean "no usable date here".
    except (parser.ParserError, OverflowError):
        return None