summaryrefslogtreecommitdiff
path: root/searx/engines/pubmed.py
blob: 7e5ef2ce13ec5c8fb90eccc326c7cd915523134a (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
# SPDX-License-Identifier: AGPL-3.0-or-later
"""PubMed_ comprises more than 39 million citations for biomedical literature
from MEDLINE, life science journals, and online books. Citations may include
links to full text content from PubMed Central and publisher web sites.

.. _PubMed: https://pubmed.ncbi.nlm.nih.gov/

Configuration
=============

.. code:: yaml

   - name: pubmed
     engine: pubmed
     shortcut: pub

Implementations
===============

"""

import typing as t

from datetime import datetime
from urllib.parse import urlencode

from lxml import etree

from searx.result_types import EngineResults
from searx.network import get
from searx.utils import (
    eval_xpath_getindex,
    eval_xpath_list,
    extract_text,
    ElementType,
)

if t.TYPE_CHECKING:
    from searx.extended_types import SXNG_Response
    from searx.search.processors import OnlineParams


# Engine metadata consumed by searx (shown in preferences / stats pages).
about = {
    "website": "https://www.ncbi.nlm.nih.gov/pubmed/",
    "wikidata_id": "Q1540899",
    "official_api_documentation": {
        "url": "https://www.ncbi.nlm.nih.gov/home/develop/api/",
        "comment": "More info on api: https://www.ncbi.nlm.nih.gov/books/NBK25501/",
    },
    "use_official_api": True,
    "require_api_key": False,
    "results": "XML",
}

# Result categories this engine contributes to.
categories = ["science", "scientific publications"]

# Base URL of the NCBI Entrez E-utilities (esearch / efetch endpoints).
eutils_api = "https://eutils.ncbi.nlm.nih.gov/entrez/eutils"

# engine dependent config
# Number of PMIDs requested per search page.
number_of_results = 10
# Base URL used to build the per-article result link (PMID is appended).
pubmed_url = "https://www.ncbi.nlm.nih.gov/pubmed/"


def request(query: str, params: "OnlineParams") -> None:
    """Build the request URL for *query*.

    PubMed needs two steps: an ESearch request first maps the query string
    to a list of PMIDs, then the EFetch URL (stored in ``params["url"]``)
    retrieves the article metadata for those PMIDs.
    """
    args = urlencode(
        {
            "db": "pubmed",
            "term": query,
            "retstart": (params["pageno"] - 1) * number_of_results,
            # "retmax" is the ESearch parameter that limits the number of
            # returned IDs ("hits" is not a valid parameter and is ignored
            # by the API, which then falls back to its default of 20).
            "retmax": number_of_results,
        }
    )
    esearch_url = f"{eutils_api}/esearch.fcgi?{args}"
    # DTD: https://eutils.ncbi.nlm.nih.gov/eutils/dtd/20060628/esearch.dtd
    esearch_resp: "SXNG_Response" = get(esearch_url, timeout=3)
    pmids_results = etree.XML(esearch_resp.content)
    pmids: list[str] = [i.text for i in pmids_results.xpath("//eSearchResult/IdList/Id")]

    # send efetch request with the IDs from esearch response
    args = urlencode(
        {
            "db": "pubmed",
            "retmode": "xml",
            "id": ",".join(pmids),
        }
    )
    efetch_url = f"{eutils_api}/efetch.fcgi?{args}"
    params["url"] = efetch_url


def response(resp: "SXNG_Response") -> EngineResults:  # pylint: disable=too-many-locals
    """Parse the EFetch XML response into :py:obj:`Paper` results."""

    # DTD: https://dtd.nlm.nih.gov/ncbi/pubmed/out/pubmed_250101.dtd

    # parse efetch response
    efetch_xml = etree.XML(resp.content)
    res = EngineResults()

    def _field_txt(xml: ElementType, xpath_str: str) -> str:
        # First match of xpath_str as plain text, "" when there is no match.
        elem = eval_xpath_getindex(xml, xpath_str, 0, default="")
        return extract_text(elem, allow_none=True) or ""

    for pubmed_article in eval_xpath_list(efetch_xml, "//PubmedArticle"):

        medline_citation: ElementType = eval_xpath_getindex(pubmed_article, "./MedlineCitation", 0)
        pubmed_data: ElementType = eval_xpath_getindex(pubmed_article, "./PubmedData", 0)

        # extract_text (instead of .text) keeps title text that follows
        # inline markup such as <i> or <sub> inside <ArticleTitle>.
        title: str = extract_text(eval_xpath_getindex(medline_citation, ".//Article/ArticleTitle", 0)) or ""
        pmid: str = eval_xpath_getindex(medline_citation, ".//PMID", 0).text
        url: str = pubmed_url + pmid
        content = _field_txt(medline_citation, ".//Abstract/AbstractText//text()")
        doi = _field_txt(medline_citation, ".//ELocationID[@EIdType='doi']/text()")
        journal = _field_txt(medline_citation, "./Article/Journal/Title/text()")
        issn = _field_txt(medline_citation, "./Article/Journal/ISSN/text()")

        authors: list[str] = []

        for author in eval_xpath_list(medline_citation, "./Article/AuthorList/Author"):
            fore_name = eval_xpath_getindex(author, "./ForeName", 0, default=None)
            last_name = eval_xpath_getindex(author, "./LastName", 0, default=None)
            author_name = (
                f"{fore_name.text if fore_name is not None else ''}"
                f" {last_name.text if last_name is not None else ''}"
            ).strip()
            if author_name:
                authors.append(author_name)

        accepted_date = eval_xpath_getindex(
            pubmed_data, "./History//PubMedPubDate[@PubStatus='accepted']", 0, default=None
        )
        pub_date = None
        if accepted_date is not None:
            # Year/Month/Day may individually be missing or empty in some
            # records; skip the date instead of raising in that case.
            year = eval_xpath_getindex(accepted_date, "./Year", 0, default=None)
            month = eval_xpath_getindex(accepted_date, "./Month", 0, default=None)
            day = eval_xpath_getindex(accepted_date, "./Day", 0, default=None)
            if year is not None and month is not None and day is not None:
                try:
                    # TypeError covers empty elements whose .text is None.
                    pub_date = datetime(year=int(year.text), month=int(month.text), day=int(day.text))
                except (ValueError, TypeError):
                    pass

        res.add(
            res.types.Paper(
                url=url,
                title=title,
                content=content,
                journal=journal,
                issn=[issn],
                authors=authors,
                doi=doi,
                publishedDate=pub_date,
            )
        )
    return res