# SPDX-License-Identifier: AGPL-3.0-or-later
"""CORE_ (COnnecting REpositories) provides a comprehensive bibliographic
database of the world's scholarly literature, collecting and indexing
research from repositories and journals.

.. _CORE: https://core.ac.uk/about
.. _core engine config:

Configuration
=============

The engine has the following additional settings:

- :py:obj:`api_key`

.. code:: yaml

  - name: core.ac.uk
    engine: core
    categories: science
    shortcut: cor
    api_key: "..."
    timeout: 5

Implementations
===============

"""
# pylint: disable=too-many-branches

from datetime import datetime
from urllib.parse import urlencode

from searx.exceptions import SearxEngineAPIException

about = {
    "website": 'https://core.ac.uk',
    "wikidata_id": 'Q22661180',
    "official_api_documentation": 'https://api.core.ac.uk/docs/v3',
    "use_official_api": True,
    "require_api_key": True,
    "results": 'JSON',
}

api_key = 'unset'
"""For an API key register at https://core.ac.uk/services/api and insert
the API key in the engine :ref:`core engine config`."""

categories = ['science', 'scientific publications']
paging = True
nb_per_page = 10

base_url = 'https://api.core.ac.uk/v3/search/works/'


def request(query, params):
    if api_key == 'unset':
        raise SearxEngineAPIException('missing CORE API key')

    # query parameters of the v3 /search/works endpoint
    search_params = {
        'q': query,
        'offset': (params['pageno'] - 1) * nb_per_page,
        'limit': nb_per_page,
        'sort': 'relevance',
    }

    params['url'] = base_url + '?' + urlencode(search_params)
    params['headers'] = {'Authorization': f'Bearer {api_key}'}

    return params
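
# For reference, a page-2 request for "machine learning" produces a URL of
# roughly this shape (percent-encoding is left to urlencode, nb_per_page=10):
#
#   https://api.core.ac.uk/v3/search/works/?q=machine+learning&offset=10&limit=10&sort=relevance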


def response(resp):
    results = []
    json_data = resp.json()
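    # Per the API docs (https://api.core.ac.uk/docs/v3), the search response
    # is an envelope roughly of the form
    #   {"totalHits": ..., "limit": ..., "offset": ..., "results": [{...}, ...]}
    # Every field of a work record is treated as optional below.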
    for result in json_data.get('results', []):
        # skip results without a title
        if not result.get('title'):
            continue

        # pick the result URL, preferring the most stable reference first:
        # the DOI, then the CORE work page, then any full-text link
        if result.get('doi'):
            url = 'https://doi.org/' + str(result['doi'])
        elif result.get('id'):
            url = 'https://core.ac.uk/works/' + str(result['id'])
        elif result.get('downloadUrl'):
            url = result['downloadUrl']
        elif result.get('sourceFulltextUrls'):
            url = result['sourceFulltextUrls']
        else:
            continue
        # published date -- fall back to the deposit date when no
        # publication date is given
        published_date = None
        raw_date = result.get('publishedDate') or result.get('depositedDate')
        if raw_date:
            try:
                published_date = datetime.fromisoformat(raw_date.replace('Z', '+00:00'))
            except (ValueError, AttributeError):
                pass
        # collect journal titles
        journals = []
        if result.get('journals'):
            journals = [j.get('title') for j in result['journals'] if j.get('title')]
        # publisher names may arrive wrapped in single quotes; strip them once
        publisher = (result.get('publisher') or '').strip("'")
        # deduplicate author names via a set
        authors = set()
        for author in result.get('authors', []):
            name = author.get('name')
            if name:
                authors.add(name)
        results.append(
            {
                'template': 'paper.html',
                'title': result.get('title'),
                'url': url,
                'content': result.get('fullText', '') or '',
                # 'comments': '',
                'tags': result.get('fieldOfStudy', []),
                'publishedDate': published_date,
                'type': result.get('documentType', '') or '',
                'authors': authors,
                'editor': ', '.join(result.get('contributors', [])),
                'publisher': publisher,
                'journal': ', '.join(journals),
                'doi': result.get('doi'),
                # 'issn' : ''
                # 'isbn' : ''
                'pdf_url': result.get('downloadUrl') or result.get('sourceFulltextUrls') or '',
            }
        )

    return results
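
# To try the engine locally, add the YAML snippet from the module docstring
# to your settings.yml, start a development instance (e.g. `make run` in a
# SearXNG checkout), and search with the `!cor` bang.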