diff options
| author | Alexandre Flament <alex@al-f.net> | 2021-04-24 09:11:20 +0200 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2021-04-24 09:11:20 +0200 |
| commit | 0603b043ceadbc833410464bbf576fda8c11dc97 (patch) | |
| tree | 5fd72422357aeb7dc57198ece210a53c155304d1 /searx/engines | |
| parent | 4863c1933e523b4d053de3bc49b3b466c96411d9 (diff) | |
| parent | 6d41255eb19d88fd7a0c94106a6d7fc4e948d018 (diff) | |
Merge pull request #16 from return42/add-core.ac
Add a search engine for core.ac.uk
Diffstat (limited to 'searx/engines')
| -rw-r--r-- | searx/engines/core.py | 82 |
1 file changed, 82 insertions, 0 deletions
# SPDX-License-Identifier: AGPL-3.0-or-later
"""CORE (science)

Searx engine for https://core.ac.uk — an aggregator of open-access
research papers.  Requires an API key (``api_key`` setting).
"""
# pylint: disable=missing-function-docstring

from json import loads
from datetime import datetime
from urllib.parse import urlencode

from searx import logger
from searx.exceptions import SearxEngineAPIException

logger = logger.getChild('CORE engine')

about = {
    "website": 'https://core.ac.uk',
    "wikidata_id": 'Q22661180',
    "official_api_documentation": 'https://core.ac.uk/documentation/api/',
    "use_official_api": True,
    "require_api_key": True,
    "results": 'JSON',
}

categories = ['science']
paging = True
nb_per_page = 10

# must be overridden in settings.yml; 'unset' is the sentinel for "missing"
api_key = 'unset'

base_url = 'https://core.ac.uk:443/api-v2/search/'
search_string = '{query}?page={page}&pageSize={nb_per_page}&apiKey={apikey}'


def request(query, params):
    """Build the CORE API request URL into ``params['url']``.

    Raises:
        SearxEngineAPIException: when no API key has been configured.
    """
    if api_key == 'unset':
        raise SearxEngineAPIException('missing CORE API key')

    search_path = search_string.format(
        query=urlencode({'q': query}),
        nb_per_page=nb_per_page,
        page=params['pageno'],
        apikey=api_key,
    )
    params['url'] = base_url + search_path

    logger.debug("query_url --> %s", params['url'])
    return params


def response(resp):
    """Parse the CORE JSON response into a list of searx result dicts."""
    results = []
    json_data = loads(resp.text)

    for result in json_data['data']:
        source = result['_source']

        # publishedDate / depositedDate are epoch *milliseconds*; either key
        # may be absent or null, so use .get() and fall back gracefully.
        time = source.get('publishedDate') or source.get('depositedDate')
        date = datetime.fromtimestamp(time / 1000) if time else None

        # assemble an optional "publisher / topic / doi" metadata line;
        # all three fields are optional in the API response
        metadata = []
        publisher = source.get('publisher')
        if publisher and len(publisher) > 3:
            metadata.append(publisher)
        if source.get('topics'):
            metadata.append(source['topics'][0])
        if source.get('doi'):
            metadata.append(source['doi'])
        metadata = ' / '.join(metadata)

        results.append({
            # prefer the https variant of the primary URL
            'url': source['urls'][0].replace('http://', 'https://', 1),
            'title': source['title'],
            'content': source['description'],
            'publishedDate': date,
            'metadata': metadata,
        })

    return results