summaryrefslogtreecommitdiff
path: root/searx/engines
diff options
context:
space:
mode:
authorMarkus Heiser <markus.heiser@darmarIT.de>2022-07-15 09:30:40 +0200
committerGitHub <noreply@github.com>2022-07-15 09:30:40 +0200
commit4e051974440a03999f979eac932a89cb3a3e7871 (patch)
treee1124065e418aefbaaaf3344a730247a59ccf2b9 /searx/engines
parent2f63a528c38a215315e3c0aad3bbc5b9b4d8b269 (diff)
parent445cba7c0294547af7bf688007bd9b29dee058e3 (diff)
Merge pull request #1475 from return42/Emojipedia
[mod] Add engine for Emojipedia
Diffstat (limited to 'searx/engines')
-rw-r--r--searx/engines/emojipedia.py67
1 files changed, 67 insertions, 0 deletions
diff --git a/searx/engines/emojipedia.py b/searx/engines/emojipedia.py
new file mode 100644
index 000000000..b89267c0d
--- /dev/null
+++ b/searx/engines/emojipedia.py
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: AGPL-3.0-or-later
+# lint: pylint
+"""Emojipedia
+
+Emojipedia is an emoji reference website which documents the meaning and
+common usage of emoji characters in the Unicode Standard. It is owned by Zedge
+since 2021. Emojipedia is a voting member of The Unicode Consortium.[1]
+
+[1] https://en.wikipedia.org/wiki/Emojipedia
+"""
+
+from urllib.parse import urlencode
+from lxml import html
+
+from searx.utils import (
+ eval_xpath_list,
+ eval_xpath_getindex,
+ extract_text,
+)
+
# Engine metadata displayed in the SearXNG preferences / about section.
# Results are scraped from Emojipedia's HTML pages (no official API).
about = {
    "website": 'https://emojipedia.org',
    "wikidata_id": 'Q22908129',
    "official_api_documentation": None,
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}
+
# Engine configuration: no category assignment, no pagination, and no
# time-range filtering — Emojipedia's search page supports none of these.
categories = []
paging = False
time_range_support = False

base_url = 'https://emojipedia.org'
search_url = base_url + '/search/?{query}'


def request(query, params):
    """Build the Emojipedia search request.

    Fills ``params['url']`` with the search page URL for *query*
    (URL-encoded as the ``q`` parameter) and returns *params*.
    """
    encoded = urlencode({'q': query})
    params['url'] = search_url.format(query=encoded)
    return params
+
+
def response(resp):
    """Parse Emojipedia's HTML search page into a list of result dicts.

    Each result dict carries ``url``, ``title`` and ``content`` keys as
    expected by the SearXNG result handling.
    """
    results = []

    dom = html.fromstring(resp.text)

    # NOTE(review): positional XPath — breaks silently if Emojipedia
    # changes its page layout; confirm against the live markup.
    for result in eval_xpath_list(dom, "/html/body/div[2]/div[1]/ol/li"):

        extracted_desc = extract_text(eval_xpath_getindex(result, './/p', 0))

        # Emojipedia renders a single placeholder list item when the query
        # matches nothing; stop parsing entirely when it appears.
        if 'No results found.' in extracted_desc:
            break

        link = eval_xpath_getindex(result, './/h2/a', 0)

        # Guard against a missing ``href``: ``attrib.get`` returns None
        # for malformed entries and ``base_url + None`` would raise
        # TypeError, discarding the whole page. Skip just that entry.
        href = link.attrib.get('href')
        if not href:
            continue

        results.append({
            'url': base_url + href,
            'title': extract_text(link),
            'content': extracted_desc,
        })

    return results