Diffstat (limited to 'searx/engines')
 searx/engines/duden.py    | 76 ++++++++++++++++++++++++++++++++++++++++++++++
 searx/engines/qwant.py    |  2 +-
 searx/engines/www500px.py | 73 ---------------------------------------------
 3 files changed, 77 insertions(+), 74 deletions(-)
diff --git a/searx/engines/duden.py b/searx/engines/duden.py
new file mode 100644
index 000000000..881ff9d9c
--- /dev/null
+++ b/searx/engines/duden.py
@@ -0,0 +1,76 @@
+"""
+ Duden
+ @website https://www.duden.de
+ @provide-api no
+ @using-api no
+ @results HTML (using search portal)
+ @stable no (HTML can change)
+ @parse url, title, content
+"""
+
+from lxml import html, etree
+import re
+from searx.engines.xpath import extract_text
+from searx.url_utils import quote
+from searx import logger
+
+categories = ['general']
+paging = True
+language_support = False
+
+# search-url
+base_url = 'https://www.duden.de/'
+search_url = base_url + 'suchen/dudenonline/{query}?page={offset}'
+
+
+def request(query, params):
+    '''pre-request callback
+    params<dict>:
+        method  : POST/GET
+        headers : {}
+        data    : {}  # if method == POST
+        url     : ''
+        category: 'search category'
+        pageno  : 1   # number of the requested page
+    '''
+
+    offset = (params['pageno'] - 1)
+    params['url'] = search_url.format(offset=offset, query=quote(query))
+    return params
+
+
+def response(resp):
+    '''post-response callback
+    resp: requests response object
+    '''
+    results = []
+
+    dom = html.fromstring(resp.text)
+
+    try:
+        number_of_results_string = re.sub('[^0-9]', '', dom.xpath(
+            '//a[@class="active" and contains(@href,"/suchen/dudenonline")]/span/text()')[0]
+        )
+
+        results.append({'number_of_results': int(number_of_results_string)})
+
+    except Exception:
+        logger.debug("Couldn't read number of results.")
+        pass
+
+    for result in dom.xpath('//section[@class="wide" and not(contains(@style,"overflow:hidden"))]'):
+        try:
+            logger.debug("running for %s", result)
+            link = result.xpath('.//h2/a')[0]
+            url = link.attrib.get('href')
+            title = result.xpath('string(.//h2/a)')
+            content = extract_text(result.xpath('.//p'))
+            # append result
+            results.append({'url': url,
+                            'title': title,
+                            'content': content})
+        except Exception:
+            logger.debug('result parse error in:\n%s', etree.tostring(result, pretty_print=True))
+            continue
+
+    return results
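For context, searx drives every engine through this same pair of callbacks: request() fills in params['url'] for the requested page, and response() parses the fetched HTML into result dicts. A minimal sketch of that flow, assuming the searx package is importable; FakeResponse is a hypothetical stand-in for the requests response object searx normally passes in:

    # Sketch only: searx wires these callbacks up internally; this just
    # shows the calling convention. FakeResponse is a hypothetical
    # stand-in exposing the one attribute response() reads.
    from searx.engines import duden

    params = duden.request('Wort', {'pageno': 1})
    print(params['url'])  # https://www.duden.de/suchen/dudenonline/Wort?page=0

    class FakeResponse:
        text = '<html><body></body></html>'  # fetched HTML would go here

    print(duden.response(FakeResponse()))  # [] for an empty page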
diff --git a/searx/engines/qwant.py b/searx/engines/qwant.py
index 239193b96..4b0f1c87c 100644
--- a/searx/engines/qwant.py
+++ b/searx/engines/qwant.py
@@ -28,7 +28,7 @@ category_to_keyword = {'general': 'web',
                        'social media': 'social'}
# search-url
-url = 'https://api.qwant.com/api/search/{keyword}?count=10&offset={offset}&f=&{query}'
+url = 'https://api.qwant.com/api/search/{keyword}?count=10&offset={offset}&f=&{query}&t={keyword}&uiv=4'
# do search-request
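The qwant change appends a t={keyword} parameter (repeating the search tab) and uiv=4, which appears to be a UI/API version flag, to the request URL. Purely as an illustration of how the updated template expands for a 'general' search (keyword 'web') on the first page:

    # Illustration: expanding the updated template with keyword 'web',
    # offset 0 and an already URL-encoded query string.
    url = ('https://api.qwant.com/api/search/{keyword}'
           '?count=10&offset={offset}&f=&{query}&t={keyword}&uiv=4')
    print(url.format(keyword='web', offset=0, query='q=test'))
    # https://api.qwant.com/api/search/web?count=10&offset=0&f=&q=test&t=web&uiv=4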
diff --git a/searx/engines/www500px.py b/searx/engines/www500px.py
deleted file mode 100644
index 7a2015ae9..000000000
--- a/searx/engines/www500px.py
+++ /dev/null
@@ -1,73 +0,0 @@
-"""
- 500px (Images)
-
- @website https://500px.com
- @provide-api yes (https://developers.500px.com/)
-
- @using-api no
- @results HTML
- @stable no (HTML can change)
- @parse url, title, thumbnail, img_src, content
-
- @todo rewrite to api
-"""
-
-from json import loads
-from searx.url_utils import urlencode, urljoin
-
-# engine dependent config
-categories = ['images']
-paging = True
-
-# search-url
-base_url = 'https://500px.com'
-search_url = 'https://api.500px.com/v1/photos/search?type=photos'\
-    '&{query}'\
-    '&image_size%5B%5D=4'\
-    '&image_size%5B%5D=20'\
-    '&image_size%5B%5D=21'\
-    '&image_size%5B%5D=1080'\
-    '&image_size%5B%5D=1600'\
-    '&image_size%5B%5D=2048'\
-    '&include_states=true'\
-    '&formats=jpeg%2Clytro'\
-    '&include_tags=true'\
-    '&exclude_nude=true'\
-    '&page={pageno}'\
-    '&rpp=50'\
-    '&sdk_key=b68e60cff4c929bedea36ca978830c5caca790c3'
-
-
-# do search-request
-def request(query, params):
-    params['url'] = search_url.format(pageno=params['pageno'],
-                                      query=urlencode({'term': query}))
-
-    return params
-
-
-# get response from search-request
-def response(resp):
-    results = []
-
-    response_json = loads(resp.text)
-
-    # parse results
-    for result in response_json['photos']:
-        url = urljoin(base_url, result['url'])
-        title = result['name']
-        # last index is the biggest resolution
-        img_src = result['image_url'][-1]
-        thumbnail_src = result['image_url'][0]
-        content = result['description'] or ''
-
-        # append result
-        results.append({'url': url,
-                        'title': title,
-                        'img_src': img_src,
-                        'content': content,
-                        'thumbnail_src': thumbnail_src,
-                        'template': 'images.html'})
-
-    # return results
-    return results
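The deleted response() consumed JSON from the 500px API. For reference, a minimal sketch of the payload shape it expected; the data below is hypothetical, not real API output:

    # Hypothetical payload mirroring the fields the deleted response()
    # read: url, name, description and an image_url list ordered from
    # smallest to biggest resolution.
    from json import loads

    payload = '''{"photos": [{"url": "/photo/1/example",
                              "name": "Example photo",
                              "description": "A placeholder entry",
                              "image_url": ["https://example.org/thumb.jpg",
                                            "https://example.org/full.jpg"]}]}'''

    for photo in loads(payload)['photos']:
        print(photo['name'], photo['image_url'][-1])  # last entry: biggest size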