summaryrefslogtreecommitdiff
path: root/searx/engines
diff options
context:
space:
mode:
authorAdam Tauber <asciimoo@gmail.com>2015-02-09 10:46:23 +0100
committerAdam Tauber <asciimoo@gmail.com>2015-02-09 10:46:23 +0100
commit5f801d7ea0ee1e4b4bec7f52c45ca3f601fc5cdc (patch)
tree8bd6d19dc101dda652525623627af2a8a5c86205 /searx/engines
parent7c075aa73197030d01b210054488ce99ec861d70 (diff)
parentdd4686a3886458f600427aba0ed7b9666b3644db (diff)
Merge pull request #219 from pointhi/new_engines
New engines: gigablast and blekko_images
Diffstat (limited to 'searx/engines')
-rw-r--r--searx/engines/blekko_images.py56
-rw-r--r--searx/engines/gigablast.py63
2 files changed, 119 insertions, 0 deletions
diff --git a/searx/engines/blekko_images.py b/searx/engines/blekko_images.py
new file mode 100644
index 000000000..2bae9c35e
--- /dev/null
+++ b/searx/engines/blekko_images.py
@@ -0,0 +1,56 @@
+## Blekko (Images)
+#
+# @website https://blekko.com
+# @provide-api yes (unofficial)
+#
+# @using-api yes
+# @results JSON
+# @stable yes
+# @parse url, title, img_src
+
+from json import loads
+from urllib import urlencode
+
+# engine dependent config
+categories = ['images']
+paging = True
+
+# search-url
+base_url = 'https://blekko.com'
+search_url = '/api/images?{query}&c={c}'
+
+
+# do search-request
+# do search-request
+# Build the Blekko image-search URL into params['url'] and return params.
+# `query` is the raw search string; `params` is the searx request dict
+# (must contain 'pageno', 1-based).
+def request(query, params):
+ # result offset: 48 images per page (pageno is 1-based)
+ c = (params['pageno'] - 1) * 48
+
+ params['url'] = base_url +\
+ search_url.format(query=urlencode({'q': query}),
+ c=c)
+
+ if params['pageno'] != 1:
+ # NOTE(review): both the offset `c` and a 0-based `page` parameter are
+ # sent for pages > 1 — looks redundant; confirm the API expects both.
+ params['url'] += '&page={pageno}'.format(pageno=(params['pageno']-1))
+
+ return params
+
+
+# get response from search-request
+# get response from search-request
+# Parse the Blekko JSON response (a list of image records) into searx
+# image results. Each record must provide 'page_url', 'title' and 'url'
+# (the image source); an empty/falsy payload yields an empty list.
+def response(resp):
+ results = []
+
+ search_results = loads(resp.text)
+
+ # return empty array if there are no results
+ if not search_results:
+ return []
+
+ for result in search_results:
+ # append result ('content' is always empty: the API gives no snippet)
+ results.append({'url': result['page_url'],
+ 'title': result['title'],
+ 'content': '',
+ 'img_src': result['url'],
+ 'template': 'images.html'})
+
+ # return results
+ return results
diff --git a/searx/engines/gigablast.py b/searx/engines/gigablast.py
new file mode 100644
index 000000000..8749c3256
--- /dev/null
+++ b/searx/engines/gigablast.py
@@ -0,0 +1,63 @@
+## Gigablast (Web)
+#
+# @website http://gigablast.com
+# @provide-api yes (http://gigablast.com/api.html)
+#
+# @using-api yes
+# @results XML
+# @stable yes
+# @parse url, title, content
+
+from urllib import urlencode
+from cgi import escape
+from lxml import etree
+
+# engine dependent config
+categories = ['general']
+paging = True
+number_of_results = 5
+
+# search-url
+base_url = 'http://gigablast.com/'
+search_string = 'search?{query}&n={number_of_results}&s={offset}&xml=1&qh=0'
+
+# specific xpath variables
+results_xpath = '//response//result'
+url_xpath = './/url'
+title_xpath = './/title'
+content_xpath = './/sum'
+
+
+# do search-request
+# do search-request
+# Build the Gigablast search URL into params['url'] and return params.
+# `query` is the raw search string; `params` is the searx request dict
+# (must contain 'pageno', 1-based). Pagination uses the module-level
+# number_of_results (results per page) to compute the `s` offset.
+def request(query, params):
+ # 0-based result offset for the requested page
+ offset = (params['pageno'] - 1) * number_of_results
+
+ search_path = search_string.format(
+ query=urlencode({'q': query}),
+ offset=offset,
+ number_of_results=number_of_results)
+
+ params['url'] = base_url + search_path
+
+ return params
+
+
+# get response from search-request
+# get response from search-request
+# Parse the Gigablast XML response (requested with xml=1) into searx web
+# results, using the module-level xpath expressions. Assumes every
+# <result> node contains a url, title and sum element — TODO confirm
+# the API guarantees this (an empty node would raise IndexError here).
+def response(resp):
+ results = []
+
+ dom = etree.fromstring(resp.content)
+
+ # parse results
+ for result in dom.xpath(results_xpath):
+ url = result.xpath(url_xpath)[0].text
+ title = result.xpath(title_xpath)[0].text
+ # NOTE(review): cgi.escape is Python-2-era (removed in Python 3.8);
+ # html.escape is the modern replacement if this file is ported.
+ content = escape(result.xpath(content_xpath)[0].text)
+
+ # append result
+ results.append({'url': url,
+ 'title': title,
+ 'content': content})
+
+ # return results
+ return results