summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--searx/engines/flickr-noapi.py102
-rw-r--r--searx/engines/flickr.py5
-rw-r--r--searx/engines/kickass.py84
-rw-r--r--searx/settings.yml15
-rw-r--r--searx/static/courgette/img/icon_kickass.icobin0 -> 1150 bytes
-rw-r--r--searx/static/default/img/icon_kickass.icobin0 -> 1150 bytes
-rw-r--r--searx/static/oscar/img/icons/kickass.pngbin0 -> 2019 bytes
-rw-r--r--searx/webapp.py2
8 files changed, 199 insertions, 9 deletions
diff --git a/searx/engines/flickr-noapi.py b/searx/engines/flickr-noapi.py
new file mode 100644
index 000000000..b44affec6
--- /dev/null
+++ b/searx/engines/flickr-noapi.py
@@ -0,0 +1,102 @@
+#!/usr/bin/env python
+
+## Flickr (Images)
+#
+# @website https://www.flickr.com
+# @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)
+#
+# @using-api no
+# @results HTML
+# @stable no
+# @parse url, title, thumbnail, img_src
+
+from urllib import urlencode
+from json import loads
+from urlparse import urljoin
+from lxml import html
+import re
+
+categories = ['images']
+
+url = 'https://secure.flickr.com/'
+search_url = url+'search/?{query}&page={page}'
+photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}'
+regex = re.compile(r"\"search-photos-models\",\"photos\":(.*}),\"totalItems\":", re.DOTALL)
+
+paging = True
+
+def build_flickr_url(user_id, photo_id):
+ return photo_url.format(userid=user_id,photoid=photo_id)
+
+
+def request(query, params):
+ params['url'] = search_url.format(query=urlencode({'text': query}),
+ page=params['pageno'])
+ return params
+
+
+def response(resp):
+ results = []
+
+ matches = regex.search(resp.text)
+
+    if matches is None:
+ return results
+
+ match = matches.group(1)
+ search_results = loads(match)
+
+    if '_data' not in search_results:
+ return []
+
+ photos = search_results['_data']
+
+ for photo in photos:
+
+ # In paged configuration, the first pages' photos are represented by a None object
+        if photo is None:
+ continue
+
+ # From the biggest to the lowest format
+ if 'o' in photo['sizes']:
+ img_src = photo['sizes']['o']['displayUrl']
+ elif 'k' in photo['sizes']:
+ img_src = photo['sizes']['k']['displayUrl']
+ elif 'h' in photo['sizes']:
+ img_src = photo['sizes']['h']['displayUrl']
+ elif 'b' in photo['sizes']:
+ img_src = photo['sizes']['b']['displayUrl']
+ elif 'c' in photo['sizes']:
+ img_src = photo['sizes']['c']['displayUrl']
+ elif 'z' in photo['sizes']:
+ img_src = photo['sizes']['z']['displayUrl']
+ elif 'n' in photo['sizes']:
+ img_src = photo['sizes']['n']['displayUrl']
+ elif 'm' in photo['sizes']:
+ img_src = photo['sizes']['m']['displayUrl']
+ elif 't' in photo['sizes']:
+            img_src = photo['sizes']['t']['displayUrl']
+ elif 'q' in photo['sizes']:
+ img_src = photo['sizes']['q']['displayUrl']
+ elif 's' in photo['sizes']:
+ img_src = photo['sizes']['s']['displayUrl']
+ else:
+ continue
+
+ url = build_flickr_url(photo['owner']['id'], photo['id'])
+
+ title = photo['title']
+
+ content = '<span class="photo-author">'+ photo['owner']['username'] +'</span><br />'
+
+ if 'description' in photo:
+ content = content + '<span class="description">' + photo['description'] + '</span>'
+
+ # append result
+ results.append({'url': url,
+ 'title': title,
+ 'img_src': img_src,
+ 'content': content,
+ 'template': 'images.html'})
+
+ return results
diff --git a/searx/engines/flickr.py b/searx/engines/flickr.py
index 8b60aed1d..2fa5ed7ec 100644
--- a/searx/engines/flickr.py
+++ b/searx/engines/flickr.py
@@ -13,9 +13,6 @@
from urllib import urlencode
from json import loads
-from urlparse import urljoin
-from lxml import html
-from time import time
categories = ['images']
@@ -70,7 +67,7 @@ def response(resp):
content = '<span class="photo-author">'+ photo['ownername'] +'</span><br />'
- content = content + ' <span class="description">' + photo['description']['_content'] + '</span>'
+ content = content + '<span class="description">' + photo['description']['_content'] + '</span>'
# append result
results.append({'url': url,
diff --git a/searx/engines/kickass.py b/searx/engines/kickass.py
new file mode 100644
index 000000000..bd11a3b6b
--- /dev/null
+++ b/searx/engines/kickass.py
@@ -0,0 +1,84 @@
+## Kickass Torrent (Videos, Music, Files)
+#
+# @website https://kickass.so
+# @provide-api no (nothing found)
+#
+# @using-api no
+# @results HTML (using search portal)
+# @stable yes (HTML can change)
+# @parse url, title, content, seed, leech, magnetlink
+
+from urlparse import urljoin
+from cgi import escape
+from urllib import quote
+from lxml import html
+from operator import itemgetter
+from dateutil import parser
+
+# engine dependent config
+categories = ['videos', 'music', 'files']
+paging = True
+
+# search-url
+url = 'https://kickass.so/'
+search_url = url + 'search/{search_term}/{pageno}/'
+
+# specific xpath variables
+magnet_xpath = './/a[@title="Torrent magnet link"]'
+#content_xpath = './/font[@class="detDesc"]//text()'
+
+
+# do search-request
+def request(query, params):
+ params['url'] = search_url.format(search_term=quote(query),
+ pageno=params['pageno'])
+
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ dom = html.fromstring(resp.text)
+
+ search_res = dom.xpath('//table[@class="data"]//tr')
+
+ # return empty array if nothing is found
+ if not search_res:
+ return []
+
+ # parse results
+ for result in search_res[1:]:
+ link = result.xpath('.//a[@class="cellMainLink"]')[0]
+ href = urljoin(url, link.attrib['href'])
+ title = ' '.join(link.xpath('.//text()'))
+ content = escape(html.tostring(result.xpath('.//span[@class="font11px lightgrey block"]')[0], method="text"))
+ seed = result.xpath('.//td[contains(@class, "green")]/text()')[0]
+ leech = result.xpath('.//td[contains(@class, "red")]/text()')[0]
+
+ # convert seed to int if possible
+ if seed.isdigit():
+ seed = int(seed)
+ else:
+ seed = 0
+
+ # convert leech to int if possible
+ if leech.isdigit():
+ leech = int(leech)
+ else:
+ leech = 0
+
+ magnetlink = result.xpath(magnet_xpath)[0].attrib['href']
+
+ # append result
+ results.append({'url': href,
+ 'title': title,
+ 'content': content,
+ 'seed': seed,
+ 'leech': leech,
+ 'magnetlink': magnetlink,
+ 'template': 'torrent.html'})
+
+ # return results sorted by seeder
+ return sorted(results, key=itemgetter('seed'), reverse=True)
diff --git a/searx/settings.yml b/searx/settings.yml
index b7359b919..07cd2ac3e 100644
--- a/searx/settings.yml
+++ b/searx/settings.yml
@@ -65,12 +65,15 @@ engines:
# categories : files
# shortcut : fc
-# api-key required: https://www.flickr.com/services/apps/create/
-# - name : flickr
+ - name : flickr
+ categories : images
+ shortcut : fl
+# You can use the engine using the official stable API, but you need an API key
+# See : https://www.flickr.com/services/apps/create/
# engine : flickr
-# categories : images
-# shortcut : fl
# api_key: 'apikey' # required!
+# Or you can use the html non-stable engine, activated by default
+ engine : flickr-noapi
- name : general-file
engine : generalfile
@@ -100,6 +103,10 @@ engines:
# engine : piratebay
# shortcut : tpb
+ - name : kickass
+ engine : kickass
+ shortcut : ka
+
- name : soundcloud
engine : soundcloud
shortcut : sc
diff --git a/searx/static/courgette/img/icon_kickass.ico b/searx/static/courgette/img/icon_kickass.ico
new file mode 100644
index 000000000..4aa2c77a5
--- /dev/null
+++ b/searx/static/courgette/img/icon_kickass.ico
Binary files differ
diff --git a/searx/static/default/img/icon_kickass.ico b/searx/static/default/img/icon_kickass.ico
new file mode 100644
index 000000000..4aa2c77a5
--- /dev/null
+++ b/searx/static/default/img/icon_kickass.ico
Binary files differ
diff --git a/searx/static/oscar/img/icons/kickass.png b/searx/static/oscar/img/icons/kickass.png
new file mode 100644
index 000000000..59e809104
--- /dev/null
+++ b/searx/static/oscar/img/icons/kickass.png
Binary files differ
diff --git a/searx/webapp.py b/searx/webapp.py
index 541975573..a2a135e9a 100644
--- a/searx/webapp.py
+++ b/searx/webapp.py
@@ -70,7 +70,7 @@ babel = Babel(app)
#TODO configurable via settings.yml
favicons = ['wikipedia', 'youtube', 'vimeo', 'dailymotion', 'soundcloud',
- 'twitter', 'stackoverflow', 'github', 'deviantart']
+ 'twitter', 'stackoverflow', 'github', 'deviantart', 'kickass']
cookie_max_age = 60 * 60 * 24 * 365 * 23 # 23 years