diff options
Diffstat (limited to 'searx/engines')
| -rw-r--r-- | searx/engines/flickr-noapi.py | 102 | ||||
| -rw-r--r-- | searx/engines/flickr.py | 5 | ||||
| -rw-r--r-- | searx/engines/kickass.py | 84 |
3 files changed, 187 insertions(+), 4 deletions(-)
#!/usr/bin/env python

## Flickr (Images)
#
# @website     https://www.flickr.com
# @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)
#
# @using-api   no
# @results     HTML
# @stable      no
# @parse       url, title, thumbnail, img_src

from urllib import urlencode
from json import loads
import re

categories = ['images']

url = 'https://secure.flickr.com/'
search_url = url + 'search/?{query}&page={page}'
photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}'

# the photo data is embedded in the page as a JSON object; capture it
regex = re.compile(r"\"search-photos-models\",\"photos\":(.*}),\"totalItems\":",
                   re.DOTALL)

paging = True

# size codes ordered from the biggest to the smallest format
# (see Flickr URL size suffixes: o=original, k/h/b/c/z/n/m=t=q=s descending)
image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')


def build_flickr_url(user_id, photo_id):
    # canonical photo page url for an owner/photo pair
    return photo_url.format(userid=user_id, photoid=photo_id)


# do search-request
def request(query, params):
    params['url'] = search_url.format(query=urlencode({'text': query}),
                                      page=params['pageno'])
    return params


# get response from search-request
def response(resp):
    """Extract image results from the JSON blob embedded in the HTML page."""
    results = []

    matches = regex.search(resp.text)

    # no embedded photo data found
    if matches is None:
        return results

    search_results = loads(matches.group(1))

    if '_data' not in search_results:
        return []

    for photo in search_results['_data']:

        # in paged configuration, the first pages' photos are represented
        # by None objects
        if photo is None:
            continue

        # pick the biggest available size
        # (bugfix: the original ladder tested 't' but indexed 'to',
        # raising KeyError whenever only a thumbnail size was available)
        for size in image_sizes:
            if size in photo['sizes']:
                img_src = photo['sizes'][size]['displayUrl']
                break
        else:
            # no known size available for this photo
            continue

        url = build_flickr_url(photo['owner']['id'], photo['id'])

        title = photo['title']

        content = '<span class="photo-author">' +\
                  photo['owner']['username'] +\
                  '</span><br />'

        if 'description' in photo:
            content = content +\
                      '<span class="description">' +\
                      photo['description'] +\
                      '</span>'

        # append result
        results.append({'url': url,
                        'title': title,
                        'img_src': img_src,
                        'content': content,
                        'template': 'images.html'})

    return results
## Kickass Torrent (Videos, Music, Files)
#
# @website     https://kickass.so
# @provide-api no (nothing found)
#
# @using-api   no
# @results     HTML (using search portal)
# @stable      yes (HTML can change)
# @parse       url, title, content, seed, leech, magnetlink

from urlparse import urljoin
from cgi import escape
from urllib import quote
from lxml import html
from operator import itemgetter
from dateutil import parser

# engine dependent config
categories = ['videos', 'music', 'files']
paging = True

# search-url
url = 'https://kickass.so/'
search_url = url + 'search/{search_term}/{pageno}/'

# specific xpath variables
magnet_xpath = './/a[@title="Torrent magnet link"]'


def _to_int(text):
    # convert a numeric table cell to int, defaulting to 0 on anything
    # that is not a plain digit string (e.g. '--', empty)
    if text.isdigit():
        return int(text)
    return 0


# do search-request
def request(query, params):
    params['url'] = search_url.format(search_term=quote(query),
                                      pageno=params['pageno'])

    return params


# get response from search-request
def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    search_res = dom.xpath('//table[@class="data"]//tr')

    # return empty array if nothing is found
    if not search_res:
        return []

    # parse results, skipping the header row
    for result in search_res[1:]:
        link = result.xpath('.//a[@class="cellMainLink"]')[0]
        href = urljoin(url, link.attrib['href'])
        title = ' '.join(link.xpath('.//text()'))
        content = escape(html.tostring(
            result.xpath('.//span[@class="font11px lightgrey block"]')[0],
            method="text"))

        # seeder/leecher counts (0 when the cell is not numeric)
        seed = _to_int(result.xpath('.//td[contains(@class, "green")]/text()')[0])
        leech = _to_int(result.xpath('.//td[contains(@class, "red")]/text()')[0])

        magnetlink = result.xpath(magnet_xpath)[0].attrib['href']

        # append result
        results.append({'url': href,
                        'title': title,
                        'content': content,
                        'seed': seed,
                        'leech': leech,
                        'magnetlink': magnetlink,
                        'template': 'torrent.html'})

    # return results sorted by seeder
    return sorted(results, key=itemgetter('seed'), reverse=True)