Diffstat (limited to 'searx/engines')
-rw-r--r--  searx/engines/__init__.py       11
-rw-r--r--  searx/engines/dailymotion.py    15
-rw-r--r--  searx/engines/deezer.py         61
-rw-r--r--  searx/engines/digg.py            3
-rw-r--r--  searx/engines/flickr-noapi.py    2
-rw-r--r--  searx/engines/kickass.py        31
-rw-r--r--  searx/engines/soundcloud.py     14
-rw-r--r--  searx/engines/startpage.py       5
-rw-r--r--  searx/engines/vimeo.py          26
-rw-r--r--  searx/engines/wikidata.py       22
-rw-r--r--  searx/engines/youtube.py        13
11 files changed, 161 insertions, 42 deletions
diff --git a/searx/engines/__init__.py b/searx/engines/__init__.py
index 9bc5cdfd4..643b107a5 100644
--- a/searx/engines/__init__.py
+++ b/searx/engines/__init__.py
@@ -22,6 +22,10 @@ from imp import load_source
from flask.ext.babel import gettext
from operator import itemgetter
from searx import settings
+from searx import logger
+
+
+logger = logger.getChild('engines')
engine_dir = dirname(realpath(__file__))
@@ -81,7 +85,7 @@ def load_engine(engine_data):
if engine_attr.startswith('_'):
continue
if getattr(engine, engine_attr) is None:
- print('[E] Engine config error: Missing attribute "{0}.{1}"'
+ logger.error('Missing engine config attribute: "{0}.{1}"'
.format(engine.name, engine_attr))
sys.exit(1)
@@ -100,9 +104,8 @@ def load_engine(engine_data):
categories['general'].append(engine)
if engine.shortcut:
- # TODO check duplications
if engine.shortcut in engine_shortcuts:
- print('[E] Engine config error: ambigious shortcut: {0}'
+ logger.error('Engine config error: ambiguous shortcut: {0}'
.format(engine.shortcut))
sys.exit(1)
engine_shortcuts[engine.shortcut] = engine.name
@@ -199,7 +202,7 @@ def get_engines_stats():
if 'engines' not in settings or not settings['engines']:
- print '[E] Error no engines found. Edit your settings.yml'
+ logger.error('No engines found. Edit your settings.yml')
exit(2)
for engine_data in settings['engines']:
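The move from print statements to a module-level child logger routes engine configuration errors through searx's normal logging setup instead of stdout. A minimal sketch of the pattern, using logging.getLogger('searx') as a stand-in for the `from searx import logger` import:

    import logging

    parent = logging.getLogger('searx')        # stand-in for `from searx import logger`
    engines_log = parent.getChild('engines')   # logger named 'searx.engines'
    engines_log.error('Missing engine config attribute: "{0}.{1}"'.format('dummy', 'base_url'))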
diff --git a/searx/engines/dailymotion.py b/searx/engines/dailymotion.py
index a5bffa866..03b1dbb8b 100644
--- a/searx/engines/dailymotion.py
+++ b/searx/engines/dailymotion.py
@@ -6,12 +6,14 @@
# @using-api yes
# @results JSON
# @stable yes
-# @parse url, title, thumbnail
+# @parse url, title, thumbnail, publishedDate, embedded
#
# @todo set content-parameter with correct data
from urllib import urlencode
from json import loads
+from cgi import escape
+from datetime import datetime
# engine dependent config
categories = ['videos']
@@ -20,7 +22,9 @@ language_support = True
# search-url
# see http://www.dailymotion.com/doc/api/obj-video.html
-search_url = 'https://api.dailymotion.com/videos?fields=title,description,duration,url,thumbnail_360_url&sort=relevance&limit=5&page={pageno}&{query}' # noqa
+search_url = 'https://api.dailymotion.com/videos?fields=created_time,title,description,duration,url,thumbnail_360_url,id&sort=relevance&limit=5&page={pageno}&{query}' # noqa
+embedded_url = '<iframe frameborder="0" width="540" height="304" ' +\
+ 'data-src="//www.dailymotion.com/embed/video/{videoid}" allowfullscreen></iframe>'
# do search-request
@@ -51,14 +55,17 @@ def response(resp):
for res in search_res['list']:
title = res['title']
url = res['url']
- #content = res['description']
- content = ''
+ content = escape(res['description'])
thumbnail = res['thumbnail_360_url']
+ publishedDate = datetime.fromtimestamp(res['created_time'], None)
+ embedded = embedded_url.format(videoid=res['id'])
results.append({'template': 'videos.html',
'url': url,
'title': title,
'content': content,
+ 'publishedDate': publishedDate,
+ 'embedded': embedded,
'thumbnail': thumbnail})
# return results
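The new publishedDate field relies on Dailymotion's created_time being a Unix timestamp, which datetime.fromtimestamp() turns into a naive local datetime. A quick illustration with a made-up value:

    from datetime import datetime

    created_time = 1419120000                  # illustrative epoch seconds, not real API output
    publishedDate = datetime.fromtimestamp(created_time, None)
    print(publishedDate.year)                  # 2014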
diff --git a/searx/engines/deezer.py b/searx/engines/deezer.py
new file mode 100644
index 000000000..433ceffa1
--- /dev/null
+++ b/searx/engines/deezer.py
@@ -0,0 +1,61 @@
+## Deezer (Music)
+#
+# @website https://deezer.com
+# @provide-api yes (http://developers.deezer.com/api/)
+#
+# @using-api yes
+# @results JSON
+# @stable yes
+# @parse url, title, content, embedded
+
+from json import loads
+from urllib import urlencode
+
+# engine dependent config
+categories = ['music']
+paging = True
+
+# search-url
+url = 'http://api.deezer.com/'
+search_url = url + 'search?{query}&index={offset}'
+
+embedded_url = '<iframe scrolling="no" frameborder="0" allowTransparency="true" ' +\
+ 'data-src="http://www.deezer.com/plugins/player?type=tracks&id={audioid}" ' +\
+ 'width="540" height="80"></iframe>'
+
+
+# do search-request
+def request(query, params):
+ offset = (params['pageno'] - 1) * 25
+
+ params['url'] = search_url.format(query=urlencode({'q': query}),
+ offset=offset)
+
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ search_res = loads(resp.text)
+
+ # parse results
+ for result in search_res.get('data', []):
+ if result['type'] == 'track':
+ title = result['title']
+ url = result['link']
+ content = result['artist']['name'] +\
+ " &bull; " +\
+ result['album']['title'] +\
+ " &bull; " + result['title']
+ embedded = embedded_url.format(audioid=result['id'])
+
+ # append result
+ results.append({'url': url,
+ 'title': title,
+ 'embedded': embedded,
+ 'content': content})
+
+ # return results
+ return results
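The new engine follows the usual searx engine contract: request() only fills in params['url'], searx performs the HTTP request, and response() turns the JSON body into result dicts. A rough sketch of the request side, assuming a working searx install so the engines package can be imported:

    from searx.engines import deezer

    params = {'pageno': 1}
    deezer.request('daft punk', params)
    print(params['url'])    # http://api.deezer.com/search?q=daft+punk&index=0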
diff --git a/searx/engines/digg.py b/searx/engines/digg.py
index 241234fdb..8c457d6b9 100644
--- a/searx/engines/digg.py
+++ b/searx/engines/digg.py
@@ -44,6 +44,9 @@ def response(resp):
search_result = loads(resp.text)
+ if search_result['html'] == '':
+ return results
+
dom = html.fromstring(search_result['html'])
# parse results
diff --git a/searx/engines/flickr-noapi.py b/searx/engines/flickr-noapi.py
index aa2fa5d3b..89dd2ee5f 100644
--- a/searx/engines/flickr-noapi.py
+++ b/searx/engines/flickr-noapi.py
@@ -73,7 +73,7 @@ def response(resp):
url = build_flickr_url(photo['owner']['id'], photo['id'])
- title = photo['title']
+ title = photo.get('title', '')
content = '<span class="photo-author">' +\
photo['owner']['username'] +\
diff --git a/searx/engines/kickass.py b/searx/engines/kickass.py
index 16e9d6de6..a4d270673 100644
--- a/searx/engines/kickass.py
+++ b/searx/engines/kickass.py
@@ -24,6 +24,7 @@ search_url = url + 'search/{search_term}/{pageno}/'
# specific xpath variables
magnet_xpath = './/a[@title="Torrent magnet link"]'
+torrent_xpath = './/a[@title="Download torrent file"]'
content_xpath = './/span[@class="font11px lightgrey block"]'
@@ -60,6 +61,9 @@ def response(resp):
method="text"))
seed = result.xpath('.//td[contains(@class, "green")]/text()')[0]
leech = result.xpath('.//td[contains(@class, "red")]/text()')[0]
+ filesize = result.xpath('.//td[contains(@class, "nobr")]/text()')[0]
+ filesize_multiplier = result.xpath('.//td[contains(@class, "nobr")]//span/text()')[0]
+ files = result.xpath('.//td[contains(@class, "center")][2]/text()')[0]
# convert seed to int if possible
if seed.isdigit():
@@ -73,15 +77,42 @@ def response(resp):
else:
leech = 0
+ # convert filesize to byte if possible
+ try:
+ filesize = float(filesize)
+
+ # convert filesize to byte
+ if filesize_multiplier == 'TB':
+ filesize = int(filesize * 1024 * 1024 * 1024 * 1024)
+ elif filesize_multiplier == 'GB':
+ filesize = int(filesize * 1024 * 1024 * 1024)
+ elif filesize_multiplier == 'MB':
+ filesize = int(filesize * 1024 * 1024)
+ elif filesize_multiplier == 'kb':
+ filesize = int(filesize * 1024)
+ except:
+ filesize = None
+
+ # convert files to int if possible
+ if files.isdigit():
+ files = int(files)
+ else:
+ files = None
+
magnetlink = result.xpath(magnet_xpath)[0].attrib['href']
+ torrentfile = result.xpath(torrent_xpath)[0].attrib['href']
+
# append result
results.append({'url': href,
'title': title,
'content': content,
'seed': seed,
'leech': leech,
+ 'filesize': filesize,
+ 'files': files,
'magnetlink': magnetlink,
+ 'torrentfile': torrentfile,
'template': 'torrent.html'})
# return results sorted by seeder
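The added size handling boils down to multiplying the displayed number by a power of 1024, falling back to None when either scraped field is not what the code expects. The same logic as a small standalone helper (a sketch, not part of the patch):

    def to_bytes(size_str, multiplier):
        factors = {'TB': 1024 ** 4, 'GB': 1024 ** 3, 'MB': 1024 ** 2, 'kb': 1024}
        try:
            return int(float(size_str) * factors[multiplier])
        except (ValueError, KeyError):
            return None

    to_bytes('1.37', 'GB')    # ~1.47e9 bytes
    to_bytes('n/a', 'GB')     # None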
diff --git a/searx/engines/soundcloud.py b/searx/engines/soundcloud.py
index 164a569a3..44374af6f 100644
--- a/searx/engines/soundcloud.py
+++ b/searx/engines/soundcloud.py
@@ -6,10 +6,11 @@
# @using-api yes
# @results JSON
# @stable yes
-# @parse url, title, content
+# @parse url, title, content, publishedDate, embedded
from json import loads
-from urllib import urlencode
+from urllib import urlencode, quote_plus
+from dateutil import parser
# engine dependent config
categories = ['music']
@@ -27,6 +28,10 @@ search_url = url + 'search?{query}'\
'&linked_partitioning=1'\
'&client_id={client_id}' # noqa
+embedded_url = '<iframe width="100%" height="166" ' +\
+ 'scrolling="no" frameborder="no" ' +\
+ 'data-src="https://w.soundcloud.com/player/?url={uri}"></iframe>'
+
# do search-request
def request(query, params):
@@ -50,10 +55,15 @@ def response(resp):
if result['kind'] in ('track', 'playlist'):
title = result['title']
content = result['description']
+ publishedDate = parser.parse(result['last_modified'])
+ uri = quote_plus(result['uri'])
+ embedded = embedded_url.format(uri=uri)
# append result
results.append({'url': result['permalink_url'],
'title': title,
+ 'publishedDate': publishedDate,
+ 'embedded': embedded,
'content': content})
# return results
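The SoundCloud player widget takes the track's API URI as a query parameter, so it has to be percent-encoded first; that is what the quote_plus() call does. For example, with an illustrative track URI:

    from urllib import quote_plus

    uri = 'https://api.soundcloud.com/tracks/49299772'
    print(quote_plus(uri))
    # https%3A%2F%2Fapi.soundcloud.com%2Ftracks%2F49299772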
diff --git a/searx/engines/startpage.py b/searx/engines/startpage.py
index 16da728cd..70b193952 100644
--- a/searx/engines/startpage.py
+++ b/searx/engines/startpage.py
@@ -66,7 +66,10 @@ def response(resp):
continue
link = links[0]
url = link.attrib.get('href')
- title = escape(link.text_content())
+ try:
+ title = escape(link.text_content())
+ except UnicodeDecodeError:
+ continue
# block google-ad url's
if re.match("^http(s|)://www.google.[a-z]+/aclk.*$", url):
diff --git a/searx/engines/vimeo.py b/searx/engines/vimeo.py
index c66c4148a..39033c591 100644
--- a/searx/engines/vimeo.py
+++ b/searx/engines/vimeo.py
@@ -1,4 +1,4 @@
-## Vimeo (Videos)
+# Vimeo (Videos)
#
# @website https://vimeo.com/
# @provide-api yes (http://developer.vimeo.com/api),
@@ -7,14 +7,14 @@
# @using-api no (TODO, rewrite to api)
# @results HTML (using search portal)
# @stable no (HTML can change)
-# @parse url, title, publishedDate, thumbnail
+# @parse url, title, publishedDate, thumbnail, embedded
#
# @todo rewrite to api
# @todo set content-parameter with correct data
from urllib import urlencode
-from HTMLParser import HTMLParser
from lxml import html
+from HTMLParser import HTMLParser
from searx.engines.xpath import extract_text
from dateutil import parser
@@ -23,26 +23,26 @@ categories = ['videos']
paging = True
# search-url
-base_url = 'https://vimeo.com'
+base_url = 'http://vimeo.com'
search_url = base_url + '/search/page:{pageno}?{query}'
# specific xpath variables
+results_xpath = '//div[@id="browse_content"]/ol/li'
url_xpath = './a/@href'
+title_xpath = './a/div[@class="data"]/p[@class="title"]'
content_xpath = './a/img/@src'
-title_xpath = './a/div[@class="data"]/p[@class="title"]/text()'
-results_xpath = '//div[@id="browse_content"]/ol/li'
publishedDate_xpath = './/p[@class="meta"]//attribute::datetime'
+embedded_url = '<iframe data-src="//player.vimeo.com/video{videoid}" ' +\
+ 'width="540" height="304" frameborder="0" ' +\
+ 'webkitallowfullscreen mozallowfullscreen allowfullscreen></iframe>'
+
# do search-request
def request(query, params):
params['url'] = search_url.format(pageno=params['pageno'],
query=urlencode({'q': query}))
- # TODO required?
- params['cookies']['__utma'] =\
- '00000000.000#0000000.0000000000.0000000000.0000000000.0'
-
return params
@@ -51,16 +51,17 @@ def response(resp):
results = []
dom = html.fromstring(resp.text)
-
p = HTMLParser()
# parse results
for result in dom.xpath(results_xpath):
- url = base_url + result.xpath(url_xpath)[0]
+ videoid = result.xpath(url_xpath)[0]
+ url = base_url + videoid
title = p.unescape(extract_text(result.xpath(title_xpath)))
thumbnail = extract_text(result.xpath(content_xpath)[0])
publishedDate = parser.parse(extract_text(
result.xpath(publishedDate_xpath)[0]))
+ embedded = embedded_url.format(videoid=videoid)
# append result
results.append({'url': url,
@@ -68,6 +69,7 @@ def response(resp):
'content': '',
'template': 'videos.html',
'publishedDate': publishedDate,
+ 'embedded': embedded,
'thumbnail': thumbnail})
# return results
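Note that videoid is the raw href taken from the result, so it already starts with a slash; that is why the embed template ends in 'video{videoid}' without a separator. Illustrative only, assuming hrefs of the form '/<numeric id>':

    videoid = '/12345678'                       # assumed shape of the @href value
    print(embedded_url.format(videoid=videoid))
    # <iframe data-src="//player.vimeo.com/video/12345678" width="540" height="304" ...></iframe>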
diff --git a/searx/engines/wikidata.py b/searx/engines/wikidata.py
index df976ae35..da3152ac8 100644
--- a/searx/engines/wikidata.py
+++ b/searx/engines/wikidata.py
@@ -1,8 +1,7 @@
import json
from requests import get
from urllib import urlencode
-import locale
-import dateutil.parser
+from searx.utils import format_date_by_locale
result_count = 1
wikidata_host = 'https://www.wikidata.org'
@@ -38,27 +37,18 @@ def response(resp):
if language == 'all':
language = 'en'
- try:
- locale.setlocale(locale.LC_ALL, str(resp.search_params['language']))
- except:
- try:
- locale.setlocale(locale.LC_ALL, 'en_US')
- except:
- pass
- pass
-
url = url_detail.format(query=urlencode({'ids': '|'.join(wikidata_ids),
'languages': language + '|en'}))
htmlresponse = get(url)
jsonresponse = json.loads(htmlresponse.content)
for wikidata_id in wikidata_ids:
- results = results + getDetail(jsonresponse, wikidata_id, language)
+ results = results + getDetail(jsonresponse, wikidata_id, language, resp.search_params['language'])
return results
-def getDetail(jsonresponse, wikidata_id, language):
+def getDetail(jsonresponse, wikidata_id, language, locale):
results = []
urls = []
attributes = []
@@ -176,12 +166,12 @@ def getDetail(jsonresponse, wikidata_id, language):
date_of_birth = get_time(claims, 'P569', None)
if date_of_birth is not None:
- date_of_birth = dateutil.parser.parse(date_of_birth[8:]).strftime(locale.nl_langinfo(locale.D_FMT))
+ date_of_birth = format_date_by_locale(date_of_birth[8:], locale)
attributes.append({'label': 'Date of birth', 'value': date_of_birth})
date_of_death = get_time(claims, 'P570', None)
if date_of_death is not None:
- date_of_death = dateutil.parser.parse(date_of_death[8:]).strftime(locale.nl_langinfo(locale.D_FMT))
+ date_of_death = format_date_by_locale(date_of_death[8:], locale)
attributes.append({'label': 'Date of death', 'value': date_of_death})
if len(attributes) == 0 and len(urls) == 2 and len(description) == 0:
@@ -235,7 +225,7 @@ def get_string(claims, propertyName, defaultValue=None):
if len(result) == 0:
return defaultValue
else:
- #TODO handle multiple urls
+ # TODO handle multiple urls
return result[0]
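Dropping the process-wide locale.setlocale() call in favour of a helper keeps date formatting per-request and thread-safe. What such a helper can look like when built on Babel (a sketch under that assumption, not the actual searx.utils implementation):

    from babel.dates import format_date
    from dateutil import parser

    def format_date_by_locale(date_string, locale_string):
        # fall back to English when the UI language is 'all'
        if locale_string == 'all':
            locale_string = 'en'
        return format_date(parser.parse(date_string), locale=locale_string)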
diff --git a/searx/engines/youtube.py b/searx/engines/youtube.py
index 973e799f8..59f07c574 100644
--- a/searx/engines/youtube.py
+++ b/searx/engines/youtube.py
@@ -6,7 +6,7 @@
# @using-api yes
# @results JSON
# @stable yes
-# @parse url, title, content, publishedDate, thumbnail
+# @parse url, title, content, publishedDate, thumbnail, embedded
from json import loads
from urllib import urlencode
@@ -19,7 +19,11 @@ language_support = True
# search-url
base_url = 'https://gdata.youtube.com/feeds/api/videos'
-search_url = base_url + '?alt=json&{query}&start-index={index}&max-results=5' # noqa
+search_url = base_url + '?alt=json&{query}&start-index={index}&max-results=5'
+
+embedded_url = '<iframe width="540" height="304" ' +\
+ 'data-src="//www.youtube-nocookie.com/embed/{videoid}" ' +\
+ 'frameborder="0" allowfullscreen></iframe>'
# do search-request
@@ -60,6 +64,8 @@ def response(resp):
if url.endswith('&'):
url = url[:-1]
+ videoid = url[32:]
+
title = result['title']['$t']
content = ''
thumbnail = ''
@@ -72,12 +78,15 @@ def response(resp):
content = result['content']['$t']
+ embedded = embedded_url.format(videoid=videoid)
+
# append result
results.append({'url': url,
'title': title,
'content': content,
'template': 'videos.html',
'publishedDate': publishedDate,
+ 'embedded': embedded,
'thumbnail': thumbnail})
# return results
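The url[32:] slice assumes every result URL begins with the 32-character prefix 'https://www.youtube.com/watch?v=', so whatever follows is treated as the video id. A quick check of that assumption (sketch only):

    prefix = 'https://www.youtube.com/watch?v='
    print(len(prefix))                  # 32
    url = prefix + 'dQw4w9WgXcQ'
    print(url[32:])                     # dQw4w9WgXcQ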