Diffstat (limited to 'searx/engines')
-rw-r--r--  searx/engines/500px.py                   |  57
-rw-r--r--  searx/engines/__init__.py                |  23
-rw-r--r--  searx/engines/bing_news.py               |  11
-rw-r--r--  searx/engines/digg.py                    |  67
-rw-r--r--  searx/engines/duckduckgo_definitions.py  |   6
-rw-r--r--  searx/engines/faroo.py                   |  15
-rw-r--r--  searx/engines/flickr-noapi.py            |  95
-rw-r--r--  searx/engines/flickr.py                  |  91
-rw-r--r--  searx/engines/google_images.py           |   2
-rw-r--r--  searx/engines/kickass.py                 |  88
-rw-r--r--  searx/engines/mediawiki.py               |  11
-rw-r--r--  searx/engines/openstreetmap.py           |  14
-rw-r--r--  searx/engines/photon.py                  | 132
-rw-r--r--  searx/engines/piratebay.py               |   2
-rw-r--r--  searx/engines/searchcode_code.py         |  65
-rw-r--r--  searx/engines/searchcode_doc.py          |  56
-rw-r--r--  searx/engines/soundcloud.py              |   7
-rw-r--r--  searx/engines/subtitleseeker.py          |  78
-rw-r--r--  searx/engines/twitter.py                 |  27
-rw-r--r--  searx/engines/yacy.py                    |  29
-rw-r--r--  searx/engines/yahoo.py                   |   9
21 files changed, 802 insertions, 83 deletions
diff --git a/searx/engines/500px.py b/searx/engines/500px.py
new file mode 100644
index 000000000..3b95619a1
--- /dev/null
+++ b/searx/engines/500px.py
@@ -0,0 +1,57 @@
+## 500px (Images)
+#
+# @website https://500px.com
+# @provide-api yes (https://developers.500px.com/)
+#
+# @using-api no
+# @results HTML
+# @stable no (HTML can change)
+# @parse url, title, thumbnail, img_src, content
+#
+# @todo rewrite to api
+
+
+from urllib import urlencode
+from urlparse import urljoin
+from lxml import html
+
+# engine dependent config
+categories = ['images']
+paging = True
+
+# search-url
+base_url = 'https://500px.com'
+search_url = base_url+'/search?page={pageno}&type=photos&{query}'
+
+
+# do search-request
+def request(query, params):
+ params['url'] = search_url.format(pageno=params['pageno'],
+ query=urlencode({'q': query}))
+
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ dom = html.fromstring(resp.text)
+
+ # parse results
+ for result in dom.xpath('//div[@class="photo"]'):
+ link = result.xpath('.//a')[0]
+ url = urljoin(base_url, link.attrib.get('href'))
+ title = result.xpath('.//div[@class="title"]//text()')[0]
+ img_src = link.xpath('.//img')[0].attrib['src']
+ content = result.xpath('.//div[@class="info"]//text()')[0]
+
+ # append result
+ results.append({'url': url,
+ 'title': title,
+ 'img_src': img_src,
+ 'content': content,
+ 'template': 'images.html'})
+
+ # return results
+ return results
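
Taken together, request() and response() are the engine contract used
throughout this commit: searx hands the engine a params dict, the engine
fills in params['url'], and response() later parses the HTTP body. A
minimal standalone sketch (hypothetical query, run outside searx) of
what request() builds for page 2:

    from urllib import urlencode  # Python 2, as in the engine above

    base_url = 'https://500px.com'
    search_url = base_url + '/search?page={pageno}&type=photos&{query}'

    params = {'pageno': 2}
    params['url'] = search_url.format(pageno=params['pageno'],
                                      query=urlencode({'q': 'mountain'}))
    print(params['url'])
    # https://500px.com/search?page=2&type=photos&q=mountain
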
diff --git a/searx/engines/__init__.py b/searx/engines/__init__.py
index 80356a8cd..9bc5cdfd4 100644
--- a/searx/engines/__init__.py
+++ b/searx/engines/__init__.py
@@ -41,11 +41,8 @@ def load_module(filename):
module.name = modname
return module
-if 'engines' not in settings or not settings['engines']:
- print '[E] Error no engines found. Edit your settings.yml'
- exit(2)
-for engine_data in settings['engines']:
+def load_engine(engine_data):
engine_name = engine_data['engine']
engine = load_module(engine_name + '.py')
@@ -84,10 +81,10 @@ for engine_data in settings['engines']:
if engine_attr.startswith('_'):
continue
if getattr(engine, engine_attr) is None:
- print '[E] Engine config error: Missing attribute "{0}.{1}"'.format(engine.name, engine_attr) # noqa
+ print('[E] Engine config error: Missing attribute "{0}.{1}"'
+ .format(engine.name, engine_attr))
sys.exit(1)
- engines[engine.name] = engine
engine.stats = {
'result_count': 0,
'search_count': 0,
@@ -104,7 +101,12 @@ for engine_data in settings['engines']:
if engine.shortcut:
# TODO check duplications
+ if engine.shortcut in engine_shortcuts:
+ print('[E] Engine config error: ambiguous shortcut: {0}'
+ .format(engine.shortcut))
+ sys.exit(1)
engine_shortcuts[engine.shortcut] = engine.name
+ return engine
def get_engines_stats():
@@ -194,3 +196,12 @@ def get_engines_stats():
sorted(errors, key=itemgetter('avg'), reverse=True)
),
]
+
+
+if 'engines' not in settings or not settings['engines']:
+ print '[E] Error: no engines found. Edit your settings.yml'
+ exit(2)
+
+for engine_data in settings['engines']:
+ engine = load_engine(engine_data)
+ engines[engine.name] = engine
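
load_engine() consumes one entry of the engines: list from settings.yml,
already parsed into a dict. A hedged sketch of the expected shape - the
key names follow the settings.yml convention and the values here are
hypothetical:

    # Hypothetical settings.yml entry as the YAML loader presents it.
    # 'engine' selects the module file to load; 'shortcut' must be
    # unique, otherwise load_engine() exits with the "ambiguous
    # shortcut" error introduced above.
    engine_data = {
        'name': '500px',
        'engine': '500px',
        'shortcut': 'px',
        'categories': 'images',
    }
    # engine = load_engine(engine_data)   # requires the searx package
    # engines[engine.name] = engine
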
diff --git a/searx/engines/bing_news.py b/searx/engines/bing_news.py
index 5dce4a2b2..3dda04cbb 100644
--- a/searx/engines/bing_news.py
+++ b/searx/engines/bing_news.py
@@ -57,12 +57,16 @@ def response(resp):
link = result.xpath('.//div[@class="newstitle"]/a')[0]
url = link.attrib.get('href')
title = ' '.join(link.xpath('.//text()'))
- contentXPath = result.xpath('.//div[@class="sn_txt"]/div//span[@class="sn_snip"]//text()')
+ contentXPath = result.xpath('.//div[@class="sn_txt"]/div'
+ '//span[@class="sn_snip"]//text()')
if contentXPath is not None:
content = escape(' '.join(contentXPath))
# parse publishedDate
- publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div//span[contains(@class,"sn_ST")]//span[contains(@class,"sn_tm")]//text()')
+ publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div'
+ '//span[contains(@class,"sn_ST")]'
+ '//span[contains(@class,"sn_tm")]'
+ '//text()')
if publishedDateXPath is not None:
publishedDate = escape(' '.join(publishedDateXPath))
@@ -74,7 +78,8 @@ def response(resp):
timeNumbers = re.findall(r'\d+', publishedDate)
publishedDate = datetime.now()\
- timedelta(hours=int(timeNumbers[0]))
- elif re.match("^[0-9]+ hour(s|), [0-9]+ minute(s|) ago$", publishedDate):
+ elif re.match("^[0-9]+ hour(s|),"
+ " [0-9]+ minute(s|) ago$", publishedDate):
timeNumbers = re.findall(r'\d+', publishedDate)
publishedDate = datetime.now()\
- timedelta(hours=int(timeNumbers[0]))\
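
The re-wrapped patterns match exactly the same strings as before; only
the source lines were split. A standalone check of the "hours, minutes
ago" branch (the minutes term continues past the end of this hunk):

    import re
    from datetime import datetime, timedelta

    publishedDate = '3 hours, 12 minutes ago'
    if re.match("^[0-9]+ hour(s|),"
                " [0-9]+ minute(s|) ago$", publishedDate):
        timeNumbers = re.findall(r'\d+', publishedDate)
        when = datetime.now()\
            - timedelta(hours=int(timeNumbers[0]))\
            - timedelta(minutes=int(timeNumbers[1]))
        print(when)  # now minus 3h12m
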
diff --git a/searx/engines/digg.py b/searx/engines/digg.py
new file mode 100644
index 000000000..241234fdb
--- /dev/null
+++ b/searx/engines/digg.py
@@ -0,0 +1,67 @@
+## Digg (News, Social media)
+#
+# @website https://digg.com/
+# @provide-api no
+#
+# @using-api no
+# @results HTML (using search portal)
+# @stable no (HTML can change)
+# @parse url, title, content, publishedDate, thumbnail
+
+from urllib import quote_plus
+from json import loads
+from lxml import html
+from cgi import escape
+from dateutil import parser
+
+# engine dependent config
+categories = ['news', 'social media']
+paging = True
+
+# search-url
+base_url = 'https://digg.com/'
+search_url = base_url+'api/search/{query}.json?position={position}&format=html'
+
+# specific xpath variables
+results_xpath = '//article'
+link_xpath = './/small[@class="time"]//a'
+title_xpath = './/h2//a//text()'
+content_xpath = './/p//text()'
+pubdate_xpath = './/time'
+
+
+# do search-request
+def request(query, params):
+ offset = (params['pageno'] - 1) * 10
+ params['url'] = search_url.format(position=offset,
+ query=quote_plus(query))
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ search_result = loads(resp.text)
+
+ dom = html.fromstring(search_result['html'])
+
+ # parse results
+ for result in dom.xpath(results_xpath):
+ url = result.attrib.get('data-contenturl')
+ thumbnail = result.xpath('.//img')[0].attrib.get('src')
+ title = ''.join(result.xpath(title_xpath))
+ content = escape(''.join(result.xpath(content_xpath)))
+ pubdate = result.xpath(pubdate_xpath)[0].attrib.get('datetime')
+ publishedDate = parser.parse(pubdate)
+
+ # append result
+ results.append({'url': url,
+ 'title': title,
+ 'content': content,
+ 'template': 'videos.html',
+ 'publishedDate': publishedDate,
+ 'thumbnail': thumbnail})
+
+ # return results
+ return results
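
Unlike a plain HTML scraper, this engine receives a JSON envelope whose
'html' field carries the markup that the xpath variables then pick
apart. A trimmed, hypothetical payload illustrating the two-step decode:

    from json import loads
    from lxml import html

    # shape only; the real response carries many <article> nodes
    payload = ('{"html": "<article data-contenturl='
               '\\"http://example.com\\"><h2><a>Title</a></h2>'
               '</article>"}')
    dom = html.fromstring(loads(payload)['html'])
    article = dom.xpath('//article')[0]
    print(article.attrib.get('data-contenturl'))  # http://example.com
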
diff --git a/searx/engines/duckduckgo_definitions.py b/searx/engines/duckduckgo_definitions.py
index 8f81d2c8e..b66d6c0f2 100644
--- a/searx/engines/duckduckgo_definitions.py
+++ b/searx/engines/duckduckgo_definitions.py
@@ -1,6 +1,7 @@
import json
from urllib import urlencode
from lxml import html
+from searx.utils import html_to_text
from searx.engines.xpath import extract_text
url = 'https://api.duckduckgo.com/'\
@@ -17,11 +18,6 @@ def result_to_text(url, text, htmlResult):
return text
-def html_to_text(htmlFragment):
- dom = html.fromstring(htmlFragment)
- return extract_text(dom)
-
-
def request(query, params):
# TODO add kl={locale}
params['url'] = url.format(query=urlencode({'q': query}))
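
The removed helper now lives in searx.utils; per the deleted lines, it
reduces an HTML fragment to its text. An equivalent standalone sketch
(using lxml's text_content() in place of searx's extract_text wrapper):

    from lxml import html

    def html_to_text(htmlFragment):
        # same behavior as the removed local helper
        dom = html.fromstring(htmlFragment)
        return dom.text_content()

    print(html_to_text('<b>Paris</b> is the capital of France'))
    # Paris is the capital of France
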
diff --git a/searx/engines/faroo.py b/searx/engines/faroo.py
index dada4758d..5360ea156 100644
--- a/searx/engines/faroo.py
+++ b/searx/engines/faroo.py
@@ -22,10 +22,17 @@ api_key = None
# search-url
url = 'http://www.faroo.com/'
-search_url = url + 'api?{query}&start={offset}&length={number_of_results}&l={language}&src={categorie}&i=false&f=json&key={api_key}'
+search_url = url + 'api?{query}'\
+ '&start={offset}'\
+ '&length={number_of_results}'\
+ '&l={language}'\
+ '&src={categorie}'\
+ '&i=false'\
+ '&f=json'\
+ '&key={api_key}' # noqa
search_category = {'general': 'web',
- 'news': 'news'}
+ 'news': 'news'}
# do search-request
@@ -80,8 +87,8 @@ def response(resp):
# parse results
for result in search_res['results']:
if result['news']:
- # timestamp (how many milliseconds have passed between now and the beginning of 1970)
- publishedDate = datetime.datetime.fromtimestamp(result['date']/1000.0)
+ # timestamp (milliseconds since 1970)
+ publishedDate = datetime.datetime.fromtimestamp(result['date']/1000.0) # noqa
# append news result
results.append({'url': result['url'],
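
The shortened comment is accurate: Faroo reports dates as Unix
timestamps in milliseconds, so dividing by 1000.0 before fromtimestamp()
gives the intended datetime. For example:

    import datetime

    # 1417392000000 ms since the epoch = 2014-12-01 00:00:00 UTC
    # (fromtimestamp() renders it in local time)
    print(datetime.datetime.fromtimestamp(1417392000000 / 1000.0))
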
diff --git a/searx/engines/flickr-noapi.py b/searx/engines/flickr-noapi.py
new file mode 100644
index 000000000..f90903647
--- /dev/null
+++ b/searx/engines/flickr-noapi.py
@@ -0,0 +1,95 @@
+#!/usr/bin/env python
+
+# Flickr (Images)
+#
+# @website https://www.flickr.com
+# @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)
+#
+# @using-api no
+# @results HTML
+# @stable no
+# @parse url, title, thumbnail, img_src
+
+from urllib import urlencode
+from json import loads
+import re
+
+categories = ['images']
+
+url = 'https://secure.flickr.com/'
+search_url = url+'search/?{query}&page={page}'
+photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}'
+regex = re.compile(r"\"search-photos-models\",\"photos\":(.*}),\"totalItems\":", re.DOTALL)
+image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')
+
+paging = True
+
+
+def build_flickr_url(user_id, photo_id):
+ return photo_url.format(userid=user_id, photoid=photo_id)
+
+
+def request(query, params):
+ params['url'] = search_url.format(query=urlencode({'text': query}),
+ page=params['pageno'])
+ return params
+
+
+def response(resp):
+ results = []
+
+ matches = regex.search(resp.text)
+
+ if matches is None:
+ return results
+
+ match = matches.group(1)
+ search_results = loads(match)
+
+ if '_data' not in search_results:
+ return []
+
+ photos = search_results['_data']
+
+ for photo in photos:
+
+ # In paged results, photos from earlier pages
+ # are represented by None placeholders
+ if photo is None:
+ continue
+
+ img_src = None
+ # From the largest size down to the smallest
+ for image_size in image_sizes:
+ if image_size in photo['sizes']:
+ img_src = photo['sizes'][image_size]['displayUrl']
+ break
+
+ if not img_src:
+ continue
+
+ if 'id' not in photo['owner']:
+ continue
+
+ url = build_flickr_url(photo['owner']['id'], photo['id'])
+
+ title = photo['title']
+
+ content = '<span class="photo-author">' +\
+ photo['owner']['username'] +\
+ '</span><br />'
+
+ if 'description' in photo:
+ content = content +\
+ '<span class="description">' +\
+ photo['description'] +\
+ '</span>'
+
+ # append result
+ results.append({'url': url,
+ 'title': title,
+ 'img_src': img_src,
+ 'content': content,
+ 'template': 'images.html'})
+
+ return results
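
image_sizes orders Flickr's size suffixes from the original ('o') down
to the smallest thumbnail ('s'); the loop takes the first size a photo
actually offers. Standalone, with a hypothetical sizes dict:

    image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's')

    # hypothetical photo offering only two of the sizes
    sizes = {'z': {'displayUrl': 'https://example.com/z.jpg'},
             's': {'displayUrl': 'https://example.com/s.jpg'}}

    img_src = None
    for image_size in image_sizes:
        if image_size in sizes:
            img_src = sizes[image_size]['displayUrl']
            break
    print(img_src)  # https://example.com/z.jpg - largest available wins
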
diff --git a/searx/engines/flickr.py b/searx/engines/flickr.py
index 4ec2841dd..4dadd80a6 100644
--- a/searx/engines/flickr.py
+++ b/searx/engines/flickr.py
@@ -1,54 +1,87 @@
#!/usr/bin/env python
+## Flickr (Images)
+#
+# @website https://www.flickr.com
+# @provide-api yes (https://secure.flickr.com/services/api/flickr.photos.search.html)
+#
+# @using-api yes
+# @results JSON
+# @stable yes
+# @parse url, title, thumbnail, img_src
+# More info on api-key: https://www.flickr.com/services/apps/create/
+
from urllib import urlencode
-#from json import loads
-from urlparse import urljoin
-from lxml import html
-from time import time
+from json import loads
categories = ['images']
-url = 'https://secure.flickr.com/'
-search_url = url+'search/?{query}&page={page}'
-results_xpath = '//div[@class="view display-item-tile"]/figure/div'
+nb_per_page = 15
+paging = True
+api_key = None
+
+
+url = 'https://api.flickr.com/services/rest/?method=flickr.photos.search' +\
+ '&api_key={api_key}&{text}&sort=relevance' +\
+ '&extras=description%2C+owner_name%2C+url_o%2C+url_z' +\
+ '&per_page={nb_per_page}&format=json&nojsoncallback=1&page={page}'
+photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}'
paging = True
+def build_flickr_url(user_id, photo_id):
+ return photo_url.format(userid=user_id, photoid=photo_id)
+
+
def request(query, params):
- params['url'] = search_url.format(query=urlencode({'text': query}),
- page=params['pageno'])
- time_string = str(int(time())-3)
- params['cookies']['BX'] = '3oqjr6d9nmpgl&b=3&s=dh'
- params['cookies']['xb'] = '421409'
- params['cookies']['localization'] = 'en-us'
- params['cookies']['flrbp'] = time_string +\
- '-3a8cdb85a427a33efda421fbda347b2eaf765a54'
- params['cookies']['flrbs'] = time_string +\
- '-ed142ae8765ee62c9ec92a9513665e0ee1ba6776'
- params['cookies']['flrb'] = '9'
+ params['url'] = url.format(text=urlencode({'text': query}),
+ api_key=api_key,
+ nb_per_page=nb_per_page,
+ page=params['pageno'])
return params
def response(resp):
results = []
- dom = html.fromstring(resp.text)
- for result in dom.xpath(results_xpath):
- img = result.xpath('.//img')
- if not img:
- continue
+ search_results = loads(resp.text)
- img = img[0]
- img_src = 'https:'+img.attrib.get('src')
+ # return empty array if there are no results
+ if 'photos' not in search_results:
+ return []
- if not img_src:
+ if 'photo' not in search_results['photos']:
+ return []
+
+ photos = search_results['photos']['photo']
+
+ # parse results
+ for photo in photos:
+ if 'url_o' in photo:
+ img_src = photo['url_o']
+ elif 'url_z' in photo:
+ img_src = photo['url_z']
+ else:
continue
- href = urljoin(url, result.xpath('.//a')[0].attrib.get('href'))
- title = img.attrib.get('alt', '')
- results.append({'url': href,
+ url = build_flickr_url(photo['owner'], photo['id'])
+
+ title = photo['title']
+
+ content = '<span class="photo-author">' +\
+ photo['ownername'] +\
+ '</span><br />' +\
+ '<span class="description">' +\
+ photo['description']['_content'] +\
+ '</span>'
+
+ # append result
+ results.append({'url': url,
'title': title,
'img_src': img_src,
+ 'content': content,
'template': 'images.html'})
+
+ # return results
return results
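
With api_key set from settings.yml, request() expands the REST URL as
below (hypothetical key, standalone):

    from urllib import urlencode  # Python 2

    url = ('https://api.flickr.com/services/rest/'
           '?method=flickr.photos.search'
           '&api_key={api_key}&{text}&sort=relevance'
           '&extras=description%2C+owner_name%2C+url_o%2C+url_z'
           '&per_page={nb_per_page}&format=json&nojsoncallback=1'
           '&page={page}')

    print(url.format(text=urlencode({'text': 'cat'}),
                     api_key='HYPOTHETICAL_KEY',
                     nb_per_page=15,
                     page=1))
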
diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py
index 491f5c2c2..79fac3fb0 100644
--- a/searx/engines/google_images.py
+++ b/searx/engines/google_images.py
@@ -9,7 +9,7 @@
# @stable yes (but deprecated)
# @parse url, title, img_src
-from urllib import urlencode,unquote
+from urllib import urlencode, unquote
from json import loads
# engine dependent config
diff --git a/searx/engines/kickass.py b/searx/engines/kickass.py
new file mode 100644
index 000000000..16e9d6de6
--- /dev/null
+++ b/searx/engines/kickass.py
@@ -0,0 +1,88 @@
+## Kickass Torrent (Videos, Music, Files)
+#
+# @website https://kickass.so
+# @provide-api no (nothing found)
+#
+# @using-api no
+# @results HTML (using search portal)
+# @stable no (HTML can change)
+# @parse url, title, content, seed, leech, magnetlink
+
+from urlparse import urljoin
+from cgi import escape
+from urllib import quote
+from lxml import html
+from operator import itemgetter
+
+# engine dependent config
+categories = ['videos', 'music', 'files']
+paging = True
+
+# search-url
+url = 'https://kickass.so/'
+search_url = url + 'search/{search_term}/{pageno}/'
+
+# specific xpath variables
+magnet_xpath = './/a[@title="Torrent magnet link"]'
+content_xpath = './/span[@class="font11px lightgrey block"]'
+
+
+# do search-request
+def request(query, params):
+ params['url'] = search_url.format(search_term=quote(query),
+ pageno=params['pageno'])
+
+ # FIX: SSLError: hostname 'kickass.so'
+ # doesn't match either of '*.kickass.to', 'kickass.to'
+ params['verify'] = False
+
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ dom = html.fromstring(resp.text)
+
+ search_res = dom.xpath('//table[@class="data"]//tr')
+
+ # return empty array if nothing is found
+ if not search_res:
+ return []
+
+ # parse results
+ for result in search_res[1:]:
+ link = result.xpath('.//a[@class="cellMainLink"]')[0]
+ href = urljoin(url, link.attrib['href'])
+ title = ' '.join(link.xpath('.//text()'))
+ content = escape(html.tostring(result.xpath(content_xpath)[0],
+ method="text"))
+ seed = result.xpath('.//td[contains(@class, "green")]/text()')[0]
+ leech = result.xpath('.//td[contains(@class, "red")]/text()')[0]
+
+ # convert seed to int if possible
+ if seed.isdigit():
+ seed = int(seed)
+ else:
+ seed = 0
+
+ # convert leech to int if possible
+ if leech.isdigit():
+ leech = int(leech)
+ else:
+ leech = 0
+
+ magnetlink = result.xpath(magnet_xpath)[0].attrib['href']
+
+ # append result
+ results.append({'url': href,
+ 'title': title,
+ 'content': content,
+ 'seed': seed,
+ 'leech': leech,
+ 'magnetlink': magnetlink,
+ 'template': 'torrent.html'})
+
+ # return results sorted by seeder
+ return sorted(results, key=itemgetter('seed'), reverse=True)
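
Because seed and leech were coerced to int (or 0) above,
itemgetter('seed') gives a plain numeric sort, highest seeder count
first:

    from operator import itemgetter

    results = [{'title': 'a', 'seed': 3},
               {'title': 'b', 'seed': 0},    # e.g. a non-digit cell
               {'title': 'c', 'seed': 12}]
    for r in sorted(results, key=itemgetter('seed'), reverse=True):
        print(r['title'])   # c, a, b
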
diff --git a/searx/engines/mediawiki.py b/searx/engines/mediawiki.py
index 4a8b0e8b8..8ca32c62a 100644
--- a/searx/engines/mediawiki.py
+++ b/searx/engines/mediawiki.py
@@ -28,15 +28,17 @@ search_url = base_url + 'w/api.php?action=query'\
'&srprop=timestamp'\
'&format=json'\
'&sroffset={offset}'\
- '&srlimit={limit}'
+ '&srlimit={limit}' # noqa
# do search-request
def request(query, params):
offset = (params['pageno'] - 1) * number_of_results
+
string_args = dict(query=urlencode({'srsearch': query}),
- offset=offset,
- limit=number_of_results)
+ offset=offset,
+ limit=number_of_results)
+
format_strings = list(Formatter().parse(base_url))
if params['language'] == 'all':
@@ -67,7 +69,8 @@ def response(resp):
# parse results
for result in search_results['query']['search']:
- url = base_url.format(language=resp.search_params['language']) + 'wiki/' + quote(result['title'].replace(' ', '_').encode('utf-8'))
+ url = base_url.format(language=resp.search_params['language']) +\
+ 'wiki/' + quote(result['title'].replace(' ', '_').encode('utf-8'))
# append result
results.append({'url': url,
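
Around this hunk, the engine uses string.Formatter to detect whether
base_url contains a {language} placeholder before substituting the UI
language. How that introspection works, standalone:

    from string import Formatter

    base_url = 'https://{language}.wikipedia.org/'
    # parse() yields (literal_text, field_name, format_spec, conversion)
    # tuples; a non-None field_name marks a placeholder
    fields = [f[1] for f in Formatter().parse(base_url)]
    print('language' in fields)  # True
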
diff --git a/searx/engines/openstreetmap.py b/searx/engines/openstreetmap.py
index 36b6011e7..68446ef5f 100644
--- a/searx/engines/openstreetmap.py
+++ b/searx/engines/openstreetmap.py
@@ -9,20 +9,24 @@
# @parse url, title
from json import loads
+from searx.utils import searx_useragent
# engine dependent config
categories = ['map']
paging = False
# search-url
-url = 'https://nominatim.openstreetmap.org/search/{query}?format=json&polygon_geojson=1&addressdetails=1'
-
+base_url = 'https://nominatim.openstreetmap.org/'
+search_string = 'search/{query}?format=json&polygon_geojson=1&addressdetails=1'
result_base_url = 'https://openstreetmap.org/{osm_type}/{osm_id}'
# do search-request
def request(query, params):
- params['url'] = url.format(query=query)
+ params['url'] = base_url + search_string.format(query=query)
+
+ # using searx User-Agent
+ params['headers']['User-Agent'] = searx_useragent()
return params
@@ -68,8 +72,8 @@ def response(resp):
address.update({'house_number': address_raw.get('house_number'),
'road': address_raw.get('road'),
'locality': address_raw.get('city',
- address_raw.get('town',
- address_raw.get('village'))),
+ address_raw.get('town', # noqa
+ address_raw.get('village'))), # noqa
'postcode': address_raw.get('postcode'),
'country': address_raw.get('country'),
'country_code': address_raw.get('country_code')})
diff --git a/searx/engines/photon.py b/searx/engines/photon.py
new file mode 100644
index 000000000..16340d24a
--- /dev/null
+++ b/searx/engines/photon.py
@@ -0,0 +1,132 @@
+## Photon (Map)
+#
+# @website https://photon.komoot.de
+# @provide-api yes (https://photon.komoot.de/)
+#
+# @using-api yes
+# @results JSON
+# @stable yes
+# @parse url, title
+
+from urllib import urlencode
+from json import loads
+from searx.utils import searx_useragent
+
+# engine dependent config
+categories = ['map']
+paging = False
+language_support = True
+number_of_results = 10
+
+# search-url
+base_url = 'https://photon.komoot.de/'
+search_string = 'api/?{query}&limit={limit}'
+result_base_url = 'https://openstreetmap.org/{osm_type}/{osm_id}'
+
+# list of supported languages
+allowed_languages = ['de', 'en', 'fr', 'it']
+
+
+# do search-request
+def request(query, params):
+ params['url'] = base_url +\
+ search_string.format(query=urlencode({'q': query}),
+ limit=number_of_results)
+
+ if params['language'] != 'all':
+ language = params['language'].split('_')[0]
+ if language in allowed_languages:
+ params['url'] = params['url'] + "&lang=" + language
+
+ # using searx User-Agent
+ params['headers']['User-Agent'] = searx_useragent()
+
+ # FIX: SSLError: SSL3_GET_SERVER_CERTIFICATE:certificate verify failed
+ params['verify'] = False
+
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+ json = loads(resp.text)
+
+ # parse results
+ for r in json.get('features', {}):
+
+ properties = r.get('properties')
+
+ if not properties:
+ continue
+
+ # get title
+ title = properties['name']
+
+ # get osm-type
+ if properties.get('osm_type') == 'N':
+ osm_type = 'node'
+ elif properties.get('osm_type') == 'W':
+ osm_type = 'way'
+ elif properties.get('osm_type') == 'R':
+ osm_type = 'relation'
+ else:
+ # continue if invalid osm-type
+ continue
+
+ url = result_base_url.format(osm_type=osm_type,
+ osm_id=properties.get('osm_id'))
+
+ osm = {'type': osm_type,
+ 'id': properties.get('osm_id')}
+
+ geojson = r.get('geometry')
+
+ if properties.get('extent'):
+ boundingbox = [properties.get('extent')[3],
+ properties.get('extent')[1],
+ properties.get('extent')[0],
+ properties.get('extent')[2]]
+ else:
+ # TODO: better boundingbox calculation
+ boundingbox = [geojson['coordinates'][1],
+ geojson['coordinates'][1],
+ geojson['coordinates'][0],
+ geojson['coordinates'][0]]
+
+ # address calculation
+ address = {}
+
+ # get name
+ if properties.get('osm_key') == 'amenity' or\
+ properties.get('osm_key') == 'shop' or\
+ properties.get('osm_key') == 'tourism' or\
+ properties.get('osm_key') == 'leisure':
+ address = {'name': properties.get('name')}
+
+ # add rest of the address data, if a name was found
+ if address.get('name'):
+ address.update({'house_number': properties.get('housenumber'),
+ 'road': properties.get('street'),
+ 'locality': properties.get('city',
+ properties.get('town', # noqa
+ properties.get('village'))), # noqa
+ 'postcode': properties.get('postcode'),
+ 'country': properties.get('country')})
+ else:
+ address = None
+
+ # append result
+ results.append({'template': 'map.html',
+ 'title': title,
+ 'content': '',
+ 'longitude': geojson['coordinates'][0],
+ 'latitude': geojson['coordinates'][1],
+ 'boundingbox': boundingbox,
+ 'geojson': geojson,
+ 'address': address,
+ 'osm': osm,
+ 'url': url})
+
+ # return results
+ return results
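
The N/W/R letters in Photon's osm_type property select the
OpenStreetMap object kind used in the result URL; the if/elif chain
above is equivalent to a small lookup table:

    result_base_url = 'https://openstreetmap.org/{osm_type}/{osm_id}'

    # same mapping as the if/elif chain
    osm_types = {'N': 'node', 'W': 'way', 'R': 'relation'}

    properties = {'osm_type': 'W', 'osm_id': 4321}  # hypothetical result
    osm_type = osm_types.get(properties['osm_type'])
    if osm_type is not None:                 # skip unknown types
        print(result_base_url.format(osm_type=osm_type,
                                     osm_id=properties['osm_id']))
    # https://openstreetmap.org/way/4321
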
diff --git a/searx/engines/piratebay.py b/searx/engines/piratebay.py
index 14905dc83..f6144faa2 100644
--- a/searx/engines/piratebay.py
+++ b/searx/engines/piratebay.py
@@ -19,7 +19,7 @@ categories = ['videos', 'music', 'files']
paging = True
# search-url
-url = 'https://thepiratebay.se/'
+url = 'https://thepiratebay.cr/'
search_url = url + 'search/{search_term}/{pageno}/99/{search_type}'
# piratebay specific type-definitions
diff --git a/searx/engines/searchcode_code.py b/searx/engines/searchcode_code.py
new file mode 100644
index 000000000..0f98352c1
--- /dev/null
+++ b/searx/engines/searchcode_code.py
@@ -0,0 +1,65 @@
+## Searchcode (It)
+#
+# @website https://searchcode.com/
+# @provide-api yes (https://searchcode.com/api/)
+#
+# @using-api yes
+# @results JSON
+# @stable yes
+# @parse url, title, content
+
+from urllib import urlencode
+from json import loads
+import cgi
+
+# engine dependent config
+categories = ['it']
+paging = True
+
+# search-url
+url = 'https://searchcode.com/'
+search_url = url+'api/codesearch_I/?{query}&p={pageno}'
+
+
+# do search-request
+def request(query, params):
+ params['url'] = search_url.format(query=urlencode({'q': query}),
+ pageno=params['pageno']-1)
+
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ search_results = loads(resp.text)
+
+ # parse results
+ for result in search_results['results']:
+ href = result['url']
+ title = "" + result['name'] + " - " + result['filename']
+ content = result['repo'] + "<br />"
+
+ lines = dict()
+ for line, code in result['lines'].items():
+ lines[int(line)] = code
+
+ content = content + '<pre class="code-formatter"><table class="code">'
+ for line, code in sorted(lines.items()):
+ content = content + '<tr><td class="line-number" style="padding-right:5px;">'
+ content = content + str(line) + '</td><td class="code-snippet">'
+ # Replace every two spaces with ' &nbsp;' to keep formatting
+ # while allowing the browser to break the line if necessary
+ content = content + cgi.escape(code).replace('\t', '    ').replace('  ', '&nbsp; ').replace('  ', ' &nbsp;')
+ content = content + "</td></tr>"
+
+ content = content + "</table></pre>"
+
+ # append result
+ results.append({'url': href,
+ 'title': title,
+ 'content': content})
+
+ # return results
+ return results
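
The replace() chain keeps indentation readable inside the generated
<table> while still letting the browser wrap long lines: tabs become
four spaces, then space pairs become '&nbsp; ' (with any pairs left by
odd-length runs caught by the second pattern). Traced on a hypothetical
source line:

    import cgi  # Python 2; cgi.escape as in the engine above

    code = '\tif (x <  y)'   # tab indent plus a double space
    out = cgi.escape(code).replace('\t', '    ')\
                          .replace('  ', '&nbsp; ')\
                          .replace('  ', ' &nbsp;')
    print(out)
    # &nbsp; &nbsp; if (x &lt;&nbsp; y)
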
diff --git a/searx/engines/searchcode_doc.py b/searx/engines/searchcode_doc.py
new file mode 100644
index 000000000..b5b7159be
--- /dev/null
+++ b/searx/engines/searchcode_doc.py
@@ -0,0 +1,56 @@
+## Searchcode (It)
+#
+# @website https://searchcode.com/
+# @provide-api yes (https://searchcode.com/api/)
+#
+# @using-api yes
+# @results JSON
+# @stable yes
+# @parse url, title, content
+
+from urllib import urlencode
+from json import loads
+
+# engine dependent config
+categories = ['it']
+paging = True
+
+# search-url
+url = 'https://searchcode.com/'
+search_url = url+'api/search_IV/?{query}&p={pageno}'
+
+
+# do search-request
+def request(query, params):
+ params['url'] = search_url.format(query=urlencode({'q': query}),
+ pageno=params['pageno']-1)
+
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ search_results = loads(resp.text)
+
+ # parse results
+ for result in search_results['results']:
+ href = result['url']
+ title = "[" + result['type'] + "] " +\
+ result['namespace'] +\
+ " " + result['name']
+ content = '<span class="highlight">[' +\
+ result['type'] + "] " +\
+ result['name'] + " " +\
+ result['synopsis'] +\
+ "</span><br />" +\
+ result['description']
+
+ # append result
+ results.append({'url': href,
+ 'title': title,
+ 'content': content})
+
+ # return results
+ return results
diff --git a/searx/engines/soundcloud.py b/searx/engines/soundcloud.py
index 390e7ca1f..164a569a3 100644
--- a/searx/engines/soundcloud.py
+++ b/searx/engines/soundcloud.py
@@ -20,7 +20,12 @@ guest_client_id = 'b45b1aa10f1ac2941910a7f0d10f8e28'
# search-url
url = 'https://api.soundcloud.com/'
-search_url = url + 'search?{query}&facet=model&limit=20&offset={offset}&linked_partitioning=1&client_id={client_id}'
+search_url = url + 'search?{query}'\
+ '&facet=model'\
+ '&limit=20'\
+ '&offset={offset}'\
+ '&linked_partitioning=1'\
+ '&client_id={client_id}' # noqa
# do search-request
diff --git a/searx/engines/subtitleseeker.py b/searx/engines/subtitleseeker.py
new file mode 100644
index 000000000..c413dcf26
--- /dev/null
+++ b/searx/engines/subtitleseeker.py
@@ -0,0 +1,78 @@
+## Subtitleseeker (Video)
+#
+# @website http://www.subtitleseeker.com
+# @provide-api no
+#
+# @using-api no
+# @results HTML
+# @stable no (HTML can change)
+# @parse url, title, content
+
+from cgi import escape
+from urllib import quote_plus
+from lxml import html
+from searx.languages import language_codes
+
+# engine dependent config
+categories = ['videos']
+paging = True
+language = ""
+
+# search-url
+url = 'http://www.subtitleseeker.com/'
+search_url = url+'search/TITLES/{query}&p={pageno}'
+
+# specific xpath variables
+results_xpath = '//div[@class="boxRows"]'
+
+
+# do search-request
+def request(query, params):
+ params['url'] = search_url.format(query=quote_plus(query),
+ pageno=params['pageno'])
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ dom = html.fromstring(resp.text)
+
+ search_lang = ""
+
+ if resp.search_params['language'] != 'all':
+ search_lang = [lc[1]
+ for lc in language_codes
+ if lc[0][:2] == resp.search_params['language']][0]
+
+ # parse results
+ for result in dom.xpath(results_xpath):
+ link = result.xpath(".//a")[0]
+ href = link.attrib.get('href')
+
+ if language != "":
+ href = href + language + '/'
+ elif search_lang:
+ href = href + search_lang + '/'
+
+ title = escape(link.xpath(".//text()")[0])
+
+ content = result.xpath('.//div[contains(@class,"red")]//text()')[0]
+ content = content + " - "
+ text = result.xpath('.//div[contains(@class,"grey-web")]')[0]
+ content = content + html.tostring(text, method='text')
+
+ if result.xpath(".//span") != []:
+ content = content +\
+ " - (" +\
+ result.xpath(".//span//text()")[0].strip() +\
+ ")"
+
+ # append result
+ results.append({'url': href,
+ 'title': title,
+ 'content': escape(content)})
+
+ # return results
+ return results
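
The list comprehension turns searx's two-letter UI language into the
language name Subtitleseeker expects in its URL path, via
searx.languages.language_codes (tuples whose first element is a locale
code, second a display name). Sketch with a stand-in table:

    # stand-in for searx.languages.language_codes
    language_codes = (('en_US', 'English', '', ''),
                      ('fr_FR', 'French', '', ''))

    ui_language = 'fr'   # hypothetical resp.search_params['language']
    search_lang = [lc[1]
                   for lc in language_codes
                   if lc[0][:2] == ui_language][0]
    print(search_lang)   # French -> href becomes .../French/
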
diff --git a/searx/engines/twitter.py b/searx/engines/twitter.py
index 0689150c8..bd9a8c2fc 100644
--- a/searx/engines/twitter.py
+++ b/searx/engines/twitter.py
@@ -1,6 +1,6 @@
## Twitter (Social media)
#
-# @website https://www.bing.com/news
+# @website https://twitter.com/
# @provide-api yes (https://dev.twitter.com/docs/using-search)
#
# @using-api no
@@ -14,6 +14,7 @@ from urlparse import urljoin
from urllib import urlencode
from lxml import html
from cgi import escape
+from datetime import datetime
# engine dependent config
categories = ['social media']
@@ -27,7 +28,8 @@ search_url = base_url+'search?'
results_xpath = '//li[@data-item-type="tweet"]'
link_xpath = './/small[@class="time"]//a'
title_xpath = './/span[@class="username js-action-profile-name"]//text()'
-content_xpath = './/p[@class="js-tweet-text tweet-text"]//text()'
+content_xpath = './/p[@class="js-tweet-text tweet-text"]'
+timestamp_xpath = './/span[contains(@class,"_timestamp")]'
# do search-request
@@ -52,12 +54,21 @@ def response(resp):
link = tweet.xpath(link_xpath)[0]
url = urljoin(base_url, link.attrib.get('href'))
title = ''.join(tweet.xpath(title_xpath))
- content = escape(''.join(tweet.xpath(content_xpath)))
-
- # append result
- results.append({'url': url,
- 'title': title,
- 'content': content})
+ content = escape(html.tostring(tweet.xpath(content_xpath)[0], method='text', encoding='UTF-8').decode("utf-8"))
+ pubdate = tweet.xpath(timestamp_xpath)
+ if len(pubdate) > 0:
+ timestamp = float(pubdate[0].attrib.get('data-time'))
+ publishedDate = datetime.fromtimestamp(timestamp, None)
+ # append result
+ results.append({'url': url,
+ 'title': title,
+ 'content': content,
+ 'publishedDate': publishedDate})
+ else:
+ # append result
+ results.append({'url': url,
+ 'title': title,
+ 'content': content})
# return results
return results
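
The _timestamp span carries the tweet's Unix epoch in its data-time
attribute; when present it becomes publishedDate, otherwise the result
is appended without one. Standalone:

    from datetime import datetime

    data_time = '1417392000'   # hypothetical data-time attribute value
    publishedDate = datetime.fromtimestamp(float(data_time), None)
    print(publishedDate)       # 2014-12-01 00:00:00 (local time)
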
diff --git a/searx/engines/yacy.py b/searx/engines/yacy.py
index 3ee0e91c7..4c4fac7df 100644
--- a/searx/engines/yacy.py
+++ b/searx/engines/yacy.py
@@ -24,7 +24,11 @@ number_of_results = 5
# search-url
base_url = 'http://localhost:8090'
-search_url = '/yacysearch.json?{query}&startRecord={offset}&maximumRecords={limit}&contentdom={search_type}&resource=global'
+search_url = '/yacysearch.json?{query}'\
+ '&startRecord={offset}'\
+ '&maximumRecords={limit}'\
+ '&contentdom={search_type}'\
+ '&resource=global' # noqa
# yacy specific type-definitions
search_types = {'general': 'text',
@@ -39,10 +43,11 @@ def request(query, params):
offset = (params['pageno'] - 1) * number_of_results
search_type = search_types.get(params['category'], '0')
- params['url'] = base_url + search_url.format(query=urlencode({'query': query}),
- offset=offset,
- limit=number_of_results,
- search_type=search_type)
+ params['url'] = base_url +\
+ search_url.format(query=urlencode({'query': query}),
+ offset=offset,
+ limit=number_of_results,
+ search_type=search_type)
# add language tag if specified
if params['language'] != 'all':
@@ -70,19 +75,19 @@ def response(resp):
# append result
results.append({'url': result['link'],
- 'title': result['title'],
- 'content': result['description'],
- 'publishedDate': publishedDate})
+ 'title': result['title'],
+ 'content': result['description'],
+ 'publishedDate': publishedDate})
elif resp.search_params['category'] == 'images':
# parse image results
for result in search_results:
# append result
results.append({'url': result['url'],
- 'title': result['title'],
- 'content': '',
- 'img_src': result['image'],
- 'template': 'images.html'})
+ 'title': result['title'],
+ 'content': '',
+ 'img_src': result['image'],
+ 'template': 'images.html'})
#TODO parse video, audio and file results
diff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py
index 938540ece..c6c5b0d0d 100644
--- a/searx/engines/yahoo.py
+++ b/searx/engines/yahoo.py
@@ -20,7 +20,8 @@ paging = True
language_support = True
# search-url
-search_url = 'https://search.yahoo.com/search?{query}&b={offset}&fl=1&vl=lang_{lang}'
+base_url = 'https://search.yahoo.com/'
+search_url = 'search?{query}&b={offset}&fl=1&vl=lang_{lang}'
# specific xpath variables
results_xpath = '//div[@class="res"]'
@@ -57,9 +58,9 @@ def request(query, params):
else:
language = params['language'].split('_')[0]
- params['url'] = search_url.format(offset=offset,
- query=urlencode({'p': query}),
- lang=language)
+ params['url'] = base_url + search_url.format(offset=offset,
+ query=urlencode({'p': query}),
+ lang=language)
# TODO required?
params['cookies']['sB'] = 'fl=1&vl=lang_{lang}&sh=1&rw=new&v=1'\