-rw-r--r--  searx/engines/bing_news.py       |  11
-rw-r--r--  searx/engines/faroo.py           |  15
-rw-r--r--  searx/engines/google_images.py   |   2
-rw-r--r--  searx/engines/kickass.py         |   8
-rw-r--r--  searx/engines/mediawiki.py       |  11
-rw-r--r--  searx/engines/openstreetmap.py   |  14
-rw-r--r--  searx/engines/photon.py          | 128
-rw-r--r--  searx/engines/soundcloud.py      |   7
-rw-r--r--  searx/engines/yacy.py            |  29
-rw-r--r--  searx/engines/yahoo.py           |   9
-rw-r--r--  searx/https_rewrite.py           |  68
-rw-r--r--  searx/https_rules/Soundcloud.xml |   2
-rw-r--r--  searx/search.py                  |  39
-rw-r--r--  searx/settings.yml               |   4
-rw-r--r--  searx/webapp.py                  |  59
15 files changed, 291 insertions, 115 deletions
diff --git a/searx/engines/bing_news.py b/searx/engines/bing_news.py
index 5dce4a2b2..3dda04cbb 100644
--- a/searx/engines/bing_news.py
+++ b/searx/engines/bing_news.py
@@ -57,12 +57,16 @@ def response(resp):
link = result.xpath('.//div[@class="newstitle"]/a')[0]
url = link.attrib.get('href')
title = ' '.join(link.xpath('.//text()'))
- contentXPath = result.xpath('.//div[@class="sn_txt"]/div//span[@class="sn_snip"]//text()')
+ contentXPath = result.xpath('.//div[@class="sn_txt"]/div'
+ '//span[@class="sn_snip"]//text()')
if contentXPath is not None:
content = escape(' '.join(contentXPath))
# parse publishedDate
- publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div//span[contains(@class,"sn_ST")]//span[contains(@class,"sn_tm")]//text()')
+ publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div'
+ '//span[contains(@class,"sn_ST")]'
+ '//span[contains(@class,"sn_tm")]'
+ '//text()')
if publishedDateXPath is not None:
publishedDate = escape(' '.join(publishedDateXPath))
@@ -74,7 +78,8 @@ def response(resp):
timeNumbers = re.findall(r'\d+', publishedDate)
publishedDate = datetime.now()\
- timedelta(hours=int(timeNumbers[0]))
- elif re.match("^[0-9]+ hour(s|), [0-9]+ minute(s|) ago$", publishedDate):
+ elif re.match("^[0-9]+ hour(s|),"
+ " [0-9]+ minute(s|) ago$", publishedDate):
timeNumbers = re.findall(r'\d+', publishedDate)
publishedDate = datetime.now()\
- timedelta(hours=int(timeNumbers[0]))\
diff --git a/searx/engines/faroo.py b/searx/engines/faroo.py
index dada4758d..5360ea156 100644
--- a/searx/engines/faroo.py
+++ b/searx/engines/faroo.py
@@ -22,10 +22,17 @@ api_key = None
# search-url
url = 'http://www.faroo.com/'
-search_url = url + 'api?{query}&start={offset}&length={number_of_results}&l={language}&src={categorie}&i=false&f=json&key={api_key}'
+search_url = url + 'api?{query}'\
+ '&start={offset}'\
+ '&length={number_of_results}'\
+ '&l={language}'\
+ '&src={categorie}'\
+ '&i=false'\
+ '&f=json'\
+ '&key={api_key}' # noqa
search_category = {'general': 'web',
- 'news': 'news'}
+ 'news': 'news'}
# do search-request
@@ -80,8 +87,8 @@ def response(resp):
# parse results
for result in search_res['results']:
if result['news']:
- # timestamp (how many milliseconds have passed between now and the beginning of 1970)
- publishedDate = datetime.datetime.fromtimestamp(result['date']/1000.0)
+ # timestamp (milliseconds since 1970)
+ publishedDate = datetime.datetime.fromtimestamp(result['date']/1000.0) # noqa
# append news result
results.append({'url': result['url'],
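
The shortened comment above documents a unit conversion: Faroo reports dates as milliseconds since the Unix epoch, while Python's fromtimestamp() expects seconds. A minimal sketch of the conversion (the sample value is illustrative, not from the API):

    import datetime

    raw_date = 1417000000000  # illustrative: milliseconds since 1970-01-01
    # divide by 1000.0 to get seconds before handing off to fromtimestamp()
    publishedDate = datetime.datetime.fromtimestamp(raw_date / 1000.0)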
diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py
index 491f5c2c2..79fac3fb0 100644
--- a/searx/engines/google_images.py
+++ b/searx/engines/google_images.py
@@ -9,7 +9,7 @@
# @stable yes (but deprecated)
# @parse url, title, img_src
-from urllib import urlencode,unquote
+from urllib import urlencode, unquote
from json import loads
# engine dependent config
diff --git a/searx/engines/kickass.py b/searx/engines/kickass.py
index de7324141..f1fcd9e1a 100644
--- a/searx/engines/kickass.py
+++ b/searx/engines/kickass.py
@@ -1,8 +1,8 @@
## Kickass Torrent (Videos, Music, Files)
-#
+#
# @website https://kickass.so
# @provide-api no (nothing found)
-#
+#
# @using-api no
# @results HTML (using search portal)
# @stable yes (HTML can change)
@@ -13,7 +13,6 @@ from cgi import escape
from urllib import quote
from lxml import html
from operator import itemgetter
-from dateutil import parser
# engine dependent config
categories = ['videos', 'music', 'files']
@@ -33,7 +32,8 @@ def request(query, params):
params['url'] = search_url.format(search_term=quote(query),
pageno=params['pageno'])
- # FIX: SSLError: hostname 'kickass.so' doesn't match either of '*.kickass.to', 'kickass.to'
+ # FIX: SSLError: hostname 'kickass.so'
+ # doesn't match either of '*.kickass.to', 'kickass.to'
params['verify'] = False
return params
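
Note that params['verify'] = False works around the certificate mismatch by disabling verification entirely: the request stays TLS-encrypted, but the server is no longer authenticated. A minimal sketch of what the transport layer effectively does (URL illustrative), assuming the requests library underneath:

    import requests

    # verify=False skips hostname and chain checks for this request only;
    # acceptable here solely as a stopgap for the kickass.so certificate
    resp = requests.get('https://kickass.so/usearch/test/', verify=False)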
diff --git a/searx/engines/mediawiki.py b/searx/engines/mediawiki.py
index 4a8b0e8b8..8ca32c62a 100644
--- a/searx/engines/mediawiki.py
+++ b/searx/engines/mediawiki.py
@@ -28,15 +28,17 @@ search_url = base_url + 'w/api.php?action=query'\
'&srprop=timestamp'\
'&format=json'\
'&sroffset={offset}'\
- '&srlimit={limit}'
+ '&srlimit={limit}' # noqa
# do search-request
def request(query, params):
offset = (params['pageno'] - 1) * number_of_results
+
string_args = dict(query=urlencode({'srsearch': query}),
- offset=offset,
- limit=number_of_results)
+ offset=offset,
+ limit=number_of_results)
+
format_strings = list(Formatter().parse(base_url))
if params['language'] == 'all':
@@ -67,7 +69,8 @@ def response(resp):
# parse results
for result in search_results['query']['search']:
- url = base_url.format(language=resp.search_params['language']) + 'wiki/' + quote(result['title'].replace(' ', '_').encode('utf-8'))
+ url = base_url.format(language=resp.search_params['language']) +\
+ 'wiki/' + quote(result['title'].replace(' ', '_').encode('utf-8'))
# append result
results.append({'url': url,
diff --git a/searx/engines/openstreetmap.py b/searx/engines/openstreetmap.py
index 36b6011e7..68446ef5f 100644
--- a/searx/engines/openstreetmap.py
+++ b/searx/engines/openstreetmap.py
@@ -9,20 +9,24 @@
# @parse url, title
from json import loads
+from searx.utils import searx_useragent
# engine dependent config
categories = ['map']
paging = False
# search-url
-url = 'https://nominatim.openstreetmap.org/search/{query}?format=json&polygon_geojson=1&addressdetails=1'
-
+base_url = 'https://nominatim.openstreetmap.org/'
+search_string = 'search/{query}?format=json&polygon_geojson=1&addressdetails=1'
result_base_url = 'https://openstreetmap.org/{osm_type}/{osm_id}'
# do search-request
def request(query, params):
- params['url'] = url.format(query=query)
+ params['url'] = base_url + search_string.format(query=query)
+
+ # using searx User-Agent
+ params['headers']['User-Agent'] = searx_useragent()
return params
@@ -68,8 +72,8 @@ def response(resp):
address.update({'house_number': address_raw.get('house_number'),
'road': address_raw.get('road'),
'locality': address_raw.get('city',
- address_raw.get('town',
- address_raw.get('village'))),
+ address_raw.get('town', # noqa
+ address_raw.get('village'))), # noqa
'postcode': address_raw.get('postcode'),
'country': address_raw.get('country'),
'country_code': address_raw.get('country_code')})
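
Sending searx's own User-Agent identifies the client to Nominatim, whose usage policy asks for exactly that. A minimal sketch of the resulting request parameters, assuming searx_useragent() returns a searx version string and using an illustrative query:

    from searx.utils import searx_useragent

    base_url = 'https://nominatim.openstreetmap.org/'
    search_string = 'search/{query}?format=json&polygon_geojson=1&addressdetails=1'

    params = {'headers': {}}
    params['url'] = base_url + search_string.format(query='Berlin')  # illustrative
    # identify the client instead of hiding behind a generic library UA
    params['headers']['User-Agent'] = searx_useragent()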
diff --git a/searx/engines/photon.py b/searx/engines/photon.py
new file mode 100644
index 000000000..03ad2a085
--- /dev/null
+++ b/searx/engines/photon.py
@@ -0,0 +1,128 @@
+## Photon (Map)
+#
+# @website https://photon.komoot.de
+# @provide-api yes (https://photon.komoot.de/)
+#
+# @using-api yes
+# @results JSON
+# @stable yes
+# @parse url, title
+
+from urllib import urlencode
+from json import loads
+from searx.utils import searx_useragent
+
+# engine dependent config
+categories = ['map']
+paging = False
+language_support = True
+number_of_results = 10
+
+# search-url
+base_url = 'https://photon.komoot.de/'
+search_string = 'api/?{query}&limit={limit}'
+result_base_url = 'https://openstreetmap.org/{osm_type}/{osm_id}'
+
+
+# do search-request
+def request(query, params):
+ params['url'] = base_url +\
+ search_string.format(query=urlencode({'q': query}),
+ limit=number_of_results)
+
+ if params['language'] != 'all':
+ params['url'] = params['url'] +\
+ "&lang=" + params['language'].replace('_', '-')
+
+ # using searx User-Agent
+ params['headers']['User-Agent'] = searx_useragent()
+
+ # FIX: SSLError: SSL3_GET_SERVER_CERTIFICATE:certificate verify failed
+ params['verify'] = False
+
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+ json = loads(resp.text)
+
+ # parse results
+ for r in json.get('features', {}):
+
+ properties = r.get('properties')
+
+ if not properties:
+ continue
+
+ # get title
+ title = properties['name']
+
+ # get osm-type
+ if properties.get('osm_type') == 'N':
+ osm_type = 'node'
+ elif properties.get('osm_type') == 'W':
+ osm_type = 'way'
+ elif properties.get('osm_type') == 'R':
+ osm_type = 'relation'
+ else:
+ # continue if invalid osm-type
+ continue
+
+ url = result_base_url.format(osm_type=osm_type,
+ osm_id=properties.get('osm_id'))
+
+ osm = {'type': osm_type,
+ 'id': properties.get('osm_id')}
+
+ geojson = r.get('geometry')
+
+ if properties.get('extent'):
+ boundingbox = [properties.get('extent')[3],
+ properties.get('extent')[1],
+ properties.get('extent')[0],
+ properties.get('extent')[2]]
+ else:
+ # TODO: better boundingbox calculation
+ boundingbox = [geojson['coordinates'][1],
+ geojson['coordinates'][1],
+ geojson['coordinates'][0],
+ geojson['coordinates'][0]]
+
+ # address calculation
+ address = {}
+
+ # get name
+ if properties.get('osm_key') == 'amenity' or\
+ properties.get('osm_key') == 'shop' or\
+ properties.get('osm_key') == 'tourism' or\
+ properties.get('osm_key') == 'leisure':
+ address = {'name': properties.get('name')}
+
+ # add rest of address data, if a name was already found
+ if address.get('name'):
+ address.update({'house_number': properties.get('housenumber'),
+ 'road': properties.get('street'),
+ 'locality': properties.get('city',
+ properties.get('town', # noqa
+ properties.get('village'))), # noqa
+ 'postcode': properties.get('postcode'),
+ 'country': properties.get('country')})
+ else:
+ address = None
+
+ # append result
+ results.append({'template': 'map.html',
+ 'title': title,
+ 'content': '',
+ 'longitude': geojson['coordinates'][0],
+ 'latitude': geojson['coordinates'][1],
+ 'boundingbox': boundingbox,
+ 'geojson': geojson,
+ 'address': address,
+ 'osm': osm,
+ 'url': url})
+
+ # return results
+ return results
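
For orientation, a minimal sketch of the GeoJSON feature shape that response() iterates over; all values are illustrative, and the osm_type letters map to node/way/relation exactly as in the branch above:

    feature = {
        'geometry': {'type': 'Point', 'coordinates': [13.38886, 52.51704]},
        'properties': {'name': 'Berlin',
                       'osm_type': 'N',  # 'N', 'W' or 'R'
                       'osm_id': 240109189,
                       'osm_key': 'place',
                       'extent': [13.08835, 52.67551, 13.76116, 52.33826]},
    }

    properties = feature['properties']
    osm_type = {'N': 'node', 'W': 'way', 'R': 'relation'}[properties['osm_type']]
    url = 'https://openstreetmap.org/{osm_type}/{osm_id}'.format(
        osm_type=osm_type, osm_id=properties['osm_id'])
    # -> https://openstreetmap.org/node/240109189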
diff --git a/searx/engines/soundcloud.py b/searx/engines/soundcloud.py
index 390e7ca1f..164a569a3 100644
--- a/searx/engines/soundcloud.py
+++ b/searx/engines/soundcloud.py
@@ -20,7 +20,12 @@ guest_client_id = 'b45b1aa10f1ac2941910a7f0d10f8e28'
# search-url
url = 'https://api.soundcloud.com/'
-search_url = url + 'search?{query}&facet=model&limit=20&offset={offset}&linked_partitioning=1&client_id={client_id}'
+search_url = url + 'search?{query}'\
+ '&facet=model'\
+ '&limit=20'\
+ '&offset={offset}'\
+ '&linked_partitioning=1'\
+ '&client_id={client_id}' # noqa
# do search-request
diff --git a/searx/engines/yacy.py b/searx/engines/yacy.py
index 3ee0e91c7..4c4fac7df 100644
--- a/searx/engines/yacy.py
+++ b/searx/engines/yacy.py
@@ -24,7 +24,11 @@ number_of_results = 5
# search-url
base_url = 'http://localhost:8090'
-search_url = '/yacysearch.json?{query}&startRecord={offset}&maximumRecords={limit}&contentdom={search_type}&resource=global'
+search_url = '/yacysearch.json?{query}'\
+ '&startRecord={offset}'\
+ '&maximumRecords={limit}'\
+ '&contentdom={search_type}'\
+ '&resource=global' # noqa
# yacy specific type-definitions
search_types = {'general': 'text',
@@ -39,10 +43,11 @@ def request(query, params):
offset = (params['pageno'] - 1) * number_of_results
search_type = search_types.get(params['category'], '0')
- params['url'] = base_url + search_url.format(query=urlencode({'query': query}),
- offset=offset,
- limit=number_of_results,
- search_type=search_type)
+ params['url'] = base_url +\
+ search_url.format(query=urlencode({'query': query}),
+ offset=offset,
+ limit=number_of_results,
+ search_type=search_type)
# add language tag if specified
if params['language'] != 'all':
@@ -70,19 +75,19 @@ def response(resp):
# append result
results.append({'url': result['link'],
- 'title': result['title'],
- 'content': result['description'],
- 'publishedDate': publishedDate})
+ 'title': result['title'],
+ 'content': result['description'],
+ 'publishedDate': publishedDate})
elif resp.search_params['category'] == 'images':
# parse image results
for result in search_results:
# append result
results.append({'url': result['url'],
- 'title': result['title'],
- 'content': '',
- 'img_src': result['image'],
- 'template': 'images.html'})
+ 'title': result['title'],
+ 'content': '',
+ 'img_src': result['image'],
+ 'template': 'images.html'})
#TODO parse video, audio and file results
diff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py
index 938540ece..c6c5b0d0d 100644
--- a/searx/engines/yahoo.py
+++ b/searx/engines/yahoo.py
@@ -20,7 +20,8 @@ paging = True
language_support = True
# search-url
-search_url = 'https://search.yahoo.com/search?{query}&b={offset}&fl=1&vl=lang_{lang}'
+base_url = 'https://search.yahoo.com/'
+search_url = 'search?{query}&b={offset}&fl=1&vl=lang_{lang}'
# specific xpath variables
results_xpath = '//div[@class="res"]'
@@ -57,9 +58,9 @@ def request(query, params):
else:
language = params['language'].split('_')[0]
- params['url'] = search_url.format(offset=offset,
- query=urlencode({'p': query}),
- lang=language)
+ params['url'] = base_url + search_url.format(offset=offset,
+ query=urlencode({'p': query}),
+ lang=language)
# TODO required?
params['cookies']['sB'] = 'fl=1&vl=lang_{lang}&sh=1&rw=new&v=1'\
diff --git a/searx/https_rewrite.py b/searx/https_rewrite.py
index 9faf3599d..408474a44 100644
--- a/searx/https_rewrite.py
+++ b/searx/https_rewrite.py
@@ -16,6 +16,7 @@ along with searx. If not, see < http://www.gnu.org/licenses/ >.
'''
import re
+from urlparse import urlparse
from lxml import etree
from os import listdir
from os.path import isfile, isdir, join
@@ -86,15 +87,23 @@ def load_single_https_ruleset(filepath):
# TODO hack, which convert a javascript regex group
# into a valid python regex group
- rule_from = ruleset.attrib.get('from').replace('$', '\\')
- rule_to = ruleset.attrib.get('to').replace('$', '\\')
+ rule_from = ruleset.attrib['from'].replace('$', '\\')
+ if rule_from.endswith('\\'):
+ rule_from = rule_from[:-1]+'$'
+ rule_to = ruleset.attrib['to'].replace('$', '\\')
+ if rule_to.endswith('\\'):
+ rule_to = rule_to[:-1]+'$'
# TODO, not working yet because of the hack above,
# currently doing that in webapp.py
# rule_from_rgx = re.compile(rule_from, re.I)
# append rule
- rules.append((rule_from, rule_to))
+ try:
+ rules.append((re.compile(rule_from, re.I | re.U), rule_to))
+ except:
+ # TODO log regex error
+ continue
# this child define an exclusion
elif ruleset.tag == 'exclusion':
@@ -143,3 +152,56 @@ def load_https_rules(rules_path):
https_rules.append(ruleset)
print(' * {n} https-rules loaded'.format(n=len(https_rules)))
+
+
+
+def https_url_rewrite(result):
+ skip_https_rewrite = False
+ # check if HTTPS rewrite is possible
+ for target, rules, exclusions in https_rules:
+
+ # check if target regex match with url
+ if target.match(result['parsed_url'].netloc):
+ # process exclusions
+ for exclusion in exclusions:
+ # check if exclusion match with url
+ if exclusion.match(result['url']):
+ skip_https_rewrite = True
+ break
+
+ # skip https rewrite if required
+ if skip_https_rewrite:
+ break
+
+ # process rules
+ for rule in rules:
+ try:
+ new_result_url = rule[0].sub(rule[1], result['url'])
+ except:
+ break
+
+ # parse new url
+ new_parsed_url = urlparse(new_result_url)
+
+ # continue if nothing was rewritten
+ if result['url'] == new_result_url:
+ continue
+
+ # get domainname from result
+ # TODO, does only work correct with TLD's like
+ # asdf.com, not for asdf.com.de
+ # TODO, using publicsuffix instead of this rewrite rule
+ old_result_domainname = '.'.join(
+ result['parsed_url'].hostname.split('.')[-2:])
+ new_result_domainname = '.'.join(
+ new_parsed_url.hostname.split('.')[-2:])
+
+ # check if rewritten hostname is the same,
+ # to protect against wrong or malicious rewrite rules
+ if old_result_domainname == new_result_domainname:
+ # set new url
+ result['url'] = new_result_url
+
+ # target has matched, do not search over the other rules
+ break
+ return result
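
Extracting https_url_rewrite() lets webapp.py (see the webapp.py hunk below) reduce the per-result rewrite to a single call. A minimal usage sketch with an illustrative result dict; the function expects 'url' and a pre-computed 'parsed_url':

    from urlparse import urlparse
    from searx.https_rewrite import https_url_rewrite

    result = {'url': 'http://soundcloud.com/some-track'}  # illustrative
    result['parsed_url'] = urlparse(result['url'])

    result = https_url_rewrite(result)
    # result['url'] is upgraded to https:// only if a target matched,
    # no exclusion matched, and the second-level domain was unchanged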
diff --git a/searx/https_rules/Soundcloud.xml b/searx/https_rules/Soundcloud.xml
index 0baa5832b..6958e8cbc 100644
--- a/searx/https_rules/Soundcloud.xml
+++ b/searx/https_rules/Soundcloud.xml
@@ -89,7 +89,7 @@
<rule from="^http://([aiw]\d|api|wis)\.sndcdn\.com/"
to="https://$1.sndcdn.com/" />
- <rule from="^http://((?:api|backstage|blog|connect|developers|ec-media|eventlogger|help-assets|media|visuals|w|www)\.)?soundcloud\.com/"
+ <rule from="^http://((?:api|backstage|blog|connect|developers|ec-media|eventlogger|help-assets|media|visuals|w|www)\.|)soundcloud\.com/"
to="https://$1soundcloud.com/" />
<rule from="^https?://scbackstage\.wpengine\.netdna-cdn\.com/"
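
The rule change replaces the optional group with an alternation against the empty string. Both accept the same URLs, but an optional group may not participate in the match at all, and on Python 2 re.sub then refuses to expand the \1 that the $-to-\ hack above produces; a group that always matches, possibly emptily, expands cleanly. A small demonstration with shortened patterns:

    import re

    url = 'http://soundcloud.com/'

    # optional group: group 1 absent for the bare domain -> \1 fails
    try:
        re.sub(r'^http://((?:www|api)\.)?soundcloud\.com/',
               r'https://\1soundcloud.com/', url)
    except re.error as e:
        print('optional group: {0}'.format(e))  # unmatched group

    # empty alternation: group 1 always participates, possibly empty
    print(re.sub(r'^http://((?:www|api)\.|)soundcloud\.com/',
                 r'https://\1soundcloud.com/', url))
    # -> https://soundcloud.com/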
diff --git a/searx/search.py b/searx/search.py
index 064f37220..d1d03805f 100644
--- a/searx/search.py
+++ b/searx/search.py
@@ -34,17 +34,31 @@ from searx.query import Query
number_of_searches = 0
+def search_request_wrapper(fn, url, engine_name, **kwargs):
+ try:
+ return fn(url, **kwargs)
+ except Exception, e:
+ # increase errors stats
+ engines[engine_name].stats['errors'] += 1
+
+ # print engine name and specific error message
+ print('[E] Error with engine "{0}":\n\t{1}'.format(
+ engine_name, str(e)))
+ return
+
+
def threaded_requests(requests):
timeout_limit = max(r[2]['timeout'] for r in requests)
search_start = time()
- for fn, url, request_args in requests:
+ for fn, url, request_args, engine_name in requests:
request_args['timeout'] = timeout_limit
th = threading.Thread(
- target=fn,
- args=(url,),
+ target=search_request_wrapper,
+ args=(fn, url, engine_name),
kwargs=request_args,
name='search_request',
)
+ th._engine_name = engine_name
th.start()
for th in threading.enumerate():
@@ -52,7 +66,7 @@ def threaded_requests(requests):
remaining_time = max(0.0, timeout_limit - (time() - search_start))
th.join(remaining_time)
if th.isAlive():
- print('engine timeout')
+ print('engine timeout: {0}'.format(th._engine_name))
@@ -78,16 +92,7 @@ def make_callback(engine_name, results_queue, callback, params):
return
# callback
- try:
- search_results = callback(response)
- except Exception, e:
- # increase errors stats
- engines[engine_name].stats['errors'] += 1
-
- # print engine name and specific error message
- print '[E] Error with engine "{0}":\n\t{1}'.format(
- engine_name, str(e))
- return
+ search_results = callback(response)
# add results
for result in search_results:
@@ -409,6 +414,7 @@ class Search(object):
# init vars
requests = []
results_queue = Queue()
+ results = {}
suggestions = set()
answers = set()
infoboxes = []
@@ -481,12 +487,13 @@ class Search(object):
continue
# append request to list
- requests.append((req, request_params['url'], request_args))
+ requests.append((req, request_params['url'], request_args, selected_engine['name']))
+ if not requests:
+ return results, suggestions, answers, infoboxes
# send all search-request
threaded_requests(requests)
- results = {}
while not results_queue.empty():
engine_name, engine_results = results_queue.get_nowait()
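
Stashing the engine name on the thread object is what lets the timeout message above name the offender, and wrapping the request function is what turns a crashing engine into an error-stat increment instead of a dead search. A minimal sketch of the thread-attribute trick, with an artificially slow target:

    import threading
    import time

    th = threading.Thread(target=time.sleep, args=(5,), name='search_request')
    th._engine_name = 'slow-engine'  # custom attribute, read back on timeout
    th.start()
    th.join(0.1)  # wait at most 0.1s, like the shared timeout budget
    if th.isAlive():
        print('engine timeout: {0}'.format(th._engine_name))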
diff --git a/searx/settings.yml b/searx/settings.yml
index b51b37f1c..3a52dae3c 100644
--- a/searx/settings.yml
+++ b/searx/settings.yml
@@ -95,6 +95,10 @@ engines:
engine : openstreetmap
shortcut : osm
+ - name : photon
+ engine : photon
+ shortcut : ph
+
# - name : piratebay
# engine : piratebay
# shortcut : tpb
diff --git a/searx/webapp.py b/searx/webapp.py
index a2a135e9a..915fb3564 100644
--- a/searx/webapp.py
+++ b/searx/webapp.py
@@ -41,15 +41,12 @@ from searx.utils import (
UnicodeWriter, highlight_content, html_to_text, get_themes
)
from searx.version import VERSION_STRING
-from searx.https_rewrite import https_rules
from searx.languages import language_codes
+from searx.https_rewrite import https_url_rewrite
from searx.search import Search
from searx.query import Query
from searx.autocomplete import backends as autocomplete_backends
-from urlparse import urlparse
-import re
-
static_path, templates_path, themes =\
get_themes(settings['themes_path']
@@ -215,59 +212,7 @@ def index():
if settings['server']['https_rewrite']\
and result['parsed_url'].scheme == 'http':
- skip_https_rewrite = False
-
- # check if HTTPS rewrite is possible
- for target, rules, exclusions in https_rules:
-
- # check if target regex match with url
- if target.match(result['url']):
- # process exclusions
- for exclusion in exclusions:
- # check if exclusion match with url
- if exclusion.match(result['url']):
- skip_https_rewrite = True
- break
-
- # skip https rewrite if required
- if skip_https_rewrite:
- break
-
- # process rules
- for rule in rules:
- try:
- # TODO, precompile rule
- p = re.compile(rule[0])
-
- # rewrite url if possible
- new_result_url = p.sub(rule[1], result['url'])
- except:
- break
-
- # parse new url
- new_parsed_url = urlparse(new_result_url)
-
- # continiue if nothing was rewritten
- if result['url'] == new_result_url:
- continue
-
- # get domainname from result
- # TODO, does only work correct with TLD's like
- # asdf.com, not for asdf.com.de
- # TODO, using publicsuffix instead of this rewrite rule
- old_result_domainname = '.'.join(
- result['parsed_url'].hostname.split('.')[-2:])
- new_result_domainname = '.'.join(
- new_parsed_url.hostname.split('.')[-2:])
-
- # check if rewritten hostname is the same,
- # to protect against wrong or malicious rewrite rules
- if old_result_domainname == new_result_domainname:
- # set new url
- result['url'] = new_result_url
-
- # target has matched, do not search over the other rules
- break
+ result = https_url_rewrite(result)
if search.request_data.get('format', 'html') == 'html':
if 'content' in result: