-rw-r--r--  searx/engines/500px.py                                    57
-rw-r--r--  searx/engines/__init__.py                                  7
-rw-r--r--  searx/engines/bing_news.py                                11
-rw-r--r--  searx/engines/faroo.py                                    15
-rw-r--r--  searx/engines/google_images.py                             2
-rw-r--r--  searx/engines/kickass.py                                   9
-rw-r--r--  searx/engines/mediawiki.py                                11
-rw-r--r--  searx/engines/openstreetmap.py                            14
-rw-r--r--  searx/engines/photon.py                                  132
-rw-r--r--  searx/engines/searchcode_code.py                          65
-rw-r--r--  searx/engines/searchcode_doc.py                           49
-rw-r--r--  searx/engines/soundcloud.py                                7
-rw-r--r--  searx/engines/yacy.py                                     29
-rw-r--r--  searx/engines/yahoo.py                                     9
-rw-r--r--  searx/https_rewrite.py                                    68
-rw-r--r--  searx/https_rules/Soundcloud.xml                           2
-rw-r--r--  searx/search.py                                           74
-rw-r--r--  searx/settings.yml                                        16
-rw-r--r--  searx/static/oscar/img/icons/kickass.png    bin 2019 -> 4527 bytes
-rw-r--r--  searx/static/oscar/js/searx.min.js                         4
-rw-r--r--  searx/static/oscar/js/searx_src/00_requirejs_config.js     2
-rw-r--r--  searx/static/oscar/js/searx_src/element_modifiers.js      21
-rw-r--r--  searx/static/oscar/js/searx_src/leaflet_map.js             2
-rw-r--r--  searx/webapp.py                                           59
24 files changed, 527 insertions, 138 deletions
diff --git a/searx/engines/500px.py b/searx/engines/500px.py
new file mode 100644
index 000000000..5d53af32c
--- /dev/null
+++ b/searx/engines/500px.py
@@ -0,0 +1,57 @@
+## 500px (Images)
+#
+# @website https://500px.com
+# @provide-api yes (https://developers.500px.com/)
+#
+# @using-api no
+# @results HTML
+# @stable no (HTML can change)
+# @parse url, title, thumbnail, img_src, content
+#
+# @todo rewrite to api
+
+
+from urllib import urlencode
+from urlparse import urljoin
+from lxml import html
+
+# engine dependent config
+categories = ['images']
+paging = True
+
+# search-url
+base_url = 'https://500px.com'
+search_url = base_url+'/search?page={pageno}&type=photos&{query}'
+
+
+# do search-request
+def request(query, params):
+ params['url'] = search_url.format(pageno=params['pageno'],
+ query=urlencode({'q': query}))
+
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ dom = html.fromstring(resp.text)
+
+ # parse results
+ for result in dom.xpath('//div[@class="photo"]'):
+ link = result.xpath('.//a')[0]
+ url = urljoin(base_url, link.attrib.get('href'))
+ title = result.xpath('.//div[@class="title"]//text()')[0]
+ img_src = link.xpath('.//img')[0].attrib['src']
+ content = result.xpath('.//div[@class="info"]//text()')[0]
+
+ # append result
+ results.append({'url': url,
+ 'title': title,
+ 'img_src': img_src,
+ 'content': content,
+ 'template': 'images.html'})
+
+ # return results
+ return results
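
The engine scrapes 500px's HTML rather than calling the API (see the @todo above). A minimal, self-contained sketch of the same XPath extraction, run against a canned fragment; the markup below is an assumption for illustration, not a captured 500px page:

# Standalone sketch of the scraping in 500px.py (Python 2, like the
# engine itself); the HTML fragment is made up.
from urlparse import urljoin
from lxml import html

SAMPLE = '''
<div class="photo">
  <a href="/photo/123/example">
    <img src="https://example.invalid/thumb.jpg"/>
  </a>
  <div class="title">Example title</div>
  <div class="info">by example_user</div>
</div>
'''

dom = html.fromstring(SAMPLE)
for result in dom.xpath('//div[@class="photo"]'):
    link = result.xpath('.//a')[0]
    url = urljoin('https://500px.com', link.attrib.get('href'))
    title = result.xpath('.//div[@class="title"]//text()')[0]
    img_src = link.xpath('.//img')[0].attrib['src']
    print('{0} | {1} | {2}'.format(url, title, img_src))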
diff --git a/searx/engines/__init__.py b/searx/engines/__init__.py
index 3c9ce3b57..d42339af8 100644
--- a/searx/engines/__init__.py
+++ b/searx/engines/__init__.py
@@ -81,7 +81,8 @@ def load_engine(engine_data):
if engine_attr.startswith('_'):
continue
if getattr(engine, engine_attr) is None:
- print '[E] Engine config error: Missing attribute "{0}.{1}"'.format(engine.name, engine_attr) # noqa
+ print('[E] Engine config error: Missing attribute "{0}.{1}"'\
+ .format(engine.name, engine_attr))
sys.exit(1)
engine.stats = {
@@ -100,6 +101,10 @@ def load_engine(engine_data):
if engine.shortcut:
# TODO check duplications
+ if engine.shortcut in engine_shortcuts:
+ print('[E] Engine config error: ambiguous shortcut: {0}'\
+ .format(engine.shortcut))
+ sys.exit(1)
engine_shortcuts[engine.shortcut] = engine.name
return engine
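
The new guard is just a membership test on the shared shortcut dict before registration; a toy sketch of the same check, with made-up engine names:

# Toy version of the ambiguous-shortcut check added above; the engine
# names and shortcuts are made up.
engine_shortcuts = {}

def register_shortcut(shortcut, engine_name):
    if shortcut in engine_shortcuts:
        raise ValueError('ambiguous shortcut: {0} ({1} vs. {2})'.format(
            shortcut, engine_shortcuts[shortcut], engine_name))
    engine_shortcuts[shortcut] = engine_name

register_shortcut('px', '500px')
register_shortcut('ph', 'photon')
register_shortcut('px', 'piratebay')  # raises ValueError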
diff --git a/searx/engines/bing_news.py b/searx/engines/bing_news.py
index 5dce4a2b2..3dda04cbb 100644
--- a/searx/engines/bing_news.py
+++ b/searx/engines/bing_news.py
@@ -57,12 +57,16 @@ def response(resp):
link = result.xpath('.//div[@class="newstitle"]/a')[0]
url = link.attrib.get('href')
title = ' '.join(link.xpath('.//text()'))
- contentXPath = result.xpath('.//div[@class="sn_txt"]/div//span[@class="sn_snip"]//text()')
+ contentXPath = result.xpath('.//div[@class="sn_txt"]/div'
+ '//span[@class="sn_snip"]//text()')
if contentXPath is not None:
content = escape(' '.join(contentXPath))
# parse publishedDate
- publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div//span[contains(@class,"sn_ST")]//span[contains(@class,"sn_tm")]//text()')
+ publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div'
+ '//span[contains(@class,"sn_ST")]'
+ '//span[contains(@class,"sn_tm")]'
+ '//text()')
if publishedDateXPath is not None:
publishedDate = escape(' '.join(publishedDateXPath))
@@ -74,7 +78,8 @@ def response(resp):
timeNumbers = re.findall(r'\d+', publishedDate)
publishedDate = datetime.now()\
- timedelta(hours=int(timeNumbers[0]))
- elif re.match("^[0-9]+ hour(s|), [0-9]+ minute(s|) ago$", publishedDate):
+ elif re.match("^[0-9]+ hour(s|),"
+ " [0-9]+ minute(s|) ago$", publishedDate):
timeNumbers = re.findall(r'\d+', publishedDate)
publishedDate = datetime.now()\
- timedelta(hours=int(timeNumbers[0]))\
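
The hunk is cut off by the diff context, but the pattern is clear: Bing's relative timestamps are matched with a regex and converted into an absolute datetime via timedelta. A sketch of the hour-and-minute case; subtracting both numbers is an assumption about the truncated branch:

# Converting a Bing News relative timestamp, following the regex above.
import re
from datetime import datetime, timedelta

published = '3 hours, 20 minutes ago'
if re.match("^[0-9]+ hour(s|), [0-9]+ minute(s|) ago$", published):
    hours, minutes = [int(n) for n in re.findall(r'\d+', published)]
    print(datetime.now() - timedelta(hours=hours, minutes=minutes))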
diff --git a/searx/engines/faroo.py b/searx/engines/faroo.py
index dada4758d..5360ea156 100644
--- a/searx/engines/faroo.py
+++ b/searx/engines/faroo.py
@@ -22,10 +22,17 @@ api_key = None
# search-url
url = 'http://www.faroo.com/'
-search_url = url + 'api?{query}&start={offset}&length={number_of_results}&l={language}&src={categorie}&i=false&f=json&key={api_key}'
+search_url = url + 'api?{query}'\
+ '&start={offset}'\
+ '&length={number_of_results}'\
+ '&l={language}'\
+ '&src={categorie}'\
+ '&i=false'\
+ '&f=json'\
+ '&key={api_key}' # noqa
search_category = {'general': 'web',
- 'news': 'news'}
+ 'news': 'news'}
# do search-request
@@ -80,8 +87,8 @@ def response(resp):
# parse results
for result in search_res['results']:
if result['news']:
- # timestamp (how many milliseconds have passed between now and the beginning of 1970)
- publishedDate = datetime.datetime.fromtimestamp(result['date']/1000.0)
+ # timestamp (milliseconds since 1970)
+ publishedDate = datetime.datetime.fromtimestamp(result['date']/1000.0) # noqa
# append news result
results.append({'url': result['url'],
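
The shortened comment stays accurate: Faroo reports dates as Unix timestamps in milliseconds, while datetime.fromtimestamp() expects seconds, hence the division by 1000.0. A one-line check with a made-up sample value:

# fromtimestamp() takes seconds; Faroo's 'date' field is milliseconds.
import datetime

ms = 1418986800000  # made-up sample value
print(datetime.datetime.fromtimestamp(ms / 1000.0))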
diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py
index 491f5c2c2..79fac3fb0 100644
--- a/searx/engines/google_images.py
+++ b/searx/engines/google_images.py
@@ -9,7 +9,7 @@
# @stable yes (but deprecated)
# @parse url, title, img_src
-from urllib import urlencode,unquote
+from urllib import urlencode, unquote
from json import loads
# engine dependent config
diff --git a/searx/engines/kickass.py b/searx/engines/kickass.py
index bd11a3b6b..f1fcd9e1a 100644
--- a/searx/engines/kickass.py
+++ b/searx/engines/kickass.py
@@ -1,8 +1,8 @@
## Kickass Torrent (Videos, Music, Files)
-#
+#
# @website https://kickass.so
# @provide-api no (nothing found)
-#
+#
# @using-api no
# @results HTML (using search portal)
# @stable yes (HTML can change)
@@ -13,7 +13,6 @@ from cgi import escape
from urllib import quote
from lxml import html
from operator import itemgetter
-from dateutil import parser
# engine dependent config
categories = ['videos', 'music', 'files']
@@ -33,6 +32,10 @@ def request(query, params):
params['url'] = search_url.format(search_term=quote(query),
pageno=params['pageno'])
+ # FIX: SSLError: hostname 'kickass.so'
+ # doesn't match either of '*.kickass.to', 'kickass.to'
+ params['verify'] = False
+
return params
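
params['verify'] is forwarded to the requests library by the search.py changes further down, so certificate validation is skipped for this engine only. A sketch of the effect (the URL path here is illustrative, not the engine's actual search_url); disabling verification also disables protection against man-in-the-middle attacks, so it only makes sense as a targeted workaround:

# Effect of params['verify'] = False once the request reaches the
# requests library; the URL path is illustrative.
import requests

resp = requests.get('https://kickass.so/usearch/test/', verify=False)
print(resp.status_code)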
diff --git a/searx/engines/mediawiki.py b/searx/engines/mediawiki.py
index 4a8b0e8b8..8ca32c62a 100644
--- a/searx/engines/mediawiki.py
+++ b/searx/engines/mediawiki.py
@@ -28,15 +28,17 @@ search_url = base_url + 'w/api.php?action=query'\
'&srprop=timestamp'\
'&format=json'\
'&sroffset={offset}'\
- '&srlimit={limit}'
+ '&srlimit={limit}' # noqa
# do search-request
def request(query, params):
offset = (params['pageno'] - 1) * number_of_results
+
string_args = dict(query=urlencode({'srsearch': query}),
- offset=offset,
- limit=number_of_results)
+ offset=offset,
+ limit=number_of_results)
+
format_strings = list(Formatter().parse(base_url))
if params['language'] == 'all':
@@ -67,7 +69,8 @@ def response(resp):
# parse results
for result in search_results['query']['search']:
- url = base_url.format(language=resp.search_params['language']) + 'wiki/' + quote(result['title'].replace(' ', '_').encode('utf-8'))
+ url = base_url.format(language=resp.search_params['language']) +\
+ 'wiki/' + quote(result['title'].replace(' ', '_').encode('utf-8'))
# append result
results.append({'url': url,
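
request() decides whether base_url still contains a {language} placeholder by walking the template with string.Formatter; this is what that parse yields:

# What Formatter().parse() returns for a templated base_url.
from string import Formatter

base_url = 'https://{language}.wikipedia.org/'
fields = [field for _, field, _, _ in Formatter().parse(base_url)
          if field is not None]
print(fields)  # ['language']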
diff --git a/searx/engines/openstreetmap.py b/searx/engines/openstreetmap.py
index 36b6011e7..68446ef5f 100644
--- a/searx/engines/openstreetmap.py
+++ b/searx/engines/openstreetmap.py
@@ -9,20 +9,24 @@
# @parse url, title
from json import loads
+from searx.utils import searx_useragent
# engine dependent config
categories = ['map']
paging = False
# search-url
-url = 'https://nominatim.openstreetmap.org/search/{query}?format=json&polygon_geojson=1&addressdetails=1'
-
+base_url = 'https://nominatim.openstreetmap.org/'
+search_string = 'search/{query}?format=json&polygon_geojson=1&addressdetails=1'
result_base_url = 'https://openstreetmap.org/{osm_type}/{osm_id}'
# do search-request
def request(query, params):
- params['url'] = url.format(query=query)
+ params['url'] = base_url + search_string.format(query=query)
+
+ # using searx User-Agent
+ params['headers']['User-Agent'] = searx_useragent()
return params
@@ -68,8 +72,8 @@ def response(resp):
address.update({'house_number': address_raw.get('house_number'),
'road': address_raw.get('road'),
'locality': address_raw.get('city',
- address_raw.get('town',
- address_raw.get('village'))),
+ address_raw.get('town', # noqa
+ address_raw.get('village'))), # noqa
'postcode': address_raw.get('postcode'),
'country': address_raw.get('country'),
'country_code': address_raw.get('country_code')})
diff --git a/searx/engines/photon.py b/searx/engines/photon.py
new file mode 100644
index 000000000..16340d24a
--- /dev/null
+++ b/searx/engines/photon.py
@@ -0,0 +1,132 @@
+## Photon (Map)
+#
+# @website https://photon.komoot.de
+# @provide-api yes (https://photon.komoot.de/)
+#
+# @using-api yes
+# @results JSON
+# @stable yes
+# @parse url, title
+
+from urllib import urlencode
+from json import loads
+from searx.utils import searx_useragent
+
+# engine dependent config
+categories = ['map']
+paging = False
+language_support = True
+number_of_results = 10
+
+# search-url
+base_url = 'https://photon.komoot.de/'
+search_string = 'api/?{query}&limit={limit}'
+result_base_url = 'https://openstreetmap.org/{osm_type}/{osm_id}'
+
+# list of supported languages
+allowed_languages = ['de', 'en', 'fr', 'it']
+
+
+# do search-request
+def request(query, params):
+ params['url'] = base_url +\
+ search_string.format(query=urlencode({'q': query}),
+ limit=number_of_results)
+
+ if params['language'] != 'all':
+ language = params['language'].split('_')[0]
+ if language in allowed_languages:
+ params['url'] = params['url'] + "&lang=" + language
+
+ # using searx User-Agent
+ params['headers']['User-Agent'] = searx_useragent()
+
+ # FIX: SSLError: SSL3_GET_SERVER_CERTIFICATE:certificate verify failed
+ params['verify'] = False
+
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+ json = loads(resp.text)
+
+ # parse results
+ for r in json.get('features', []):
+
+ properties = r.get('properties')
+
+ if not properties:
+ continue
+
+ # get title
+ title = properties['name']
+
+ # get osm-type
+ if properties.get('osm_type') == 'N':
+ osm_type = 'node'
+ elif properties.get('osm_type') == 'W':
+ osm_type = 'way'
+ elif properties.get('osm_type') == 'R':
+ osm_type = 'relation'
+ else:
+ # continue if invalid osm-type
+ continue
+
+ url = result_base_url.format(osm_type=osm_type,
+ osm_id=properties.get('osm_id'))
+
+ osm = {'type': osm_type,
+ 'id': properties.get('osm_id')}
+
+ geojson = r.get('geometry')
+
+ if properties.get('extent'):
+ boundingbox = [properties.get('extent')[3],
+ properties.get('extent')[1],
+ properties.get('extent')[0],
+ properties.get('extent')[2]]
+ else:
+ # TODO: better boundingbox calculation
+ boundingbox = [geojson['coordinates'][1],
+ geojson['coordinates'][1],
+ geojson['coordinates'][0],
+ geojson['coordinates'][0]]
+
+ # address calculation
+ address = {}
+
+ # get name
+ if properties.get('osm_key') == 'amenity' or\
+ properties.get('osm_key') == 'shop' or\
+ properties.get('osm_key') == 'tourism' or\
+ properties.get('osm_key') == 'leisure':
+ address = {'name': properties.get('name')}
+
+ # add the rest of the address data, if a name was found
+ if address.get('name'):
+ address.update({'house_number': properties.get('housenumber'),
+ 'road': properties.get('street'),
+ 'locality': properties.get('city',
+ properties.get('town', # noqa
+ properties.get('village'))), # noqa
+ 'postcode': properties.get('postcode'),
+ 'country': properties.get('country')})
+ else:
+ address = None
+
+ # append result
+ results.append({'template': 'map.html',
+ 'title': title,
+ 'content': '',
+ 'longitude': geojson['coordinates'][0],
+ 'latitude': geojson['coordinates'][1],
+ 'boundingbox': boundingbox,
+ 'geojson': geojson,
+ 'address': address,
+ 'osm': osm,
+ 'url': url})
+
+ # return results
+ return results
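
Photon encodes the OSM element type as a single letter, and the if/elif chain above maps it onto the path segment that openstreetmap.org expects. An equivalent table-driven sketch, applied to a made-up feature:

# Table-driven equivalent of the osm_type branches in photon.py; the
# properties dict is made up.
OSM_TYPES = {'N': 'node', 'W': 'way', 'R': 'relation'}
result_base_url = 'https://openstreetmap.org/{osm_type}/{osm_id}'

properties = {'osm_type': 'W', 'osm_id': 4567, 'name': 'Example way'}

osm_type = OSM_TYPES.get(properties.get('osm_type'))
if osm_type is None:
    print('skip: invalid osm-type')
else:
    print(result_base_url.format(osm_type=osm_type,
                                 osm_id=properties['osm_id']))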
diff --git a/searx/engines/searchcode_code.py b/searx/engines/searchcode_code.py
new file mode 100644
index 000000000..2ba0e52f1
--- /dev/null
+++ b/searx/engines/searchcode_code.py
@@ -0,0 +1,65 @@
+## Searchcode (It)
+#
+# @website https://searchcode.com/
+# @provide-api yes (https://searchcode.com/api/)
+#
+# @using-api yes
+# @results JSON
+# @stable yes
+# @parse url, title, content
+
+from urllib import urlencode
+from json import loads
+import cgi
+import re
+
+# engine dependent config
+categories = ['it']
+paging = True
+
+# search-url
+url = 'https://searchcode.com/'
+search_url = url+'api/codesearch_I/?{query}&p={pageno}'
+
+
+# do search-request
+def request(query, params):
+ params['url'] = search_url.format(query=urlencode({'q': query}),
+ pageno=params['pageno']-1)
+
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ search_results = loads(resp.text)
+
+ # parse results
+ for result in search_results['results']:
+ href = result['url']
+ title = "" + result['name'] + " - " + result['filename']
+ content = result['repo'] + "<br />"
+
+ lines = dict()
+ for line, code in result['lines'].items():
+ lines[int(line)] = code
+
+ content = content + '<pre class="code-formatter"><table class="code">'
+ for line, code in sorted(lines.items()):
+ content = content + '<tr><td class="line-number" style="padding-right:5px;">'
+ content = content + str(line) + '</td><td class="code-snippet">'
+ # Replace every two spaces with ' &nbsp;' to keep formatting while allowing the browser to break the line if necessary
+ content = content + cgi.escape(code).replace('\t', '    ').replace('  ', '&nbsp; ').replace('  ', ' &nbsp;')
+ content = content + "</td></tr>"
+
+ content = content + "</table></pre>"
+
+ # append result
+ results.append({'url': href,
+ 'title': title,
+ 'content': content})
+
+ # return results
+ return results
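
The alternating '&nbsp; '/' &nbsp;' substitution is what lets the rendered snippet keep its indentation while still leaving a plain space in every run that the browser may break on. A quick demonstration on a canned line:

# Demonstration of the space/&nbsp; interleaving used above.
import cgi

line = '\t\treturn  x'
escaped = cgi.escape(line).replace('\t', '    ')
escaped = escaped.replace('  ', '&nbsp; ').replace('  ', ' &nbsp;')
print(escaped)  # &nbsp; &nbsp; &nbsp; &nbsp; return&nbsp; x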
diff --git a/searx/engines/searchcode_doc.py b/searx/engines/searchcode_doc.py
new file mode 100644
index 000000000..e07cbeab9
--- /dev/null
+++ b/searx/engines/searchcode_doc.py
@@ -0,0 +1,49 @@
+## Searchcode (It)
+#
+# @website https://searchcode.com/
+# @provide-api yes (https://searchcode.com/api/)
+#
+# @using-api yes
+# @results JSON
+# @stable yes
+# @parse url, title, content
+
+from urllib import urlencode
+from json import loads
+
+# engine dependent config
+categories = ['it']
+paging = True
+
+# search-url
+url = 'https://searchcode.com/'
+search_url = url+'api/search_IV/?{query}&p={pageno}'
+
+
+# do search-request
+def request(query, params):
+ params['url'] = search_url.format(query=urlencode({'q': query}),
+ pageno=params['pageno']-1)
+
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ search_results = loads(resp.text)
+
+ # parse results
+ for result in search_results['results']:
+ href = result['url']
+ title = "[" + result['type'] + "] " + result['namespace'] + " " + result['name']
+ content = '<span class="highlight">[' + result['type'] + "] " + result['name'] + " " + result['synopsis'] + "</span><br />" + result['description']
+
+ # append result
+ results.append({'url': href,
+ 'title': title,
+ 'content': content})
+
+ # return results
+ return results
diff --git a/searx/engines/soundcloud.py b/searx/engines/soundcloud.py
index 390e7ca1f..164a569a3 100644
--- a/searx/engines/soundcloud.py
+++ b/searx/engines/soundcloud.py
@@ -20,7 +20,12 @@ guest_client_id = 'b45b1aa10f1ac2941910a7f0d10f8e28'
# search-url
url = 'https://api.soundcloud.com/'
-search_url = url + 'search?{query}&facet=model&limit=20&offset={offset}&linked_partitioning=1&client_id={client_id}'
+search_url = url + 'search?{query}'\
+ '&facet=model'\
+ '&limit=20'\
+ '&offset={offset}'\
+ '&linked_partitioning=1'\
+ '&client_id={client_id}' # noqa
# do search-request
diff --git a/searx/engines/yacy.py b/searx/engines/yacy.py
index 3ee0e91c7..4c4fac7df 100644
--- a/searx/engines/yacy.py
+++ b/searx/engines/yacy.py
@@ -24,7 +24,11 @@ number_of_results = 5
# search-url
base_url = 'http://localhost:8090'
-search_url = '/yacysearch.json?{query}&startRecord={offset}&maximumRecords={limit}&contentdom={search_type}&resource=global'
+search_url = '/yacysearch.json?{query}'\
+ '&startRecord={offset}'\
+ '&maximumRecords={limit}'\
+ '&contentdom={search_type}'\
+ '&resource=global' # noqa
# yacy specific type-definitions
search_types = {'general': 'text',
@@ -39,10 +43,11 @@ def request(query, params):
offset = (params['pageno'] - 1) * number_of_results
search_type = search_types.get(params['category'], '0')
- params['url'] = base_url + search_url.format(query=urlencode({'query': query}),
- offset=offset,
- limit=number_of_results,
- search_type=search_type)
+ params['url'] = base_url +\
+ search_url.format(query=urlencode({'query': query}),
+ offset=offset,
+ limit=number_of_results,
+ search_type=search_type)
# add language tag if specified
if params['language'] != 'all':
@@ -70,19 +75,19 @@ def response(resp):
# append result
results.append({'url': result['link'],
- 'title': result['title'],
- 'content': result['description'],
- 'publishedDate': publishedDate})
+ 'title': result['title'],
+ 'content': result['description'],
+ 'publishedDate': publishedDate})
elif resp.search_params['category'] == 'images':
# parse image results
for result in search_results:
# append result
results.append({'url': result['url'],
- 'title': result['title'],
- 'content': '',
- 'img_src': result['image'],
- 'template': 'images.html'})
+ 'title': result['title'],
+ 'content': '',
+ 'img_src': result['image'],
+ 'template': 'images.html'})
#TODO parse video, audio and file results
diff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py
index 938540ece..c6c5b0d0d 100644
--- a/searx/engines/yahoo.py
+++ b/searx/engines/yahoo.py
@@ -20,7 +20,8 @@ paging = True
language_support = True
# search-url
-search_url = 'https://search.yahoo.com/search?{query}&b={offset}&fl=1&vl=lang_{lang}'
+base_url = 'https://search.yahoo.com/'
+search_url = 'search?{query}&b={offset}&fl=1&vl=lang_{lang}'
# specific xpath variables
results_xpath = '//div[@class="res"]'
@@ -57,9 +58,9 @@ def request(query, params):
else:
language = params['language'].split('_')[0]
- params['url'] = search_url.format(offset=offset,
- query=urlencode({'p': query}),
- lang=language)
+ params['url'] = base_url + search_url.format(offset=offset,
+ query=urlencode({'p': query}),
+ lang=language)
# TODO required?
params['cookies']['sB'] = 'fl=1&vl=lang_{lang}&sh=1&rw=new&v=1'\
diff --git a/searx/https_rewrite.py b/searx/https_rewrite.py
index 9faf3599d..408474a44 100644
--- a/searx/https_rewrite.py
+++ b/searx/https_rewrite.py
@@ -16,6 +16,7 @@ along with searx. If not, see < http://www.gnu.org/licenses/ >.
'''
import re
+from urlparse import urlparse
from lxml import etree
from os import listdir
from os.path import isfile, isdir, join
@@ -86,15 +87,23 @@ def load_single_https_ruleset(filepath):
# TODO hack, which convert a javascript regex group
# into a valid python regex group
- rule_from = ruleset.attrib.get('from').replace('$', '\\')
- rule_to = ruleset.attrib.get('to').replace('$', '\\')
+ rule_from = ruleset.attrib['from'].replace('$', '\\')
+ if rule_from.endswith('\\'):
+ rule_from = rule_from[:-1]+'$'
+ rule_to = ruleset.attrib['to'].replace('$', '\\')
+ if rule_to.endswith('\\'):
+ rule_to = rule_to[:-1]+'$'
# TODO, not working yet because of the hack above,
# currently doing that in webapp.py
# rule_from_rgx = re.compile(rule_from, re.I)
# append rule
- rules.append((rule_from, rule_to))
+ try:
+ rules.append((re.compile(rule_from, re.I | re.U), rule_to))
+ except:
+ # TODO log regex error
+ continue
# this child define an exclusion
elif ruleset.tag == 'exclusion':
@@ -143,3 +152,56 @@ def load_https_rules(rules_path):
https_rules.append(ruleset)
print(' * {n} https-rules loaded'.format(n=len(https_rules)))
+
+
+
+def https_url_rewrite(result):
+ skip_https_rewrite = False
+ # check if HTTPS rewrite is possible
+ for target, rules, exclusions in https_rules:
+
+ # check if target regex match with url
+ if target.match(result['parsed_url'].netloc):
+ # process exclusions
+ for exclusion in exclusions:
+ # check if exclusion match with url
+ if exclusion.match(result['url']):
+ skip_https_rewrite = True
+ break
+
+ # skip https rewrite if required
+ if skip_https_rewrite:
+ break
+
+ # process rules
+ for rule in rules:
+ try:
+ new_result_url = rule[0].sub(rule[1], result['url'])
+ except:
+ break
+
+ # parse new url
+ new_parsed_url = urlparse(new_result_url)
+
+ # continue if nothing was rewritten
+ if result['url'] == new_result_url:
+ continue
+
+ # get domainname from result
+ # TODO: only works correctly with simple TLDs like
+ # asdf.com, not asdf.com.de
+ # TODO: use publicsuffix instead of this rewrite rule
+ old_result_domainname = '.'.join(
+ result['parsed_url'].hostname.split('.')[-2:])
+ new_result_domainname = '.'.join(
+ new_parsed_url.hostname.split('.')[-2:])
+
+ # check if rewritten hostname is the same,
+ # to protect against wrong or malicious rewrite rules
+ if old_result_domainname == new_result_domainname:
+ # set new url
+ result['url'] = new_result_url
+
+ # target has matched, do not search over the other rules
+ break
+ return result
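
https_url_rewrite() is a straight extraction of the loop webapp.py used to inline (its removal is visible at the bottom of this diff). A usage sketch with a made-up result dict; callers are expected to supply a pre-parsed 'parsed_url', as webapp.py does:

# Usage sketch for the extracted helper; the result dict is made up.
from urlparse import urlparse
from searx.https_rewrite import https_url_rewrite

result = {'url': 'http://soundcloud.com/some/track'}
result['parsed_url'] = urlparse(result['url'])

if result['parsed_url'].scheme == 'http':
    result = https_url_rewrite(result)
print(result['url'])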
diff --git a/searx/https_rules/Soundcloud.xml b/searx/https_rules/Soundcloud.xml
index 0baa5832b..6958e8cbc 100644
--- a/searx/https_rules/Soundcloud.xml
+++ b/searx/https_rules/Soundcloud.xml
@@ -89,7 +89,7 @@
<rule from="^http://([aiw]\d|api|wis)\.sndcdn\.com/"
to="https://$1.sndcdn.com/" />
- <rule from="^http://((?:api|backstage|blog|connect|developers|ec-media|eventlogger|help-assets|media|visuals|w|www)\.)?soundcloud\.com/"
+ <rule from="^http://((?:api|backstage|blog|connect|developers|ec-media|eventlogger|help-assets|media|visuals|w|www)\.|)soundcloud\.com/"
to="https://$1soundcloud.com/" />
<rule from="^https?://scbackstage\.wpengine\.netdna-cdn\.com/"
diff --git a/searx/search.py b/searx/search.py
index 4058cba20..d1d03805f 100644
--- a/searx/search.py
+++ b/searx/search.py
@@ -19,7 +19,6 @@ import requests as requests_lib
import threading
import re
from itertools import izip_longest, chain
-from datetime import datetime
from operator import itemgetter
from Queue import Queue
from time import time
@@ -35,16 +34,31 @@ from searx.query import Query
number_of_searches = 0
+def search_request_wrapper(fn, url, engine_name, **kwargs):
+ try:
+ return fn(url, **kwargs)
+ except Exception, e:
+ # increase errors stats
+ engines[engine_name].stats['errors'] += 1
+
+ # print engine name and specific error message
+ print('[E] Error with engine "{0}":\n\t{1}'.format(
+ engine_name, str(e)))
+ return
+
+
def threaded_requests(requests):
timeout_limit = max(r[2]['timeout'] for r in requests)
search_start = time()
- for fn, url, request_args in requests:
+ for fn, url, request_args, engine_name in requests:
+ request_args['timeout'] = timeout_limit
th = threading.Thread(
- target=fn,
- args=(url,),
+ target=search_request_wrapper,
+ args=(fn, url, engine_name),
kwargs=request_args,
name='search_request',
)
+ th._engine_name = engine_name
th.start()
for th in threading.enumerate():
@@ -52,41 +66,34 @@ def threaded_requests(requests):
remaining_time = max(0.0, timeout_limit - (time() - search_start))
th.join(remaining_time)
if th.isAlive():
- print('engine timeout')
+ print('engine timeout: {0}'.format(th._engine_name))
# get default reqest parameter
def default_request_params():
return {
- 'method': 'GET', 'headers': {}, 'data': {}, 'url': '', 'cookies': {}}
+ 'method': 'GET', 'headers': {}, 'data': {}, 'url': '', 'cookies': {}, 'verify': True}
# create a callback wrapper for the search engine results
-def make_callback(engine_name,
- results_queue,
- suggestions,
- answers,
- infoboxes,
- callback,
- params):
+def make_callback(engine_name, results_queue, callback, params):
# creating a callback wrapper for the search engine results
def process_callback(response, **kwargs):
response.search_params = params
- # callback
- try:
- search_results = callback(response)
- except Exception, e:
- # increase errors stats
+ timeout_overhead = 0.2 # seconds
+ search_duration = time() - params['started']
+ timeout_limit = engines[engine_name].timeout + timeout_overhead
+ if search_duration > timeout_limit:
+ engines[engine_name].stats['page_load_time'] += timeout_limit
engines[engine_name].stats['errors'] += 1
-
- # print engine name and specific error message
- print '[E] Error with engine "{0}":\n\t{1}'.format(
- engine_name, str(e))
return
+ # callback
+ search_results = callback(response)
+
# add results
for result in search_results:
result['engine'] = engine_name
@@ -94,8 +101,7 @@ def make_callback(engine_name,
results_queue.put_nowait((engine_name, search_results))
# update stats with current page-load-time
- engines[engine_name].stats['page_load_time'] += \
- (datetime.now() - params['started']).total_seconds()
+ engines[engine_name].stats['page_load_time'] += search_duration
return process_callback
@@ -408,6 +414,7 @@ class Search(object):
# init vars
requests = []
results_queue = Queue()
+ results = {}
suggestions = set()
answers = set()
infoboxes = []
@@ -439,14 +446,13 @@ class Search(object):
request_params = default_request_params()
request_params['headers']['User-Agent'] = user_agent
request_params['category'] = selected_engine['category']
- request_params['started'] = datetime.now()
+ request_params['started'] = time()
request_params['pageno'] = self.pageno
request_params['language'] = self.lang
# update request parameters dependent on
# search-engine (contained in engines folder)
- request_params = engine.request(self.query.encode('utf-8'),
- request_params)
+ engine.request(self.query.encode('utf-8'), request_params)
if request_params['url'] is None:
# TODO add support of offline engines
@@ -456,12 +462,8 @@ class Search(object):
callback = make_callback(
selected_engine['name'],
results_queue,
- suggestions,
- answers,
- infoboxes,
engine.response,
- request_params
- )
+ request_params)
# create dictionary which contain all
# informations about the request
@@ -469,7 +471,8 @@ class Search(object):
headers=request_params['headers'],
hooks=dict(response=callback),
cookies=request_params['cookies'],
- timeout=engine.timeout
+ timeout=engine.timeout,
+ verify=request_params['verify']
)
# specific type of request (GET or POST)
@@ -484,12 +487,13 @@ class Search(object):
continue
# append request to list
- requests.append((req, request_params['url'], request_args))
+ requests.append((req, request_params['url'], request_args, selected_engine['name']))
+ if not requests:
+ return results, suggestions, answers, infoboxes
# send all search-request
threaded_requests(requests)
- results = {}
while not results_queue.empty():
engine_name, engine_results = results_queue.get_nowait()
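
The reworked threaded_requests() gives all engine threads one shared deadline: timeout_limit is the maximum of the per-engine timeouts, and each join() waits only for whatever remains since search_start, so one slow engine cannot stall the whole search. A standalone sketch of that pattern with dummy workers:

# Shared-deadline join pattern from threaded_requests(), reduced to
# dummy workers; names and sleep durations are made up.
import threading
from time import sleep, time

def fake_engine_request(seconds):
    sleep(seconds)

timeout_limit = 1.0
search_start = time()
threads = []
for name, duration in (('fast-engine', 0.2), ('slow-engine', 2.0)):
    th = threading.Thread(target=fake_engine_request, args=(duration,),
                          name=name)
    th.start()
    threads.append(th)

for th in threads:
    remaining_time = max(0.0, timeout_limit - (time() - search_start))
    th.join(remaining_time)
    if th.is_alive():
        print('engine timeout: {0}'.format(th.name))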
diff --git a/searx/settings.yml b/searx/settings.yml
index 07cd2ac3e..847235b70 100644
--- a/searx/settings.yml
+++ b/searx/settings.yml
@@ -64,6 +64,10 @@ engines:
# engine : filecrop
# categories : files
# shortcut : fc
+
+ - name : 500px
+ engine : 500px
+ shortcut : px
- name : flickr
categories : images
@@ -99,6 +103,10 @@ engines:
engine : openstreetmap
shortcut : osm
+ - name : photon
+ engine : photon
+ shortcut : ph
+
# - name : piratebay
# engine : piratebay
# shortcut : tpb
@@ -114,6 +122,14 @@ engines:
- name : stackoverflow
engine : stackoverflow
shortcut : st
+
+ - name : searchcode doc
+ engine : searchcode_doc
+ shortcut : scd
+
+ - name : searchcode code
+ engine : searchcode_code
+ shortcut : scc
- name : startpage
engine : startpage
diff --git a/searx/static/oscar/img/icons/kickass.png b/searx/static/oscar/img/icons/kickass.png
index 59e809104..567d1039f 100644
--- a/searx/static/oscar/img/icons/kickass.png
+++ b/searx/static/oscar/img/icons/kickass.png
Binary files differ
diff --git a/searx/static/oscar/js/searx.min.js b/searx/static/oscar/js/searx.min.js
index 69cb816ae..2fba71063 100644
--- a/searx/static/oscar/js/searx.min.js
+++ b/searx/static/oscar/js/searx.min.js
@@ -1,2 +1,2 @@
-/*! oscar/searx.min.js | 30-11-2014 | https://github.com/asciimoo/searx */
-requirejs.config({baseUrl:"/static/oscar/js",paths:{app:"../app"}}),searx.autocompleter&&(searx.searchResults=new Bloodhound({datumTokenizer:Bloodhound.tokenizers.obj.whitespace("value"),queryTokenizer:Bloodhound.tokenizers.whitespace,remote:"/autocompleter?q=%QUERY"}),searx.searchResults.initialize()),$(document).ready(function(){searx.autocompleter&&$("#q").typeahead(null,{name:"search-results",displayKey:function(a){return a},source:searx.searchResults.ttAdapter()})}),$(document).ready(function(){$("#q.autofocus").focus(),$(".select-all-on-click").click(function(){$(this).select()}),$(".btn-collapse").click(function(){var a=$(this).data("btn-text-collapsed"),b=$(this).data("btn-text-not-collapsed");""!==a&&""!==b&&(new_html=$(this).hasClass("collapsed")?$(this).html().replace(a,b):$(this).html().replace(b,a),$(this).html(new_html))}),$(".btn-toggle .btn").click(function(){var a="btn-"+$(this).data("btn-class"),b=$(this).data("btn-label-default"),c=$(this).data("btn-label-toggled");""!==c&&(new_html=$(this).hasClass("btn-default")?$(this).html().replace(b,c):$(this).html().replace(c,b),$(this).html(new_html)),$(this).toggleClass(a),$(this).toggleClass("btn-default")})}),$(document).ready(function(){$(".searx_overpass_request").on("click",function(a){var b="https://overpass-api.de/api/interpreter?data=",c=b+"[out:json][timeout:25];(",d=");out meta;",e=$(this).data("osm-id"),f=$(this).data("osm-type"),g=$(this).data("result-table"),h="#"+$(this).data("result-table-loadicon"),i=["addr:city","addr:country","addr:housenumber","addr:postcode","addr:street"];if(e&&f&&g){g="#"+g;var j=null;switch(f){case"node":j=c+"node("+e+");"+d;break;case"way":j=c+"way("+e+");"+d;break;case"relation":j=c+"relation("+e+");"+d}if(j){$.ajax(j).done(function(a){if(a&&a.elements&&a.elements[0]){var b=a.elements[0],c=$(g).html();for(var d in b.tags)if(null===b.tags.name||-1==i.indexOf(d)){switch(c+="<tr><td>"+d+"</td><td>",d){case"phone":case"fax":c+='<a href="tel:'+b.tags[d].replace(/ /g,"")+'">'+b.tags[d]+"</a>";break;case"email":c+='<a href="mailto:'+b.tags[d]+'">'+b.tags[d]+"</a>";break;case"website":case"url":c+='<a href="'+b.tags[d]+'">'+b.tags[d]+"</a>";break;case"wikidata":c+='<a href="https://www.wikidata.org/wiki/'+b.tags[d]+'">'+b.tags[d]+"</a>";break;case"wikipedia":if(-1!=b.tags[d].indexOf(":")){c+='<a href="https://'+b.tags[d].substring(0,b.tags[d].indexOf(":"))+".wikipedia.org/wiki/"+b.tags[d].substring(b.tags[d].indexOf(":")+1)+'">'+b.tags[d]+"</a>";break}default:c+=b.tags[d]}c+="</td></tr>"}$(g).html(c),$(g).removeClass("hidden"),$(h).addClass("hidden")}}).fail(function(){$(h).html($(h).html()+'<p class="text-muted">could not load data!</p>')})}}$(this).off(a)}),$(".searx_init_map").on("click",function(a){var b=$(this).data("leaflet-target"),c=$(this).data("map-lon"),d=$(this).data("map-lat"),e=$(this).data("map-zoom"),f=$(this).data("map-boundingbox"),g=$(this).data("map-geojson");require(["leaflet-0.7.3.min"],function(){f&&(southWest=L.latLng(f[0],f[2]),northEast=L.latLng(f[1],f[3]),map_bounds=L.latLngBounds(southWest,northEast)),L.Icon.Default.imagePath="/static/oscar/img/map";{var a=L.map(b),h="https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png",i='Map data © <a href="https://openstreetmap.org">OpenStreetMap</a> contributors',j=new L.TileLayer(h,{minZoom:1,maxZoom:19,attribution:i}),k="http://otile{s}.mqcdn.com/tiles/1.0.0/map/{z}/{x}/{y}.jpg",l='Map data © <a href="https://openstreetmap.org">OpenStreetMap</a> contributors | Tiles Courtesy of <a href="http://www.mapquest.com/" 
target="_blank">MapQuest</a> <img src="http://developer.mapquest.com/content/osm/mq_logo.png">',m=new L.TileLayer(k,{minZoom:1,maxZoom:18,subdomains:"1234",attribution:l}),n="http://otile{s}.mqcdn.com/tiles/1.0.0/sat/{z}/{x}/{y}.jpg",o='Map data © <a href="https://openstreetmap.org">OpenStreetMap</a> contributors | Tiles Courtesy of <a href="http://www.mapquest.com/" target="_blank">MapQuest</a> <img src="https://developer.mapquest.com/content/osm/mq_logo.png"> | Portions Courtesy NASA/JPL-Caltech and U.S. Depart. of Agriculture, Farm Service Agency';new L.TileLayer(n,{minZoom:1,maxZoom:11,subdomains:"1234",attribution:o})}map_bounds?setTimeout(function(){a.fitBounds(map_bounds,{maxZoom:17})},0):c&&d&&(e?a.setView(new L.LatLng(d,c),e):a.setView(new L.LatLng(d,c),8)),a.addLayer(m);var p={"OSM Mapnik":j,MapQuest:m};L.control.layers(p).addTo(a),g&&L.geoJson(g).addTo(a)}),$(this).off(a)})}); \ No newline at end of file
+/*! oscar/searx.min.js | 19-12-2014 | https://github.com/asciimoo/searx */
+requirejs.config({baseUrl:"./static/oscar/js",paths:{app:"../app"}}),searx.autocompleter&&(searx.searchResults=new Bloodhound({datumTokenizer:Bloodhound.tokenizers.obj.whitespace("value"),queryTokenizer:Bloodhound.tokenizers.whitespace,remote:"/autocompleter?q=%QUERY"}),searx.searchResults.initialize()),$(document).ready(function(){searx.autocompleter&&$("#q").typeahead(null,{name:"search-results",displayKey:function(a){return a},source:searx.searchResults.ttAdapter()})}),$(document).ready(function(){$("#q.autofocus").focus(),$(".select-all-on-click").click(function(){$(this).select()}),$(".btn-collapse").click(function(){var a=$(this).data("btn-text-collapsed"),b=$(this).data("btn-text-not-collapsed");""!==a&&""!==b&&(new_html=$(this).hasClass("collapsed")?$(this).html().replace(a,b):$(this).html().replace(b,a),$(this).html(new_html))}),$(".btn-toggle .btn").click(function(){var a="btn-"+$(this).data("btn-class"),b=$(this).data("btn-label-default"),c=$(this).data("btn-label-toggled");""!==c&&(new_html=$(this).hasClass("btn-default")?$(this).html().replace(b,c):$(this).html().replace(c,b),$(this).html(new_html)),$(this).toggleClass(a),$(this).toggleClass("btn-default")}),$(".btn-sm").dblclick(function(){var a="btn-"+$(this).data("btn-class");$(this).hasClass("btn-default")?($(".btn-sm > input").attr("checked","checked"),$(".btn-sm > input").prop("checked",!0),$(".btn-sm").addClass(a),$(".btn-sm").addClass("active"),$(".btn-sm").removeClass("btn-default")):($(".btn-sm > input").attr("checked",""),$(".btn-sm > input").removeAttr("checked"),$(".btn-sm > input").checked=!1,$(".btn-sm").removeClass(a),$(".btn-sm").removeClass("active"),$(".btn-sm").addClass("btn-default"))})}),$(document).ready(function(){$(".searx_overpass_request").on("click",function(a){var b="https://overpass-api.de/api/interpreter?data=",c=b+"[out:json][timeout:25];(",d=");out meta;",e=$(this).data("osm-id"),f=$(this).data("osm-type"),g=$(this).data("result-table"),h="#"+$(this).data("result-table-loadicon"),i=["addr:city","addr:country","addr:housenumber","addr:postcode","addr:street"];if(e&&f&&g){g="#"+g;var j=null;switch(f){case"node":j=c+"node("+e+");"+d;break;case"way":j=c+"way("+e+");"+d;break;case"relation":j=c+"relation("+e+");"+d}if(j){$.ajax(j).done(function(a){if(a&&a.elements&&a.elements[0]){var b=a.elements[0],c=$(g).html();for(var d in b.tags)if(null===b.tags.name||-1==i.indexOf(d)){switch(c+="<tr><td>"+d+"</td><td>",d){case"phone":case"fax":c+='<a href="tel:'+b.tags[d].replace(/ /g,"")+'">'+b.tags[d]+"</a>";break;case"email":c+='<a href="mailto:'+b.tags[d]+'">'+b.tags[d]+"</a>";break;case"website":case"url":c+='<a href="'+b.tags[d]+'">'+b.tags[d]+"</a>";break;case"wikidata":c+='<a href="https://www.wikidata.org/wiki/'+b.tags[d]+'">'+b.tags[d]+"</a>";break;case"wikipedia":if(-1!=b.tags[d].indexOf(":")){c+='<a href="https://'+b.tags[d].substring(0,b.tags[d].indexOf(":"))+".wikipedia.org/wiki/"+b.tags[d].substring(b.tags[d].indexOf(":")+1)+'">'+b.tags[d]+"</a>";break}default:c+=b.tags[d]}c+="</td></tr>"}$(g).html(c),$(g).removeClass("hidden"),$(h).addClass("hidden")}}).fail(function(){$(h).html($(h).html()+'<p class="text-muted">could not load data!</p>')})}}$(this).off(a)}),$(".searx_init_map").on("click",function(a){var 
b=$(this).data("leaflet-target"),c=$(this).data("map-lon"),d=$(this).data("map-lat"),e=$(this).data("map-zoom"),f=$(this).data("map-boundingbox"),g=$(this).data("map-geojson");require(["leaflet-0.7.3.min"],function(){f&&(southWest=L.latLng(f[0],f[2]),northEast=L.latLng(f[1],f[3]),map_bounds=L.latLngBounds(southWest,northEast)),L.Icon.Default.imagePath="./static/oscar/img/map";{var a=L.map(b),h="https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png",i='Map data © <a href="https://openstreetmap.org">OpenStreetMap</a> contributors',j=new L.TileLayer(h,{minZoom:1,maxZoom:19,attribution:i}),k="http://otile{s}.mqcdn.com/tiles/1.0.0/map/{z}/{x}/{y}.jpg",l='Map data © <a href="https://openstreetmap.org">OpenStreetMap</a> contributors | Tiles Courtesy of <a href="http://www.mapquest.com/" target="_blank">MapQuest</a> <img src="http://developer.mapquest.com/content/osm/mq_logo.png">',m=new L.TileLayer(k,{minZoom:1,maxZoom:18,subdomains:"1234",attribution:l}),n="http://otile{s}.mqcdn.com/tiles/1.0.0/sat/{z}/{x}/{y}.jpg",o='Map data © <a href="https://openstreetmap.org">OpenStreetMap</a> contributors | Tiles Courtesy of <a href="http://www.mapquest.com/" target="_blank">MapQuest</a> <img src="https://developer.mapquest.com/content/osm/mq_logo.png"> | Portions Courtesy NASA/JPL-Caltech and U.S. Depart. of Agriculture, Farm Service Agency';new L.TileLayer(n,{minZoom:1,maxZoom:11,subdomains:"1234",attribution:o})}map_bounds?setTimeout(function(){a.fitBounds(map_bounds,{maxZoom:17})},0):c&&d&&(e?a.setView(new L.LatLng(d,c),e):a.setView(new L.LatLng(d,c),8)),a.addLayer(m);var p={"OSM Mapnik":j,MapQuest:m};L.control.layers(p).addTo(a),g&&L.geoJson(g).addTo(a)}),$(this).off(a)})}); \ No newline at end of file
diff --git a/searx/static/oscar/js/searx_src/00_requirejs_config.js b/searx/static/oscar/js/searx_src/00_requirejs_config.js
index 36767843b..99ec4b585 100644
--- a/searx/static/oscar/js/searx_src/00_requirejs_config.js
+++ b/searx/static/oscar/js/searx_src/00_requirejs_config.js
@@ -16,7 +16,7 @@
*/
requirejs.config({
- baseUrl: '/static/oscar/js',
+ baseUrl: './static/oscar/js',
paths: {
app: '../app'
}
diff --git a/searx/static/oscar/js/searx_src/element_modifiers.js b/searx/static/oscar/js/searx_src/element_modifiers.js
index 088bd7a46..dd45b77e0 100644
--- a/searx/static/oscar/js/searx_src/element_modifiers.js
+++ b/searx/static/oscar/js/searx_src/element_modifiers.js
@@ -63,4 +63,25 @@ $(document).ready(function(){
$(this).toggleClass(btnClass);
$(this).toggleClass('btn-default');
});
+
+ /**
+ * Select or deselect every category on double click
+ */
+ $(".btn-sm").dblclick(function() {
+ var btnClass = 'btn-' + $(this).data('btn-class'); // primary
+ if($(this).hasClass('btn-default')) {
+ $(".btn-sm > input").attr('checked', 'checked');
+ $(".btn-sm > input").prop("checked", true);
+ $(".btn-sm").addClass(btnClass);
+ $(".btn-sm").addClass('active');
+ $(".btn-sm").removeClass('btn-default');
+ } else {
+ $(".btn-sm > input").attr('checked', '');
+ $(".btn-sm > input").removeAttr('checked');
+ $(".btn-sm > input").checked = false;
+ $(".btn-sm").removeClass(btnClass);
+ $(".btn-sm").removeClass('active');
+ $(".btn-sm").addClass('btn-default');
+ }
+ });
});
diff --git a/searx/static/oscar/js/searx_src/leaflet_map.js b/searx/static/oscar/js/searx_src/leaflet_map.js
index 88af1e712..b5112ef5e 100644
--- a/searx/static/oscar/js/searx_src/leaflet_map.js
+++ b/searx/static/oscar/js/searx_src/leaflet_map.js
@@ -116,7 +116,7 @@ $(document).ready(function(){
// TODO hack
// change default imagePath
- L.Icon.Default.imagePath = "/static/oscar/img/map";
+ L.Icon.Default.imagePath = "./static/oscar/img/map";
// init map
var map = L.map(leaflet_target);
diff --git a/searx/webapp.py b/searx/webapp.py
index a2a135e9a..915fb3564 100644
--- a/searx/webapp.py
+++ b/searx/webapp.py
@@ -41,15 +41,12 @@ from searx.utils import (
UnicodeWriter, highlight_content, html_to_text, get_themes
)
from searx.version import VERSION_STRING
-from searx.https_rewrite import https_rules
from searx.languages import language_codes
+from searx.https_rewrite import https_url_rewrite
from searx.search import Search
from searx.query import Query
from searx.autocomplete import backends as autocomplete_backends
-from urlparse import urlparse
-import re
-
static_path, templates_path, themes =\
get_themes(settings['themes_path']
@@ -215,59 +212,7 @@ def index():
if settings['server']['https_rewrite']\
and result['parsed_url'].scheme == 'http':
- skip_https_rewrite = False
-
- # check if HTTPS rewrite is possible
- for target, rules, exclusions in https_rules:
-
- # check if target regex match with url
- if target.match(result['url']):
- # process exclusions
- for exclusion in exclusions:
- # check if exclusion match with url
- if exclusion.match(result['url']):
- skip_https_rewrite = True
- break
-
- # skip https rewrite if required
- if skip_https_rewrite:
- break
-
- # process rules
- for rule in rules:
- try:
- # TODO, precompile rule
- p = re.compile(rule[0])
-
- # rewrite url if possible
- new_result_url = p.sub(rule[1], result['url'])
- except:
- break
-
- # parse new url
- new_parsed_url = urlparse(new_result_url)
-
- # continiue if nothing was rewritten
- if result['url'] == new_result_url:
- continue
-
- # get domainname from result
- # TODO, does only work correct with TLD's like
- # asdf.com, not for asdf.com.de
- # TODO, using publicsuffix instead of this rewrite rule
- old_result_domainname = '.'.join(
- result['parsed_url'].hostname.split('.')[-2:])
- new_result_domainname = '.'.join(
- new_parsed_url.hostname.split('.')[-2:])
-
- # check if rewritten hostname is the same,
- # to protect against wrong or malicious rewrite rules
- if old_result_domainname == new_result_domainname:
- # set new url
- result['url'] = new_result_url
-
- # target has matched, do not search over the other rules
- break
+ result = https_url_rewrite(result)
if search.request_data.get('format', 'html') == 'html':
if 'content' in result: