| author | pw3t <romain@berthor.fr> | 2014-01-23 22:11:36 +0100 |
|---|---|---|
| committer | pw3t <romain@berthor.fr> | 2014-01-23 22:11:36 +0100 |
| commit | 132681b3aaf5b330d9d19624038b51fe2ebfd8d5 | |
| tree | 393114f41b487eea4b71dd4073903726310a1257 /searx/engines | |
| parent | d6b017efb5b51623a02c85690c7335cfc6674092 | |
| parent | 59eeeaab87951fd6fa3302ec240db98902a20b2c | |
Merge branch 'master' of https://github.com/asciimoo/searx
Diffstat (limited to 'searx/engines')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | searx/engines/__init__.py | 122 |
| -rw-r--r-- | searx/engines/bing.py | 5 |
| -rw-r--r-- | searx/engines/currency_convert.py | 35 |
| -rw-r--r-- | searx/engines/dailymotion.py | 13 |
| -rw-r--r-- | searx/engines/deviantart.py | 8 |
| -rw-r--r-- | searx/engines/duckduckgo.py | 12 |
| -rw-r--r-- | searx/engines/duckduckgo_definitions.py | 12 |
| -rw-r--r-- | searx/engines/filecrop.py | 25 |
| -rw-r--r-- | searx/engines/flickr.py | 10 |
| -rw-r--r-- | searx/engines/github.py | 7 |
| -rw-r--r-- | searx/engines/google_images.py | 10 |
| -rw-r--r-- | searx/engines/json_engine.py | 20 |
| -rw-r--r-- | searx/engines/mediawiki.py | 8 |
| -rw-r--r-- | searx/engines/piratebay.py | 27 |
| -rw-r--r-- | searx/engines/soundcloud.py | 7 |
| -rw-r--r-- | searx/engines/stackoverflow.py | 4 |
| -rw-r--r-- | searx/engines/startpage.py | 6 |
| -rw-r--r-- | searx/engines/twitter.py | 11 |
| -rw-r--r-- | searx/engines/vimeo.py | 27 |
| -rw-r--r-- | searx/engines/xpath.py | 29 |
| -rw-r--r-- | searx/engines/yacy.py | 6 |
| -rw-r--r-- | searx/engines/youtube.py | 14 |
22 files changed, 269 insertions, 149 deletions
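Most of the churn below is PEP8 line-wrapping and constant extraction, but every touched module implements the same two-function searx engine interface: `request(query, params)` fills in the outgoing request and `response(resp)` maps the raw reply to result dicts. As a reading aid, here is a minimal sketch of that interface; `example.py` and its endpoint are hypothetical, not part of this diff:

```python
# example.py -- hypothetical engine, illustrating the request()/response()
# contract the modules in this diff implement (Python 2, as in searx then)
from urllib import urlencode
from json import loads

categories = ['general']
search_url = 'https://example.com/api?{query}'  # assumed endpoint


def request(query, params):
    # params arrives pre-filled by default_request_params(); engines
    # typically only set the URL (and sometimes headers or cookies)
    params['url'] = search_url.format(query=urlencode({'q': query}))
    return params


def response(resp):
    # return a list of dicts with at least url/title/content keys
    return [{'url': r['url'],
             'title': r['title'],
             'content': r.get('description', '')}
            for r in loads(resp.text).get('results', [])]
```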
```diff
diff --git a/searx/engines/__init__.py b/searx/engines/__init__.py
index 457af4cda..96b074ae9 100644
--- a/searx/engines/__init__.py
+++ b/searx/engines/__init__.py
@@ -26,6 +26,7 @@ from searx import settings
 from searx.utils import gen_useragent
 import sys
 from datetime import datetime
+from flask.ext.babel import gettext
 
 engine_dir = dirname(realpath(__file__))
 
@@ -35,6 +36,7 @@ engines = {}
 
 categories = {'general': []}
 
+
 def load_module(filename):
     modname = splitext(filename)[0]
     if modname in sys.modules:
@@ -50,7 +52,7 @@ if not 'engines' in settings or not settings['engines']:
 
 for engine_data in settings['engines']:
     engine_name = engine_data['engine']
-    engine = load_module(engine_name+'.py')
+    engine = load_module(engine_name + '.py')
     for param_name in engine_data:
         if param_name == 'engine':
             continue
@@ -58,38 +60,50 @@ for engine_data in settings['engines']:
         if engine_data['categories'] == 'none':
             engine.categories = []
         else:
-            engine.categories = map(str.strip, engine_data['categories'].split(','))
+            engine.categories = map(
+                str.strip, engine_data['categories'].split(','))
         continue
     setattr(engine, param_name, engine_data[param_name])
 
     for engine_attr in dir(engine):
         if engine_attr.startswith('_'):
             continue
-        if getattr(engine, engine_attr) == None:
-            print '[E] Engine config error: Missing attribute "{0}.{1}"'.format(engine.name, engine_attr)
+        if getattr(engine, engine_attr) is None:
+            print '[E] Engine config error: Missing attribute "{0}.{1}"'.format(engine.name, engine_attr)  # noqa
             sys.exit(1)
 
     engines[engine.name] = engine
-    engine.stats = {'result_count': 0, 'search_count': 0, 'page_load_time': 0, 'score_count': 0, 'errors': 0}
+    engine.stats = {
+        'result_count': 0,
+        'search_count': 0,
+        'page_load_time': 0,
+        'score_count': 0,
+        'errors': 0
+    }
     if hasattr(engine, 'categories'):
         for category_name in engine.categories:
            categories.setdefault(category_name, []).append(engine)
     else:
         categories['general'].append(engine)
 
+
 def default_request_params():
-    return {'method': 'GET', 'headers': {}, 'data': {}, 'url': '', 'cookies': {}}
+    return {
+        'method': 'GET', 'headers': {}, 'data': {}, 'url': '', 'cookies': {}}
+
 
 def make_callback(engine_name, results, suggestions, callback, params):
     # creating a callback wrapper for the search engine results
     def process_callback(response, **kwargs):
         cb_res = []
         response.search_params = params
-        engines[engine_name].stats['page_load_time'] += (datetime.now() - params['started']).total_seconds()
+        engines[engine_name].stats['page_load_time'] += \
+            (datetime.now() - params['started']).total_seconds()
         try:
             search_results = callback(response)
         except Exception, e:
             engines[engine_name].stats['errors'] += 1
             results[engine_name] = cb_res
-            print '[E] Error with engine "{0}":\n\t{1}'.format(engine_name, str(e))
+            print '[E] Error with engine "{0}":\n\t{1}'.format(
+                engine_name, str(e))
             return
         for result in search_results:
             result['engine'] = engine_name
@@ -101,23 +115,25 @@ def make_callback(engine_name, results, suggestions, callback, params):
         results[engine_name] = cb_res
     return process_callback
 
+
 def score_results(results):
-    flat_res = filter(None, chain.from_iterable(izip_longest(*results.values())))
+    flat_res = filter(
+        None, chain.from_iterable(izip_longest(*results.values())))
     flat_len = len(flat_res)
     engines_len = len(results)
     results = []
     # deduplication + scoring
-    for i,res in enumerate(flat_res):
+    for i, res in enumerate(flat_res):
         res['parsed_url'] = urlparse(res['url'])
         res['engines'] = [res['engine']]
         weight = 1.0
         if hasattr(engines[res['engine']], 'weight'):
             weight = float(engines[res['engine']].weight)
-        score = int((flat_len - i)/engines_len)*weight+1
+        score = int((flat_len - i) / engines_len) * weight + 1
         duplicated = False
         for new_res in results:
-            p1 = res['parsed_url'].path[:-1] if res['parsed_url'].path.endswith('/') else res['parsed_url'].path
-            p2 = new_res['parsed_url'].path[:-1] if new_res['parsed_url'].path.endswith('/') else new_res['parsed_url'].path
+            p1 = res['parsed_url'].path[:-1] if res['parsed_url'].path.endswith('/') else res['parsed_url'].path  # noqa
+            p2 = new_res['parsed_url'].path[:-1] if new_res['parsed_url'].path.endswith('/') else new_res['parsed_url'].path  # noqa
             if res['parsed_url'].netloc == new_res['parsed_url'].netloc and\
                p1 == p2 and\
                res['parsed_url'].query == new_res['parsed_url'].query and\
@@ -125,7 +141,7 @@ def score_results(results):
                 duplicated = new_res
                 break
         if duplicated:
-            if len(res.get('content', '')) > len(duplicated.get('content', '')):
+            if len(res.get('content', '')) > len(duplicated.get('content', '')):  # noqa
                 duplicated['content'] = res['content']
             duplicated['score'] += score
             duplicated['engines'].append(res['engine'])
@@ -139,6 +155,7 @@ def score_results(results):
             results.append(res)
     return sorted(results, key=itemgetter('score'), reverse=True)
 
+
 def search(query, request, selected_engines):
     global engines, categories, number_of_searches
     requests = []
@@ -160,13 +177,20 @@ def search(query, request, selected_engines):
         request_params['started'] = datetime.now()
         request_params = engine.request(query, request_params)
 
-        callback = make_callback(selected_engine['name'], results, suggestions, engine.response, request_params)
-
-        request_args = dict(headers = request_params['headers']
-                           ,hooks = dict(response=callback)
-                           ,cookies = request_params['cookies']
-                           ,timeout = settings['server']['request_timeout']
-                           )
+        callback = make_callback(
+            selected_engine['name'],
+            results,
+            suggestions,
+            engine.response,
+            request_params
+        )
+
+        request_args = dict(
+            headers=request_params['headers'],
+            hooks=dict(response=callback),
+            cookies=request_params['cookies'],
+            timeout=settings['server']['request_timeout']
+        )
 
         if request_params['method'] == 'GET':
             req = grequests.get
@@ -180,7 +204,7 @@ def search(query, request, selected_engines):
         requests.append(req(request_params['url'], **request_args))
     grequests.map(requests)
-    for engine_name,engine_results in results.items():
+    for engine_name, engine_results in results.items():
         engines[engine_name].stats['search_count'] += 1
         engines[engine_name].stats['result_count'] += len(engine_results)
 
@@ -192,6 +216,7 @@ def search(query, request, selected_engines):
 
     return results, suggestions
 
+
 def get_engines_stats():
     # TODO refactor
     pageloads = []
@@ -200,14 +225,15 @@ def get_engines_stats():
     errors = []
     scores_per_result = []
 
-    max_pageload = max_results = max_score = max_errors = max_score_per_result = 0
+    max_pageload = max_results = max_score = max_errors = max_score_per_result = 0  # noqa
    for engine in engines.values():
         if engine.stats['search_count'] == 0:
             continue
-        results_num = engine.stats['result_count']/float(engine.stats['search_count'])
-        load_times = engine.stats['page_load_time']/float(engine.stats['search_count'])
+        results_num = \
+            engine.stats['result_count'] / float(engine.stats['search_count'])
+        load_times = engine.stats['page_load_time'] / float(engine.stats['search_count'])  # noqa
         if results_num:
-            score = engine.stats['score_count'] / float(engine.stats['search_count'])
+            score = engine.stats['score_count'] / float(engine.stats['search_count'])  # noqa
             score_per_result = score / results_num
         else:
             score = score_per_result = 0.0
@@ -220,30 +246,48 @@ def get_engines_stats():
         results.append({'avg': results_num, 'name': engine.name})
         scores.append({'avg': score, 'name': engine.name})
         errors.append({'avg': engine.stats['errors'], 'name': engine.name})
-        scores_per_result.append({'avg': score_per_result, 'name': engine.name})
+        scores_per_result.append({
+            'avg': score_per_result,
+            'name': engine.name
+        })
 
     for engine in pageloads:
-        engine['percentage'] = int(engine['avg']/max_pageload*100)
+        engine['percentage'] = int(engine['avg'] / max_pageload * 100)
     for engine in results:
-        engine['percentage'] = int(engine['avg']/max_results*100)
+        engine['percentage'] = int(engine['avg'] / max_results * 100)
     for engine in scores:
-        engine['percentage'] = int(engine['avg']/max_score*100)
+        engine['percentage'] = int(engine['avg'] / max_score * 100)
     for engine in scores_per_result:
-        engine['percentage'] = int(engine['avg']/max_score_per_result*100)
+        engine['percentage'] = int(engine['avg'] / max_score_per_result * 100)
     for engine in errors:
         if max_errors:
-            engine['percentage'] = int(float(engine['avg'])/max_errors*100)
+            engine['percentage'] = int(float(engine['avg']) / max_errors * 100)
         else:
             engine['percentage'] = 0
 
-    return [('Page loads (sec)', sorted(pageloads, key=itemgetter('avg')))
-            ,('Number of results', sorted(results, key=itemgetter('avg'), reverse=True))
-            ,('Scores', sorted(scores, key=itemgetter('avg'), reverse=True))
-            ,('Scores per result', sorted(scores_per_result, key=itemgetter('avg'), reverse=True))
-            ,('Errors', sorted(errors, key=itemgetter('avg'), reverse=True))
-            ]
+    return [
+        (
+            gettext('Page loads (sec)'),
+            sorted(pageloads, key=itemgetter('avg'))
+        ),
+        (
+            gettext('Number of results'),
+            sorted(results, key=itemgetter('avg'), reverse=True)
+        ),
+        (
+            gettext('Scores'),
+            sorted(scores, key=itemgetter('avg'), reverse=True)
+        ),
+        (
+            gettext('Scores per result'),
+            sorted(scores_per_result, key=itemgetter('avg'), reverse=True)
+        ),
+        (
+            gettext('Errors'),
+            sorted(errors, key=itemgetter('avg'), reverse=True)
+        ),
    ]
```
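A side note on the `score_results` hunk above: results from all engines are interleaved with `izip_longest`, so the index `i` encodes per-engine rank, and an engine's optional `weight` attribute scales the positional score. A standalone sketch of just that arithmetic, not repository code:

```python
def position_score(i, flat_len, engines_len, weight=1.0):
    # mirrors: score = int((flat_len - i) / engines_len) * weight + 1
    # (integer division in the Python 2 original, since both operands are ints)
    return int((flat_len - i) // engines_len) * weight + 1

# with 30 interleaved results from 3 engines, the top result of a
# weight-100 engine (currency_convert sets weight = 100) scores 10 * 100 + 1
print(position_score(0, 30, 3, weight=100.0))  # 1001.0
```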
```diff
diff --git a/searx/engines/bing.py b/searx/engines/bing.py
index 6b0bf5a3f..c4b945633 100644
--- a/searx/engines/bing.py
+++ b/searx/engines/bing.py
@@ -4,11 +4,12 @@ from cgi import escape
 
 base_url = 'http://www.bing.com/'
 search_string = 'search?{query}'
-locale = 'en-US' # see http://msdn.microsoft.com/en-us/library/dd251064.aspx
+locale = 'en-US'  # see http://msdn.microsoft.com/en-us/library/dd251064.aspx
 
 
 def request(query, params):
-    search_path = search_string.format(query=urlencode({'q': query, 'setmkt': locale}))
+    search_path = search_string.format(
+        query=urlencode({'q': query, 'setmkt': locale}))
     #if params['category'] == 'images':
     #    params['url'] = base_url + 'images/' + search_path
     params['url'] = base_url + search_path
```

```diff
diff --git a/searx/engines/currency_convert.py b/searx/engines/currency_convert.py
index 358d6b67e..ce6b3b854 100644
--- a/searx/engines/currency_convert.py
+++ b/searx/engines/currency_convert.py
@@ -5,7 +5,8 @@ categories = []
 url = 'http://finance.yahoo.com/d/quotes.csv?e=.csv&f=sl1d1t1&s={query}=X'
 weight = 100
 
-parser_re = re.compile(r'^\W*(\d+(?:\.\d+)?)\W*([a-z]{3})\W*(?:in)?\W*([a-z]{3})\W*$', re.I)
+parser_re = re.compile(r'^\W*(\d+(?:\.\d+)?)\W*([a-z]{3})\W*(?:in)?\W*([a-z]{3})\W*$', re.I)  # noqa
+
 
 def request(query, params):
     m = parser_re.match(query)
@@ -19,7 +20,7 @@ def request(query, params):
         # wrong params
         return params
 
-    q = (from_currency+to_currency).upper()
+    q = (from_currency + to_currency).upper()
 
     params['url'] = url.format(query=q)
     params['ammount'] = ammount
@@ -33,25 +34,29 @@ def response(resp):
     global base_url
     results = []
     try:
-        _,conversion_rate,_ = resp.text.split(',', 2)
+        _, conversion_rate, _ = resp.text.split(',', 2)
         conversion_rate = float(conversion_rate)
     except:
         return results
 
-    title = '{0} {1} in {2} is {3}'.format(resp.search_params['ammount']
-                                           ,resp.search_params['from']
-                                           ,resp.search_params['to']
-                                           ,resp.search_params['ammount']*conversion_rate
-                                           )
+    title = '{0} {1} in {2} is {3}'.format(
+        resp.search_params['ammount'],
+        resp.search_params['from'],
+        resp.search_params['to'],
+        resp.search_params['ammount'] * conversion_rate
+    )
 
-    content = '1 {0} is {1} {2}'.format(resp.search_params['from'], conversion_rate, resp.search_params['to'])
+    content = '1 {0} is {1} {2}'.format(resp.search_params['from'],
+                                        conversion_rate,
+                                        resp.search_params['to'])
     now_date = datetime.now().strftime('%Y%m%d')
-    url = 'http://finance.yahoo.com/currency/converter-results/{0}/{1}-{2}-to-{3}.html'
-    url = url.format(now_date
-                     ,resp.search_params['ammount']
-                     ,resp.search_params['from'].lower()
-                     ,resp.search_params['to'].lower()
-                     )
+    url = 'http://finance.yahoo.com/currency/converter-results/{0}/{1}-{2}-to-{3}.html'  # noqa
+    url = url.format(
+        now_date,
+        resp.search_params['ammount'],
+        resp.search_params['from'].lower(),
+        resp.search_params['to'].lower()
+    )
     results.append({'title': title, 'content': content, 'url': url})
 
     return results
```
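The `parser_re` pattern above is the whole query parser for this engine: an amount, a three-letter source currency, an optional literal `in`, and a target currency, all case-insensitive. A quick standalone check with an assumed query string:

```python
import re

# the same pattern currency_convert.py compiles above
parser_re = re.compile(
    r'^\W*(\d+(?:\.\d+)?)\W*([a-z]{3})\W*(?:in)?\W*([a-z]{3})\W*$', re.I)

m = parser_re.match('100 USD in EUR')
print(m.groups())  # ('100', 'USD', 'EUR'), i.e. ammount [sic], from, to
```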
```diff
diff --git a/searx/engines/dailymotion.py b/searx/engines/dailymotion.py
index 655485957..510dbbfae 100644
--- a/searx/engines/dailymotion.py
+++ b/searx/engines/dailymotion.py
@@ -1,17 +1,21 @@
 from urllib import urlencode
 from lxml import html
 from json import loads
-from cgi import escape
 
 categories = ['videos']
 locale = 'en_US'
 
 # see http://www.dailymotion.com/doc/api/obj-video.html
-search_url = 'https://api.dailymotion.com/videos?fields=title,description,duration,url,thumbnail_360_url&sort=relevance&limit=25&page=1&{query}'
+search_url = 'https://api.dailymotion.com/videos?fields=title,description,duration,url,thumbnail_360_url&sort=relevance&limit=25&page=1&{query}'  # noqa
+
+# TODO use video result template
+content_tpl = '<a href="{0}" title="{0}" ><img src="{1}" /></a><br />'
+
 
 def request(query, params):
     global search_url
-    params['url'] = search_url.format(query=urlencode({'search': query, 'localization': locale }))
+    params['url'] = search_url.format(
+        query=urlencode({'search': query, 'localization': locale}))
     return params
 
@@ -24,7 +28,7 @@ def response(resp):
         title = res['title']
         url = res['url']
         if res['thumbnail_360_url']:
-            content = '<a href="{0}" title="{0}" ><img src="{1}" /></a><br />'.format(url, res['thumbnail_360_url'])
+            content = content_tpl.format(url, res['thumbnail_360_url'])
         else:
             content = ''
         if res['description']:
@@ -33,6 +37,7 @@ def response(resp):
         results.append({'url': url, 'title': title, 'content': content})
     return results
 
+
 def text_content_from_html(html_string):
     desc_html = html.fragment_fromstring(html_string, create_parent=True)
     return desc_html.text_content()
```

```diff
diff --git a/searx/engines/deviantart.py b/searx/engines/deviantart.py
index 9a4a8abde..94a94bf16 100644
--- a/searx/engines/deviantart.py
+++ b/searx/engines/deviantart.py
@@ -7,6 +7,7 @@ categories = ['images']
 base_url = 'https://www.deviantart.com/'
 search_url = base_url+'search?'
 
+
 def request(query, params):
     global search_url
     params['url'] = search_url + urlencode({'q': query})
@@ -22,8 +23,11 @@ def response(resp):
     for result in dom.xpath('//div[contains(@class, "tt-a tt-fh")]'):
         link = result.xpath('.//a[contains(@class, "thumb")]')[0]
         url = urljoin(base_url, link.attrib.get('href'))
-        title_links = result.xpath('.//span[@class="details"]//a[contains(@class, "t")]')
+        title_links = result.xpath('.//span[@class="details"]//a[contains(@class, "t")]')  # noqa
         title = ''.join(title_links[0].xpath('.//text()'))
         img_src = link.xpath('.//img')[0].attrib['src']
-        results.append({'url': url, 'title': title, 'img_src': img_src, 'template': 'images.html'})
+        results.append({'url': url,
+                        'title': title,
+                        'img_src': img_src,
+                        'template': 'images.html'})
     return results
```

```diff
diff --git a/searx/engines/duckduckgo.py b/searx/engines/duckduckgo.py
index 4bf770972..7cae87d95 100644
--- a/searx/engines/duckduckgo.py
+++ b/searx/engines/duckduckgo.py
@@ -6,8 +6,11 @@ url = 'https://duckduckgo.com/'
 search_url = url + 'd.js?{query}&p=1&s=0'
 locale = 'us-en'
 
+
 def request(query, params):
-    params['url'] = search_url.format(query=urlencode({'q': query, 'l': locale}))
+    q = urlencode({'q': query,
+                   'l': locale})
+    params['url'] = search_url.format(query=q)
     return params
 
@@ -17,8 +20,7 @@ def response(resp):
     for r in search_res:
         if not r.get('t'):
             continue
-        results.append({'title': r['t']
-                        ,'content': html_to_text(r['a'])
-                        ,'url': r['u']
-                        })
+        results.append({'title': r['t'],
+                        'content': html_to_text(r['a']),
+                        'url': r['u']})
     return results
```
```diff
diff --git a/searx/engines/duckduckgo_definitions.py b/searx/engines/duckduckgo_definitions.py
index 7b3950b85..3037aae53 100644
--- a/searx/engines/duckduckgo_definitions.py
+++ b/searx/engines/duckduckgo_definitions.py
@@ -3,8 +3,9 @@ from urllib import urlencode
 
 url = 'http://api.duckduckgo.com/?{query}&format=json&pretty=0&no_redirect=1'
 
+
 def request(query, params):
-    params['url'] =  url.format(query=urlencode({'q': query}))
+    params['url'] = url.format(query=urlencode({'q': query}))
     return params
 
@@ -13,11 +14,10 @@ def response(resp):
     results = []
     if 'Definition' in search_res:
         if search_res.get('AbstractURL'):
-            res = {'title'    : search_res.get('Heading', '')
-                   ,'content' : search_res.get('Definition', '')
-                   ,'url'     : search_res.get('AbstractURL', '')
-                   ,'class'   : 'definition_result'
-                   }
+            res = {'title': search_res.get('Heading', ''),
+                   'content': search_res.get('Definition', ''),
+                   'url': search_res.get('AbstractURL', ''),
+                   'class': 'definition_result'}
             results.append(res)
 
     return results
```

```diff
diff --git a/searx/engines/filecrop.py b/searx/engines/filecrop.py
index 52426b84a..81340e601 100644
--- a/searx/engines/filecrop.py
+++ b/searx/engines/filecrop.py
@@ -2,7 +2,8 @@ from urllib import urlencode
 from HTMLParser import HTMLParser
 
 url = 'http://www.filecrop.com/'
-search_url = url + '/search.php?{query}&size_i=0&size_f=100000000&engine_r=1&engine_d=1&engine_e=1&engine_4=1&engine_m=1'
+search_url = url + '/search.php?{query}&size_i=0&size_f=100000000&engine_r=1&engine_d=1&engine_e=1&engine_4=1&engine_m=1'  # noqa
+
 
 class FilecropResultParser(HTMLParser):
     def __init__(self):
@@ -18,22 +19,28 @@ class FilecropResultParser(HTMLParser):
 
     def handle_starttag(self, tag, attrs):
         if tag == 'tr':
-            if ('bgcolor', '#edeff5') in attrs or ('bgcolor', '#ffffff') in attrs:
+            if ('bgcolor', '#edeff5') in attrs or\
+                    ('bgcolor', '#ffffff') in attrs:
                 self.__start_processing = True
 
         if not self.__start_processing:
             return
 
         if tag == 'label':
-            self.result['title'] = [attr[1] for attr in attrs if attr[0] == 'title'][0]
-        elif tag == 'a' and ('rel', 'nofollow') in attrs and ('class', 'sourcelink') in attrs:
+            self.result['title'] = [attr[1] for attr in attrs
+                                    if attr[0] == 'title'][0]
+        elif tag == 'a' and ('rel', 'nofollow') in attrs\
+                and ('class', 'sourcelink') in attrs:
             if 'content' in self.result:
-                self.result['content'] += [attr[1] for attr in attrs if attr[0] == 'title'][0]
+                self.result['content'] += [attr[1] for attr in attrs
+                                           if attr[0] == 'title'][0]
             else:
-                self.result['content'] = [attr[1] for attr in attrs if attr[0] == 'title'][0]
+                self.result['content'] = [attr[1] for attr in attrs
+                                          if attr[0] == 'title'][0]
             self.result['content'] += ' '
         elif tag == 'a':
-            self.result['url'] = url + [attr[1] for attr in attrs if attr[0] == 'href'][0]
+            self.result['url'] = url + [attr[1] for attr in attrs
+                                        if attr[0] == 'href'][0]
 
     def handle_endtag(self, tag):
         if self.__start_processing is False:
@@ -60,10 +67,12 @@ class FilecropResultParser(HTMLParser):
 
         self.data_counter += 1
 
+
 def request(query, params):
-    params['url'] = search_url.format(query=urlencode({'w' :query}))
+    params['url'] = search_url.format(query=urlencode({'w': query}))
     return params
 
+
 def response(resp):
     parser = FilecropResultParser()
     parser.feed(resp.text)
```
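`FilecropResultParser` is a classic stdlib `HTMLParser` state machine: `handle_starttag` flips a processing flag when a result row begins, and tag attributes arrive as `(name, value)` tuples to be filtered. A much smaller parser in the same style, for illustration only:

```python
from HTMLParser import HTMLParser  # Python 2 stdlib, as used by the engine


class LinkCollector(HTMLParser):
    def __init__(self):
        HTMLParser.__init__(self)
        self.links = []

    def handle_starttag(self, tag, attrs):
        # attrs is a list of (name, value) tuples, e.g. [('href', '/a')]
        if tag == 'a':
            self.links.extend(v for k, v in attrs if k == 'href')


collector = LinkCollector()
collector.feed('<p><a href="/a">x</a> <a href="/b">y</a></p>')
print(collector.links)  # ['/a', '/b']
```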
```diff
diff --git a/searx/engines/flickr.py b/searx/engines/flickr.py
index a9832856d..d9554b99a 100644
--- a/searx/engines/flickr.py
+++ b/searx/engines/flickr.py
@@ -8,21 +8,27 @@ categories = ['images']
 
 url = 'https://secure.flickr.com/'
 search_url = url+'search/?{query}'
+results_xpath = '//div[@id="thumbnails"]//a[@class="rapidnofollow photo-click" and @data-track="photo-click"]'  # noqa
+
 
 def request(query, params):
     params['url'] = search_url.format(query=urlencode({'q': query}))
     return params
 
+
 def response(resp):
     global base_url
     results = []
     dom = html.fromstring(resp.text)
-    for result in dom.xpath('//div[@id="thumbnails"]//a[@class="rapidnofollow photo-click" and @data-track="photo-click"]'):
+    for result in dom.xpath(results_xpath):
         href = urljoin(url, result.attrib.get('href'))
         img = result.xpath('.//img')[0]
         title = img.attrib.get('alt', '')
         img_src = img.attrib.get('data-defer-src')
         if not img_src:
             continue
-        results.append({'url': href, 'title': title, 'img_src': img_src, 'template': 'images.html'})
+        results.append({'url': href,
+                        'title': title,
+                        'img_src': img_src,
+                        'template': 'images.html'})
     return results
```

```diff
diff --git a/searx/engines/github.py b/searx/engines/github.py
index b4baea6e8..be2cfe7c5 100644
--- a/searx/engines/github.py
+++ b/searx/engines/github.py
@@ -4,12 +4,15 @@ from cgi import escape
 
 categories = ['it']
 
-search_url = 'https://api.github.com/search/repositories?sort=stars&order=desc&{query}'
+search_url = 'https://api.github.com/search/repositories?sort=stars&order=desc&{query}'  # noqa
+
+accept_header = 'application/vnd.github.preview.text-match+json'
+
 
 def request(query, params):
     global search_url
     params['url'] = search_url.format(query=urlencode({'q': query}))
-    params['headers']['Accept'] = 'application/vnd.github.preview.text-match+json'
+    params['headers']['Accept'] = accept_header
     return params
```

```diff
diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py
index d828a9c4b..57e749265 100644
--- a/searx/engines/google_images.py
+++ b/searx/engines/google_images.py
@@ -6,12 +6,14 @@ from json import loads
 categories = ['images']
 
 url = 'https://ajax.googleapis.com/'
-search_url = url + 'ajax/services/search/images?v=1.0&start=0&rsz=large&safe=off&filter=off&{query}'
+search_url = url + 'ajax/services/search/images?v=1.0&start=0&rsz=large&safe=off&filter=off&{query}'  # noqa
+
 
 def request(query, params):
     params['url'] = search_url.format(query=urlencode({'q': query}))
     return params
 
+
 def response(resp):
     results = []
     search_res = loads(resp.text)
@@ -24,5 +26,9 @@ def response(resp):
         title = result['title']
         if not result['url']:
             continue
-        results.append({'url': href, 'title': title, 'content': '', 'img_src': result['url'], 'template': 'images.html'})
+        results.append({'url': href,
+                        'title': title,
+                        'content': '',
+                        'img_src': result['url'],
+                        'template': 'images.html'})
     return results
```
```diff
diff --git a/searx/engines/json_engine.py b/searx/engines/json_engine.py
index 0386d53f7..e7cc808bb 100644
--- a/searx/engines/json_engine.py
+++ b/searx/engines/json_engine.py
@@ -2,12 +2,13 @@ from urllib import urlencode
 from json import loads
 from collections import Iterable
 
-search_url    = None
-url_query     = None
+search_url = None
+url_query = None
 content_query = None
-title_query   = None
+title_query = None
 #suggestion_xpath = ''
 
+
 def iterate(iterable):
     if type(iterable) == dict:
         it = iterable.iteritems()
@@ -17,11 +18,15 @@ def iterate(iterable):
     for index, value in it:
         yield str(index), value
 
+
 def is_iterable(obj):
-    if type(obj) == str: return False
-    if type(obj) == unicode: return False
+    if type(obj) == str:
+        return False
+    if type(obj) == unicode:
+        return False
     return isinstance(obj, Iterable)
 
+
 def parse(query):
     q = []
     for part in query.split('/'):
@@ -31,6 +36,7 @@ def parse(query):
             q.append(part)
     return q
 
+
 def do_query(data, q):
     ret = []
     if not len(q):
@@ -38,7 +44,7 @@ def do_query(data, q):
 
     qkey = q[0]
 
-    for key,value in iterate(data):
+    for key, value in iterate(data):
 
         if len(q) == 1:
             if key == qkey:
@@ -54,11 +60,13 @@ def do_query(data, q):
                 ret.extend(do_query(value, q))
     return ret
 
+
 def query(data, query_string):
     q = parse(query_string)
 
     return do_query(data, q)
 
+
 def request(query, params):
     query = urlencode({'q': query})[2:]
     params['url'] = search_url.format(query=query)
```
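`json_engine`'s `parse`/`do_query` implement a small slash-separated path language over parsed JSON: dict keys and list indices are both path segments, and `do_query` recursively collects every match. The sketch below is a simplified single-path variant to show the idea; it is not the engine's code, which also descends into nested iterables:

```python
def walk(data, path):
    # 'results/1/title' -> data['results'][1]['title']
    for part in path.split('/'):
        data = data[int(part) if part.isdigit() else part]
    return data

doc = {'results': [{'title': 'first hit'}, {'title': 'second hit'}]}
print(walk(doc, 'results/1/title'))  # second hit
```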
```diff
diff --git a/searx/engines/mediawiki.py b/searx/engines/mediawiki.py
index 00ad0f106..bc4aab6df 100644
--- a/searx/engines/mediawiki.py
+++ b/searx/engines/mediawiki.py
@@ -3,10 +3,12 @@ from urllib import urlencode, quote
 
 url = 'https://en.wikipedia.org/'
 
+search_url = url + 'w/api.php?action=query&list=search&{query}&srprop=timestamp&format=json'  # noqa
+
 number_of_results = 10
 
+
 def request(query, params):
-    search_url = url + 'w/api.php?action=query&list=search&{query}&srprop=timestamp&format=json'
     params['url'] = search_url.format(query=urlencode({'srsearch': query}))
     return params
 
@@ -14,7 +16,5 @@ def request(query, params):
 def response(resp):
     search_results = loads(resp.text)
     res = search_results.get('query', {}).get('search', [])
-
-    return [{'url': url + 'wiki/' + quote(result['title'].replace(' ', '_').encode('utf-8')),
+    return [{'url': url + 'wiki/' + quote(result['title'].replace(' ', '_').encode('utf-8')),  # noqa
             'title': result['title']} for result in res[:int(number_of_results)]]
-
```

```diff
diff --git a/searx/engines/piratebay.py b/searx/engines/piratebay.py
index 9cf410106..7319b49c1 100644
--- a/searx/engines/piratebay.py
+++ b/searx/engines/piratebay.py
@@ -7,13 +7,18 @@ categories = ['videos', 'music']
 url = 'https://thepiratebay.se/'
 search_url = url + 'search/{search_term}/0/99/{search_type}'
-search_types = {'videos': '200'
-                ,'music' : '100'
-                ,'files' : '0'
-                }
+search_types = {'videos': '200',
+                'music': '100',
+                'files': '0'}
+
+magnet_xpath = './/a[@title="Download this torrent using magnet"]'
+content_xpath = './/font[@class="detDesc"]//text()'
+
 
 def request(query, params):
-    params['url'] = search_url.format(search_term=quote(query), search_type=search_types.get(params['category']))
+    search_type = search_types.get(params['category'])
+    params['url'] = search_url.format(search_term=quote(query),
+                                      search_type=search_type)
     return params
 
@@ -27,10 +32,14 @@ def response(resp):
         link = result.xpath('.//div[@class="detName"]//a')[0]
         href = urljoin(url, link.attrib.get('href'))
         title = ' '.join(link.xpath('.//text()'))
-        content = escape(' '.join(result.xpath('.//font[@class="detDesc"]//text()')))
+        content = escape(' '.join(result.xpath(content_xpath)))
         seed, leech = result.xpath('.//td[@align="right"]/text()')[:2]
-        magnetlink = result.xpath('.//a[@title="Download this torrent using magnet"]')[0]
-        results.append({'url': href, 'title': title, 'content': content,
-                        'seed': seed, 'leech': leech, 'magnetlink': magnetlink.attrib['href'],
+        magnetlink = result.xpath(magnet_xpath)[0]
+        results.append({'url': href,
+                        'title': title,
+                        'content': content,
+                        'seed': seed,
+                        'leech': leech,
+                        'magnetlink': magnetlink.attrib['href'],
                         'template': 'torrent.html'})
     return results
```

```diff
diff --git a/searx/engines/soundcloud.py b/searx/engines/soundcloud.py
index 50414f153..b1930b2ee 100644
--- a/searx/engines/soundcloud.py
+++ b/searx/engines/soundcloud.py
@@ -5,7 +5,8 @@ categories = ['music']
 guest_client_id = 'b45b1aa10f1ac2941910a7f0d10f8e28'
 
 url = 'https://api.soundcloud.com/'
-search_url = url + 'search?{query}&facet=model&limit=20&offset=0&linked_partitioning=1&client_id='+guest_client_id
+search_url = url + 'search?{query}&facet=model&limit=20&offset=0&linked_partitioning=1&client_id='+guest_client_id  # noqa
+
 
 def request(query, params):
     global search_url
@@ -21,5 +22,7 @@ def response(resp):
         if result['kind'] in ('track', 'playlist'):
             title = result['title']
             content = result['description']
-            results.append({'url': result['permalink_url'], 'title': title, 'content': content})
+            results.append({'url': result['permalink_url'],
+                            'title': title,
+                            'content': content})
     return results
```

```diff
diff --git a/searx/engines/stackoverflow.py b/searx/engines/stackoverflow.py
index 9ee89bc6e..35230600f 100644
--- a/searx/engines/stackoverflow.py
+++ b/searx/engines/stackoverflow.py
@@ -7,6 +7,8 @@ categories = ['it']
 url = 'http://stackoverflow.com/'
 search_url = url+'search?'
 
+result_xpath = './/div[@class="excerpt"]//text()'
+
 
 def request(query, params):
     params['url'] = search_url + urlencode({'q': query})
@@ -20,6 +22,6 @@ def response(resp):
         link = result.xpath('.//div[@class="result-link"]//a')[0]
         href = urljoin(url, link.attrib.get('href'))
         title = escape(' '.join(link.xpath('.//text()')))
-        content = escape(' '.join(result.xpath('.//div[@class="excerpt"]//text()')))
+        content = escape(' '.join(result.xpath(result_xpath)))
         results.append({'url': href, 'title': title, 'content': content})
     return results
```
```diff
diff --git a/searx/engines/startpage.py b/searx/engines/startpage.py
index 87c091e2d..d6d7cf44d 100644
--- a/searx/engines/startpage.py
+++ b/searx/engines/startpage.py
@@ -1,11 +1,10 @@
 from urllib import urlencode
 from lxml import html
-from urlparse import urlparse
-from cgi import escape
 
 base_url = 'https://startpage.com/'
 search_url = base_url+'do/search'
 
+
 def request(query, params):
     global search_url
     query = urlencode({'q': query})[2:]
@@ -20,11 +19,10 @@ def response(resp):
     results = []
     dom = html.fromstring(resp.content)
     # ads xpath //div[@id="results"]/div[@id="sponsored"]//div[@class="result"]
-    # not ads : div[@class="result"] are the direct childs of div[@id="results"]
+    # not ads: div[@class="result"] are the direct childs of div[@id="results"]
     for result in dom.xpath('//div[@id="results"]/div[@class="result"]'):
         link = result.xpath('.//h3/a')[0]
         url = link.attrib.get('href')
-        parsed_url = urlparse(url)
         title = link.text_content()
         content = result.xpath('./p[@class="desc"]')[0].text_content()
         results.append({'url': url, 'title': title, 'content': content})
```

```diff
diff --git a/searx/engines/twitter.py b/searx/engines/twitter.py
index f9d9e26ad..23393ac4d 100644
--- a/searx/engines/twitter.py
+++ b/searx/engines/twitter.py
@@ -7,6 +7,9 @@ categories = ['social media']
 base_url = 'https://twitter.com/'
 search_url = base_url+'search?'
 
+title_xpath = './/span[@class="username js-action-profile-name"]//text()'
+content_xpath = './/p[@class="js-tweet-text tweet-text"]//text()'
+
 
 def request(query, params):
     global search_url
@@ -21,7 +24,9 @@ def response(resp):
     for tweet in dom.xpath('//li[@data-item-type="tweet"]'):
         link = tweet.xpath('.//small[@class="time"]//a')[0]
         url = urljoin(base_url, link.attrib.get('href'))
-        title = ''.join(tweet.xpath('.//span[@class="username js-action-profile-name"]//text()'))
-        content = escape(''.join(tweet.xpath('.//p[@class="js-tweet-text tweet-text"]//text()')))
-        results.append({'url': url, 'title': title, 'content': content})
+        title = ''.join(tweet.xpath(title_xpath))
+        content = escape(''.join(tweet.xpath(content_xpath)))
+        results.append({'url': url,
+                        'title': title,
+                        'content': content})
     return results
```
```diff
diff --git a/searx/engines/vimeo.py b/searx/engines/vimeo.py
index 35bc3d50a..924497a99 100644
--- a/searx/engines/vimeo.py
+++ b/searx/engines/vimeo.py
@@ -5,27 +5,31 @@ from lxml import html
 
 base_url = 'http://vimeo.com'
 search_url = base_url + '/search?{query}'
-url_xpath     = None
+url_xpath = None
 content_xpath = None
-title_xpath   = None
+title_xpath = None
 results_xpath = ''
+content_tpl = '<a href="{0}"> <img src="{2}"/> </a>'
 
-# the cookie set by vimeo contains all the following values, but only __utma seems to be requiered
+# the cookie set by vimeo contains all the following values,
+# but only __utma seems to be requiered
 cookie = {
     #'vuid':'918282893.1027205400'
     # 'ab_bs':'%7B%223%22%3A279%7D'
-    '__utma':'00000000.000#0000000.0000000000.0000000000.0000000000.0'
+    '__utma': '00000000.000#0000000.0000000000.0000000000.0000000000.0'
     # '__utmb':'18302654.1.10.1388942090'
     #, '__utmc':'18302654'
-    #, '__utmz':'18#302654.1388942090.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none)'
+    #, '__utmz':'18#302654.1388942090.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none)'  # noqa
    #, '__utml':'search'
 }
 
+
 def request(query, params):
-    params['url'] = search_url.format(query=urlencode({'q' :query}))
+    params['url'] = search_url.format(query=urlencode({'q': query}))
     params['cookies'] = cookie
     return params
 
+
 def response(resp):
     results = []
     dom = html.fromstring(resp.text)
@@ -36,10 +40,9 @@ def response(resp):
         url = base_url + result.xpath(url_xpath)[0]
         title = p.unescape(extract_text(result.xpath(title_xpath)))
         thumbnail = extract_text(result.xpath(content_xpath)[0])
-        content = '<a href="{0}"> <img src="{2}"/> </a>'.format(url, title, thumbnail)
-        results.append({'url': url
-                        , 'title': title
-                        , 'content': content
-                        , 'template':'videos.html'
-                        , 'thumbnail': thumbnail})
+        results.append({'url': url,
+                        'title': title,
+                        'content': content_tpl.format(url, title, thumbnail),
+                        'template': 'videos.html',
+                        'thumbnail': thumbnail})
     return results
```

```diff
diff --git a/searx/engines/xpath.py b/searx/engines/xpath.py
index 5e2c3c38b..8960b5f21 100644
--- a/searx/engines/xpath.py
+++ b/searx/engines/xpath.py
@@ -1,21 +1,25 @@
 from lxml import html
 from urllib import urlencode, unquote
 from urlparse import urlparse, urljoin
-from cgi import escape
 from lxml.etree import _ElementStringResult
+from searx.utils import html_to_text
 
-search_url    = None
-url_xpath     = None
+search_url = None
+url_xpath = None
 content_xpath = None
-title_xpath   = None
+title_xpath = None
 suggestion_xpath = ''
 results_xpath = ''
 
+
 '''
 if xpath_results is list, extract the text from each result and concat the list
-if xpath_results is a xml element, extract all the text node from it ( text_content() method from lxml )
+if xpath_results is a xml element, extract all the text node from it
+   ( text_content() method from lxml )
 if xpath_results is a string element, then it's already done
 '''
+
+
 def extract_text(xpath_results):
     if type(xpath_results) == list:
         # it's list of result : concat everything using recursive call
@@ -30,7 +34,7 @@ def extract_text(xpath_results):
         return ''.join(xpath_results)
     else:
         # it's a element
-        return xpath_results.text_content()
+        return html_to_text(xpath_results.text_content())
 
 
 def extract_url(xpath_results):
@@ -60,7 +64,8 @@ def normalize_url(url):
         url += '/'
 
     # FIXME : hack for yahoo
-    if parsed_url.hostname == 'search.yahoo.com' and parsed_url.path.startswith('/r'):
+    if parsed_url.hostname == 'search.yahoo.com'\
+       and parsed_url.path.startswith('/r'):
         p = parsed_url.path
         mark = p.find('/**')
         if mark != -1:
@@ -82,15 +87,15 @@ def response(resp):
     if results_xpath:
         for result in dom.xpath(results_xpath):
             url = extract_url(result.xpath(url_xpath))
-            title = extract_text(result.xpath(title_xpath)[0 ])
+            title = extract_text(result.xpath(title_xpath)[0])
             content = extract_text(result.xpath(content_xpath)[0])
             results.append({'url': url, 'title': title, 'content': content})
     else:
         for url, title, content in zip(
-            map(extract_url, dom.xpath(url_xpath)), \
-            map(extract_text, dom.xpath(title_xpath)), \
-            map(extract_text, dom.xpath(content_xpath)), \
-                ):
+            map(extract_url, dom.xpath(url_xpath)),
+            map(extract_text, dom.xpath(title_xpath)),
+            map(extract_text, dom.xpath(content_xpath))
+        ):
             results.append({'url': url, 'title': title, 'content': content})
 
     if not suggestion_xpath:
```
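`extract_text` above accepts whatever an XPath evaluation yields (a list of results, a bare element, or a string result) and normalizes it to text, now routed through `html_to_text`. A small standalone illustration of the lxml behaviors it relies on, with made-up markup:

```python
from lxml import html

doc = html.fromstring('<div><p class="desc">Hello <b>world</b></p></div>')

elements = doc.xpath('//p[@class="desc"]')         # list of elements
strings = doc.xpath('//p[@class="desc"]//text()')  # list of string results

print(elements[0].text_content())  # 'Hello world' (all descendant text)
print(''.join(strings))            # 'Hello world'
```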
```diff
diff --git a/searx/engines/yacy.py b/searx/engines/yacy.py
index c93ac522f..a4a41ac3b 100644
--- a/searx/engines/yacy.py
+++ b/searx/engines/yacy.py
@@ -4,10 +4,12 @@ from urllib import urlencode
 url = 'http://localhost:8090'
 search_url = '/yacysearch.json?{query}&maximumRecords=10'
 
+
 def request(query, params):
-    params['url'] = url + search_url.format(query=urlencode({'query':query}))
+    params['url'] = url + search_url.format(query=urlencode({'query': query}))
     return params
 
+
 def response(resp):
     raw_search_results = loads(resp.text)
 
@@ -25,7 +27,7 @@ def response(resp):
         tmp_result['content'] = ''
 
         if len(result['description']):
-            tmp_result['content'] += result['description'] +"<br/>"
+            tmp_result['content'] += result['description'] + "<br/>"
 
         if len(result['pubDate']):
             tmp_result['content'] += result['pubDate'] + "<br/>"
```

```diff
diff --git a/searx/engines/youtube.py b/searx/engines/youtube.py
index cefdb6536..62884702f 100644
--- a/searx/engines/youtube.py
+++ b/searx/engines/youtube.py
@@ -5,6 +5,7 @@ categories = ['videos']
 
 search_url = 'https://gdata.youtube.com/feeds/api/videos?alt=json&{query}'
 
+
 def request(query, params):
     params['url'] = search_url.format(query=urlencode({'q': query}))
     return params
@@ -30,17 +31,16 @@ def response(resp):
         thumbnail = ''
         if len(result['media$group']['media$thumbnail']):
             thumbnail = result['media$group']['media$thumbnail'][0]['url']
-            content += '<a href="{0}" title="{0}" ><img src="{1}" /></a>'.format(url, thumbnail)
+            content += '<a href="{0}" title="{0}" ><img src="{1}" /></a>'.format(url, thumbnail)  # noqa
 
         if len(content):
             content += '<br />' + result['content']['$t']
         else:
             content = result['content']['$t']
 
-        results.append({'url': url
-                        , 'title': title
-                        , 'content': content
-                        , 'template':'videos.html'
-                        , 'thumbnail':thumbnail})
+        results.append({'url': url,
+                        'title': title,
+                        'content': content,
+                        'template': 'videos.html',
+                        'thumbnail': thumbnail})
     return results
-
```