author    Adam Tauber <asciimoo@gmail.com>  2014-07-07 13:59:27 +0200
committer Adam Tauber <asciimoo@gmail.com>  2014-07-07 13:59:27 +0200
commit    b0ba367a1a575fbbd3d847bdbe39b6920d2f46e4 (patch)
tree      096246e40b1f9b651322b7c97b25333ceedd1979 /searx/engines/__init__.py
parent    a07b2b514c38cff031e0e36b99878a6041873842 (diff)
[enh][mod] search refactor
Diffstat (limited to 'searx/engines/__init__.py')
-rw-r--r--  searx/engines/__init__.py  163
1 file changed, 1 insertion(+), 162 deletions(-)
diff --git a/searx/engines/__init__.py b/searx/engines/__init__.py
index 20a34c153..82c9407a2 100644
--- a/searx/engines/__init__.py
+++ b/searx/engines/__init__.py
@@ -19,19 +19,12 @@ along with searx. If not, see < http://www.gnu.org/licenses/ >.
 from os.path import realpath, dirname, splitext, join
 import sys
 from imp import load_source
-from itertools import izip_longest, chain
-from operator import itemgetter
-from urlparse import urlparse, unquote
-from datetime import datetime
-import grequests
 from flask.ext.babel import gettext
+from operator import itemgetter
 from searx import settings
-from searx.utils import gen_useragent
 
 engine_dir = dirname(realpath(__file__))
 
-number_of_searches = 0
-
 engines = {}
 
 categories = {'general': []}
@@ -114,160 +107,6 @@ for engine_data in settings['engines']:
         engine_shortcuts[engine.shortcut] = engine.name
 
 
-def default_request_params():
-    return {
-        'method': 'GET', 'headers': {}, 'data': {}, 'url': '', 'cookies': {}}
-
-
-def make_callback(engine_name, results, suggestions, callback, params):
-    # creating a callback wrapper for the search engine results
-    def process_callback(response, **kwargs):
-        cb_res = []
-        response.search_params = params
-        engines[engine_name].stats['page_load_time'] += \
-            (datetime.now() - params['started']).total_seconds()
-        try:
-            search_results = callback(response)
-        except Exception, e:
-            engines[engine_name].stats['errors'] += 1
-            results[engine_name] = cb_res
-            print '[E] Error with engine "{0}":\n\t{1}'.format(
-                engine_name, str(e))
-            return
-        for result in search_results:
-            result['engine'] = engine_name
-            if 'suggestion' in result:
-                # TODO type checks
-                suggestions.add(result['suggestion'])
-                continue
-            cb_res.append(result)
-        results[engine_name] = cb_res
-    return process_callback
-
-
-def score_results(results):
-    flat_res = filter(
-        None, chain.from_iterable(izip_longest(*results.values())))
-    flat_len = len(flat_res)
-    engines_len = len(results)
-    results = []
-    # deduplication + scoring
-    for i, res in enumerate(flat_res):
-
-        res['parsed_url'] = urlparse(res['url'])
-
-        res['host'] = res['parsed_url'].netloc
-
-        if res['host'].startswith('www.'):
-            res['host'] = res['host'].replace('www.', '', 1)
-
-        res['engines'] = [res['engine']]
-        weight = 1.0
-
-        if hasattr(engines[res['engine']], 'weight'):
-            weight = float(engines[res['engine']].weight)
-
-        score = int((flat_len - i) / engines_len) * weight + 1
-        duplicated = False
-
-        for new_res in results:
-            p1 = res['parsed_url'].path[:-1] if res['parsed_url'].path.endswith('/') else res['parsed_url'].path  # noqa
-            p2 = new_res['parsed_url'].path[:-1] if new_res['parsed_url'].path.endswith('/') else new_res['parsed_url'].path  # noqa
-            if res['host'] == new_res['host'] and\
-               unquote(p1) == unquote(p2) and\
-               res['parsed_url'].query == new_res['parsed_url'].query and\
-               res.get('template') == new_res.get('template'):
-                duplicated = new_res
-                break
-        if duplicated:
-            if res.get('content') > duplicated.get('content'):
-                duplicated['content'] = res['content']
-            duplicated['score'] += score
-            duplicated['engines'].append(res['engine'])
-            if duplicated['parsed_url'].scheme == 'https':
-                continue
-            elif res['parsed_url'].scheme == 'https':
-                duplicated['url'] = res['parsed_url'].geturl()
-                duplicated['parsed_url'] = res['parsed_url']
-        else:
-            res['score'] = score
-            results.append(res)
-    return sorted(results, key=itemgetter('score'), reverse=True)
-
-
-def search(query, request, selected_engines, pageno=1, lang='all'):
-    global engines, categories, number_of_searches
-    requests = []
-    results = {}
-    suggestions = set()
-    number_of_searches += 1
-    #user_agent = request.headers.get('User-Agent', '')
-    user_agent = gen_useragent()
-
-    for selected_engine in selected_engines:
-        if selected_engine['name'] not in engines:
-            continue
-
-        engine = engines[selected_engine['name']]
-
-        if pageno > 1 and not engine.paging:
-            continue
-
-        if lang != 'all' and not engine.language_support:
-            continue
-
-        request_params = default_request_params()
-        request_params['headers']['User-Agent'] = user_agent
-        request_params['category'] = selected_engine['category']
-        request_params['started'] = datetime.now()
-        request_params['pageno'] = pageno
-        request_params['language'] = lang
-        request_params = engine.request(query.encode('utf-8'), request_params)
-
-        if request_params['url'] is None:
-            # TODO add support of offline engines
-            pass
-
-        callback = make_callback(
-            selected_engine['name'],
-            results,
-            suggestions,
-            engine.response,
-            request_params
-        )
-
-        request_args = dict(
-            headers=request_params['headers'],
-            hooks=dict(response=callback),
-            cookies=request_params['cookies'],
-            timeout=engine.timeout
-        )
-
-        if request_params['method'] == 'GET':
-            req = grequests.get
-        else:
-            req = grequests.post
-            request_args['data'] = request_params['data']
-
-        # ignoring empty urls
-        if not request_params['url']:
-            continue
-
-        requests.append(req(request_params['url'], **request_args))
-    grequests.map(requests)
-    for engine_name, engine_results in results.items():
-        engines[engine_name].stats['search_count'] += 1
-        engines[engine_name].stats['result_count'] += len(engine_results)
-
-    results = score_results(results)
-
-    for result in results:
-        for res_engine in result['engines']:
-            engines[result['engine']].stats['score_count'] += result['score']
-
-    return results, suggestions
-
-
 def get_engines_stats():
     # TODO refactor
     pageloads = []
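
Note: this hunk strips the Python 2 / grequests-era HTTP dispatch and result-ranking logic out of engines/__init__.py as part of the search refactor named in the commit message. For readers tracing the change, the idea behind the removed score_results() can be sketched in modern Python 3 as follows. This is a minimal illustration only, not the searx implementation: the function name rank_results, the weights parameter, and the simplified result dicts are assumptions made for the example.

    # Sketch of the ranking/deduplication pattern removed by this commit
    # (illustrative only, not the searx implementation).
    from itertools import chain, zip_longest   # Python 3 name for izip_longest
    from operator import itemgetter
    from urllib.parse import urlparse, unquote  # urlparse moved into urllib in Python 3


    def rank_results(results_per_engine, weights=None):
        """results_per_engine: engine name -> list of {'url': ..., 'engine': ...} dicts.
        weights: optional engine name -> float, standing in for the per-engine
        `weight` attribute the original code read via hasattr()."""
        weights = weights or {}
        # Round-robin interleave so each engine's top-ranked results come first.
        flat = [r for r in chain.from_iterable(
            zip_longest(*results_per_engine.values())) if r is not None]
        merged = []
        for i, res in enumerate(flat):
            parsed = urlparse(res['url'])
            host = parsed.netloc[4:] if parsed.netloc.startswith('www.') else parsed.netloc
            # Normalize host, path, and query into a deduplication key.
            key = (host, unquote(parsed.path.rstrip('/')), parsed.query)
            # Earlier positions score higher; the engine weight scales the score.
            score = int((len(flat) - i) / len(results_per_engine)) \
                * weights.get(res['engine'], 1.0) + 1
            dup = next((m for m in merged if m['_key'] == key), None)
            if dup is not None:
                dup['score'] += score                      # duplicates boost each other
                dup['engines'].append(res['engine'])
                if parsed.scheme == 'https' and not dup['url'].startswith('https:'):
                    dup['url'] = res['url']                # prefer the HTTPS variant
            else:
                merged.append(dict(res, score=score, engines=[res['engine']], _key=key))
        return sorted(merged, key=itemgetter('score'), reverse=True)


    # Example: the http and https variants of the same page merge into one result
    # that carries both engine names, the HTTPS URL, and the combined score.
    ranked = rank_results({
        'a': [{'url': 'https://example.com/x', 'engine': 'a'}],
        'b': [{'url': 'http://example.com/x/', 'engine': 'b'}],
    })

The removed search() wrapped each engine's response parser in a callback and fired all requests concurrently with grequests.map; the same result-merging step above ran once every callback had filled in its engine's slot of the shared results dict.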