Diffstat (limited to 'searx/engines'): 42 files changed, 831 insertions, 509 deletions
diff --git a/searx/engines/__init__.py b/searx/engines/__init__.py index f32b57202..9ccef8b54 100644 --- a/searx/engines/__init__.py +++ b/searx/engines/__init__.py @@ -27,7 +27,7 @@ from json import loads from requests import get from searx import settings from searx import logger -from searx.utils import load_module, match_language +from searx.utils import load_module, match_language, get_engine_from_settings logger = logger.getChild('engines') @@ -53,15 +53,22 @@ engine_default_args = {'paging': False, 'disabled': False, 'suspend_end_time': 0, 'continuous_errors': 0, - 'time_range_support': False} + 'time_range_support': False, + 'offline': False, + 'tokens': []} def load_engine(engine_data): - - if '_' in engine_data['name']: - logger.error('Engine name conains underscore: "{}"'.format(engine_data['name'])) + engine_name = engine_data['name'] + if '_' in engine_name: + logger.error('Engine name contains underscore: "{}"'.format(engine_name)) sys.exit(1) + if engine_name.lower() != engine_name: + logger.warn('Engine name is not lowercase: "{}", converting to lowercase'.format(engine_name)) + engine_name = engine_name.lower() + engine_data['name'] = engine_name + engine_module = engine_data['engine'] try: @@ -123,14 +130,16 @@ def load_engine(engine_data): engine.stats = { 'result_count': 0, 'search_count': 0, - 'page_load_time': 0, - 'page_load_count': 0, 'engine_time': 0, 'engine_time_count': 0, 'score_count': 0, 'errors': 0 } + if not engine.offline: + engine.stats['page_load_time'] = 0 + engine.stats['page_load_count'] = 0 + for category_name in engine.categories: categories.setdefault(category_name, []).append(engine) @@ -152,7 +161,7 @@ def to_percentage(stats, maxvalue): return stats -def get_engines_stats(): +def get_engines_stats(preferences): # TODO refactor pageloads = [] engine_times = [] @@ -163,16 +172,15 @@ def get_engines_stats(): max_pageload = max_engine_times = max_results = max_score = max_errors = max_score_per_result = 0 # noqa for engine in engines.values(): + if not preferences.validate_token(engine): + continue + if engine.stats['search_count'] == 0: continue + results_num = \ engine.stats['result_count'] / float(engine.stats['search_count']) - if engine.stats['page_load_count'] != 0: - load_times = engine.stats['page_load_time'] / float(engine.stats['page_load_count']) # noqa - else: - load_times = 0 - if engine.stats['engine_time_count'] != 0: this_engine_time = engine.stats['engine_time'] / float(engine.stats['engine_time_count']) # noqa else: @@ -184,14 +192,19 @@ def get_engines_stats(): else: score = score_per_result = 0.0 - max_pageload = max(load_times, max_pageload) + if not engine.offline: + load_times = 0 + if engine.stats['page_load_count'] != 0: + load_times = engine.stats['page_load_time'] / float(engine.stats['page_load_count']) # noqa + max_pageload = max(load_times, max_pageload) + pageloads.append({'avg': load_times, 'name': engine.name}) + max_engine_times = max(this_engine_time, max_engine_times) max_results = max(results_num, max_results) max_score = max(score, max_score) max_score_per_result = max(score_per_result, max_score_per_result) max_errors = max(max_errors, engine.stats['errors']) - pageloads.append({'avg': load_times, 'name': engine.name}) engine_times.append({'avg': this_engine_time, 'name': engine.name}) results.append({'avg': results_num, 'name': engine.name}) scores.append({'avg': score, 'name': engine.name}) @@ -248,12 +261,14 @@ def load_engines(engine_list): def initialize_engines(engine_list): load_engines(engine_list) + + 
def engine_init(engine_name, init_fn): + init_fn(get_engine_from_settings(engine_name)) + logger.debug('%s engine: Initialized', engine_name) + for engine_name, engine in engines.items(): if hasattr(engine, 'init'): init_fn = getattr(engine, 'init') - - def engine_init(): - init_fn() - logger.debug('%s engine initialized', engine_name) - logger.debug('Starting background initialization of %s engine', engine_name) - threading.Thread(target=engine_init).start() + if init_fn: + logger.debug('%s engine: Starting background initialization', engine_name) + threading.Thread(target=engine_init, args=(engine_name, init_fn)).start() diff --git a/searx/engines/arxiv.py b/searx/engines/arxiv.py index 5ef84f0c1..e3c871d17 100644 --- a/searx/engines/arxiv.py +++ b/searx/engines/arxiv.py @@ -17,6 +17,7 @@ from searx.url_utils import urlencode categories = ['science'] +paging = True base_url = 'http://export.arxiv.org/api/query?search_query=all:'\ + '{query}&start={offset}&max_results={number_of_results}' @@ -29,7 +30,7 @@ def request(query, params): # basic search offset = (params['pageno'] - 1) * number_of_results - string_args = dict(query=query, + string_args = dict(query=query.decode('utf-8'), offset=offset, number_of_results=number_of_results) diff --git a/searx/engines/bing.py b/searx/engines/bing.py index 742379c1a..b193f7c60 100644 --- a/searx/engines/bing.py +++ b/searx/engines/bing.py @@ -13,10 +13,14 @@ @todo publishedDate """ +import re from lxml import html +from searx import logger, utils from searx.engines.xpath import extract_text from searx.url_utils import urlencode -from searx.utils import match_language, gen_useragent +from searx.utils import match_language, gen_useragent, eval_xpath + +logger = logger.getChild('bing engine') # engine dependent config categories = ['general'] @@ -30,9 +34,13 @@ base_url = 'https://www.bing.com/' search_string = 'search?{query}&first={offset}' +def _get_offset_from_pageno(pageno): + return (pageno - 1) * 10 + 1 + + # do search-request def request(query, params): - offset = (params['pageno'] - 1) * 10 + 1 + offset = _get_offset_from_pageno(params.get('pageno', 0)) if params['language'] == 'all': lang = 'EN' @@ -47,29 +55,21 @@ def request(query, params): params['url'] = base_url + search_path - params['headers']['User-Agent'] = gen_useragent('Windows NT 6.3; WOW64') - return params # get response from search-request def response(resp): results = [] + result_len = 0 dom = html.fromstring(resp.text) - - try: - results.append({'number_of_results': int(dom.xpath('//span[@class="sb_count"]/text()')[0] - .split()[0].replace(',', ''))}) - except: - pass - # parse results - for result in dom.xpath('//div[@class="sa_cc"]'): - link = result.xpath('.//h3/a')[0] + for result in eval_xpath(dom, '//div[@class="sa_cc"]'): + link = eval_xpath(result, './/h3/a')[0] url = link.attrib.get('href') title = extract_text(link) - content = extract_text(result.xpath('.//p')) + content = extract_text(eval_xpath(result, './/p')) # append result results.append({'url': url, @@ -77,18 +77,34 @@ def response(resp): 'content': content}) # parse results again if nothing is found yet - for result in dom.xpath('//li[@class="b_algo"]'): - link = result.xpath('.//h2/a')[0] + for result in eval_xpath(dom, '//li[@class="b_algo"]'): + link = eval_xpath(result, './/h2/a')[0] url = link.attrib.get('href') title = extract_text(link) - content = extract_text(result.xpath('.//p')) + content = extract_text(eval_xpath(result, './/p')) # append result results.append({'url': url, 'title': title, 
'content': content}) - # return results + try: + result_len_container = "".join(eval_xpath(dom, '//span[@class="sb_count"]//text()')) + if "-" in result_len_container: + # Remove the part "from-to" for paginated request ... + result_len_container = result_len_container[result_len_container.find("-") * 2 + 2:] + + result_len_container = re.sub('[^0-9]', '', result_len_container) + if len(result_len_container) > 0: + result_len = int(result_len_container) + except Exception as e: + logger.debug('result error :\n%s', e) + pass + + if result_len and _get_offset_from_pageno(resp.search_params.get("pageno", 0)) > result_len: + return [] + + results.append({'number_of_results': result_len}) return results @@ -96,9 +112,9 @@ def response(resp): def _fetch_supported_languages(resp): supported_languages = [] dom = html.fromstring(resp.text) - options = dom.xpath('//div[@id="limit-languages"]//input') + options = eval_xpath(dom, '//div[@id="limit-languages"]//input') for option in options: - code = option.xpath('./@id')[0].replace('_', '-') + code = eval_xpath(option, './@id')[0].replace('_', '-') if code == 'nb': code = 'no' supported_languages.append(code) diff --git a/searx/engines/bing_images.py b/searx/engines/bing_images.py index e2495200c..44e2c3bbc 100644 --- a/searx/engines/bing_images.py +++ b/searx/engines/bing_images.py @@ -10,9 +10,6 @@ @stable no (HTML can change) @parse url, title, img_src - @todo currently there are up to 35 images receive per page, - because bing does not parse count=10. - limited response to 10 images """ from lxml import html @@ -28,10 +25,15 @@ safesearch = True time_range_support = True language_support = True supported_languages_url = 'https://www.bing.com/account/general' +number_of_results = 28 # search-url base_url = 'https://www.bing.com/' -search_string = 'images/search?{query}&count=10&first={offset}' +search_string = 'images/search'\ + '?{query}'\ + '&count={count}'\ + '&first={first}'\ + '&FORM=IBASEP' time_range_string = '&qft=+filterui:age-lt{interval}' time_range_dict = {'day': '1440', 'week': '10080', @@ -44,16 +46,14 @@ safesearch_types = {2: 'STRICT', 0: 'OFF'} -_quote_keys_regex = re.compile('({|,)([a-z][a-z0-9]*):(")', re.I | re.U) - - # do search-request def request(query, params): - offset = (params['pageno'] - 1) * 10 + 1 + offset = ((params['pageno'] - 1) * number_of_results) + 1 search_path = search_string.format( query=urlencode({'q': query}), - offset=offset) + count=number_of_results, + first=offset) language = match_language(params['language'], supported_languages, language_aliases).lower() @@ -77,32 +77,31 @@ def response(resp): dom = html.fromstring(resp.text) # parse results - for result in dom.xpath('//div[@id="mmComponent_images_1"]/ul/li/div/div[@class="imgpt"]'): - link = result.xpath('./a')[0] - - # TODO find actual title - title = link.xpath('.//img/@alt')[0] - - # parse json-data (it is required to add a space, to make it parsable) - json_data = loads(_quote_keys_regex.sub(r'\1"\2": \3', link.attrib.get('m'))) - - url = json_data.get('purl') - img_src = json_data.get('murl') - thumbnail = json_data.get('turl') - - # append result - results.append({'template': 'images.html', - 'url': url, - 'title': title, - 'content': '', - 'thumbnail_src': thumbnail, - 'img_src': img_src}) - - # TODO stop parsing if 10 images are found - # if len(results) >= 10: - # break + for result in dom.xpath('//div[@class="imgpt"]'): + + img_format = result.xpath('./div[contains(@class, "img_info")]/span/text()')[0] + # Microsoft seems to experiment with 
this code so don't make the path too specific, + # just catch the text section for the first anchor in img_info assuming this to be + # the originating site. + source = result.xpath('./div[contains(@class, "img_info")]//a/text()')[0] + + try: + m = loads(result.xpath('./a/@m')[0]) + + # strip 'Unicode private use area' highlighting, they render to Tux + # the Linux penguin and a standing diamond on my machine... + title = m.get('t', '').replace(u'\ue000', '').replace(u'\ue001', '') + results.append({'template': 'images.html', + 'url': m['purl'], + 'thumbnail_src': m['turl'], + 'img_src': m['murl'], + 'content': '', + 'title': title, + 'source': source, + 'img_format': img_format}) + except: + continue - # return results return results diff --git a/searx/engines/bing_videos.py b/searx/engines/bing_videos.py index bf17f9168..f1e636819 100644 --- a/searx/engines/bing_videos.py +++ b/searx/engines/bing_videos.py @@ -13,7 +13,6 @@ from json import loads from lxml import html from searx.engines.bing_images import _fetch_supported_languages, supported_languages_url -from searx.engines.xpath import extract_text from searx.url_utils import urlencode from searx.utils import match_language @@ -22,11 +21,16 @@ categories = ['videos'] paging = True safesearch = True time_range_support = True -number_of_results = 10 +number_of_results = 28 language_support = True -search_url = 'https://www.bing.com/videos/asyncv2?{query}&async=content&'\ - 'first={offset}&count={number_of_results}&CW=1366&CH=25&FORM=R5VR5' +base_url = 'https://www.bing.com/' +search_string = 'videos/search'\ + '?{query}'\ + '&count={count}'\ + '&first={first}'\ + '&scope=video'\ + '&FORM=QBLH' time_range_string = '&qft=+filterui:videoage-lt{interval}' time_range_dict = {'day': '1440', 'week': '10080', @@ -41,7 +45,12 @@ safesearch_types = {2: 'STRICT', # do search-request def request(query, params): - offset = (params['pageno'] - 1) * 10 + 1 + offset = ((params['pageno'] - 1) * number_of_results) + 1 + + search_path = search_string.format( + query=urlencode({'q': query}), + count=number_of_results, + first=offset) # safesearch cookie params['cookies']['SRCHHPGUSR'] = \ @@ -52,9 +61,7 @@ def request(query, params): params['cookies']['_EDGE_S'] = 'mkt=' + language + '&F=1' # query and paging - params['url'] = search_url.format(query=urlencode({'q': query}), - offset=offset, - number_of_results=number_of_results) + params['url'] = base_url + search_path # time range if params['time_range'] in time_range_dict: @@ -70,19 +77,18 @@ def response(resp): dom = html.fromstring(resp.text) for result in dom.xpath('//div[@class="dg_u"]'): - url = result.xpath('./div[@class="mc_vtvc"]/a/@href')[0] - url = 'https://bing.com' + url - title = extract_text(result.xpath('./div/a/div/div[@class="mc_vtvc_title"]/@title')) - content = extract_text(result.xpath('./div/a/div/div/div/div/text()')) - thumbnail = result.xpath('./div/a/div/div/img/@src')[0] - - results.append({'url': url, - 'title': title, - 'content': content, - 'thumbnail': thumbnail, - 'template': 'videos.html'}) - - if len(results) >= number_of_results: - break + try: + metadata = loads(result.xpath('.//div[@class="vrhdata"]/@vrhm')[0]) + info = ' - '.join(result.xpath('.//div[@class="mc_vtvc_meta_block"]//span/text()')).strip() + content = '{0} - {1}'.format(metadata['du'], info) + thumbnail = '{0}th?id={1}'.format(base_url, metadata['thid']) + results.append({'url': metadata['murl'], + 'thumbnail': thumbnail, + 'title': metadata.get('vt', ''), + 'content': content, + 'template': 
'videos.html'}) + + except: + continue return results diff --git a/searx/engines/btdigg.py b/searx/engines/btdigg.py index 40438673f..82eedc24b 100644 --- a/searx/engines/btdigg.py +++ b/searx/engines/btdigg.py @@ -1,7 +1,7 @@ """ BTDigg (Videos, Music, Files) - @website https://btdigg.org + @website https://btdig.com @provide-api yes (on demand) @using-api no @@ -21,7 +21,7 @@ categories = ['videos', 'music', 'files'] paging = True # search-url -url = 'https://btdigg.org' +url = 'https://btdig.com' search_url = url + '/search?q={search_term}&p={pageno}' @@ -39,7 +39,7 @@ def response(resp): dom = html.fromstring(resp.text) - search_res = dom.xpath('//div[@id="search_res"]/table/tr') + search_res = dom.xpath('//div[@class="one_result"]') # return empty array if nothing is found if not search_res: @@ -47,46 +47,39 @@ def response(resp): # parse results for result in search_res: - link = result.xpath('.//td[@class="torrent_name"]//a')[0] + link = result.xpath('.//div[@class="torrent_name"]//a')[0] href = urljoin(url, link.attrib.get('href')) title = extract_text(link) - content = extract_text(result.xpath('.//pre[@class="snippet"]')[0]) - content = "<br />".join(content.split("\n")) - filesize = result.xpath('.//span[@class="attr_val"]/text()')[0].split()[0] - filesize_multiplier = result.xpath('.//span[@class="attr_val"]/text()')[0].split()[1] - files = result.xpath('.//span[@class="attr_val"]/text()')[1] - seed = result.xpath('.//span[@class="attr_val"]/text()')[2] + excerpt = result.xpath('.//div[@class="torrent_excerpt"]')[0] + content = html.tostring(excerpt, encoding='unicode', method='text', with_tail=False) + # it is better to emit <br/> instead of |, but html tags are verboten + content = content.strip().replace('\n', ' | ') + content = ' '.join(content.split()) - # convert seed to int if possible - if seed.isdigit(): - seed = int(seed) - else: - seed = 0 - - leech = 0 + filesize = result.xpath('.//span[@class="torrent_size"]/text()')[0].split()[0] + filesize_multiplier = result.xpath('.//span[@class="torrent_size"]/text()')[0].split()[1] + files = (result.xpath('.//span[@class="torrent_files"]/text()') or ['1'])[0] # convert filesize to byte if possible filesize = get_torrent_size(filesize, filesize_multiplier) # convert files to int if possible - if files.isdigit(): + try: files = int(files) - else: + except: files = None - magnetlink = result.xpath('.//td[@class="ttth"]//a')[0].attrib['href'] + magnetlink = result.xpath('.//div[@class="torrent_magnet"]//a')[0].attrib['href'] # append result results.append({'url': href, 'title': title, 'content': content, - 'seed': seed, - 'leech': leech, 'filesize': filesize, 'files': files, 'magnetlink': magnetlink, 'template': 'torrent.html'}) # return results sorted by seeder - return sorted(results, key=itemgetter('seed'), reverse=True) + return results diff --git a/searx/engines/dailymotion.py b/searx/engines/dailymotion.py index 06a9c41f3..1038e64bf 100644 --- a/searx/engines/dailymotion.py +++ b/searx/engines/dailymotion.py @@ -15,7 +15,7 @@ from json import loads from datetime import datetime from searx.url_utils import urlencode -from searx.utils import match_language +from searx.utils import match_language, html_to_text # engine dependent config categories = ['videos'] @@ -26,7 +26,7 @@ language_support = True # see http://www.dailymotion.com/doc/api/obj-video.html search_url = 'https://api.dailymotion.com/videos?fields=created_time,title,description,duration,url,thumbnail_360_url,id&sort=relevance&limit=5&page={pageno}&{query}' # noqa 
embedded_url = '<iframe frameborder="0" width="540" height="304" ' +\ - 'data-src="//www.dailymotion.com/embed/video/{videoid}" allowfullscreen></iframe>' + 'data-src="https://www.dailymotion.com/embed/video/{videoid}" allowfullscreen></iframe>' supported_languages_url = 'https://api.dailymotion.com/languages' @@ -59,7 +59,7 @@ def response(resp): for res in search_res['list']: title = res['title'] url = res['url'] - content = res['description'] + content = html_to_text(res['description']) thumbnail = res['thumbnail_360_url'] publishedDate = datetime.fromtimestamp(res['created_time'], None) embedded = embedded_url.format(videoid=res['id']) diff --git a/searx/engines/deviantart.py b/searx/engines/deviantart.py index bb85c6dc5..a0e27e622 100644 --- a/searx/engines/deviantart.py +++ b/searx/engines/deviantart.py @@ -24,7 +24,7 @@ time_range_support = True # search-url base_url = 'https://www.deviantart.com/' -search_url = base_url + 'browse/all/?offset={offset}&{query}' +search_url = base_url + 'search?page={page}&{query}' time_range_url = '&order={range}' time_range_dict = {'day': 11, @@ -37,9 +37,7 @@ def request(query, params): if params['time_range'] and params['time_range'] not in time_range_dict: return params - offset = (params['pageno'] - 1) * 24 - - params['url'] = search_url.format(offset=offset, + params['url'] = search_url.format(page=params['pageno'], query=urlencode({'q': query})) if params['time_range'] in time_range_dict: params['url'] += time_range_url.format(range=time_range_dict[params['time_range']]) @@ -57,28 +55,27 @@ def response(resp): dom = html.fromstring(resp.text) - regex = re.compile(r'\/200H\/') - # parse results - for result in dom.xpath('.//span[@class="thumb wide"]'): - link = result.xpath('.//a[@class="torpedo-thumb-link"]')[0] - url = link.attrib.get('href') - title = extract_text(result.xpath('.//span[@class="title"]')) - thumbnail_src = link.xpath('.//img')[0].attrib.get('src') - img_src = regex.sub('/', thumbnail_src) - - # http to https, remove domain sharding - thumbnail_src = re.sub(r"https?://(th|fc)\d+.", "https://th01.", thumbnail_src) - thumbnail_src = re.sub(r"http://", "https://", thumbnail_src) - - url = re.sub(r"http://(.*)\.deviantart\.com/", "https://\\1.deviantart.com/", url) - - # append result - results.append({'url': url, - 'title': title, - 'img_src': img_src, - 'thumbnail_src': thumbnail_src, - 'template': 'images.html'}) + for row in dom.xpath('//div[contains(@data-hook, "content_row")]'): + for result in row.xpath('./div'): + link = result.xpath('.//a[@data-hook="deviation_link"]')[0] + url = link.attrib.get('href') + title = link.attrib.get('title') + thumbnail_src = result.xpath('.//img')[0].attrib.get('src') + img_src = thumbnail_src + + # http to https, remove domain sharding + thumbnail_src = re.sub(r"https?://(th|fc)\d+.", "https://th01.", thumbnail_src) + thumbnail_src = re.sub(r"http://", "https://", thumbnail_src) + + url = re.sub(r"http://(.*)\.deviantart\.com/", "https://\\1.deviantart.com/", url) + + # append result + results.append({'url': url, + 'title': title, + 'img_src': img_src, + 'thumbnail_src': thumbnail_src, + 'template': 'images.html'}) # return results return results diff --git a/searx/engines/dictzone.py b/searx/engines/dictzone.py index 7cc44df73..423af0971 100644 --- a/searx/engines/dictzone.py +++ b/searx/engines/dictzone.py @@ -11,11 +11,11 @@ import re from lxml import html -from searx.utils import is_valid_lang +from searx.utils import is_valid_lang, eval_xpath from searx.url_utils import urljoin 
categories = ['general'] -url = u'http://dictzone.com/{from_lang}-{to_lang}-dictionary/{query}' +url = u'https://dictzone.com/{from_lang}-{to_lang}-dictionary/{query}' weight = 100 parser_re = re.compile(b'.*?([a-z]+)-([a-z]+) ([^ ]+)$', re.I) @@ -47,14 +47,14 @@ def response(resp): dom = html.fromstring(resp.text) - for k, result in enumerate(dom.xpath(results_xpath)[1:]): + for k, result in enumerate(eval_xpath(dom, results_xpath)[1:]): try: - from_result, to_results_raw = result.xpath('./td') + from_result, to_results_raw = eval_xpath(result, './td') except: continue to_results = [] - for to_result in to_results_raw.xpath('./p/a'): + for to_result in eval_xpath(to_results_raw, './p/a'): t = to_result.text_content() if t.strip(): to_results.append(to_result.text_content()) diff --git a/searx/engines/digg.py b/searx/engines/digg.py index 4369ccb84..073410eb0 100644 --- a/searx/engines/digg.py +++ b/searx/engines/digg.py @@ -15,7 +15,8 @@ import string from dateutil import parser from json import loads from lxml import html -from searx.url_utils import quote_plus +from searx.url_utils import urlencode +from datetime import datetime # engine dependent config categories = ['news', 'social media'] @@ -23,7 +24,7 @@ paging = True # search-url base_url = 'https://digg.com/' -search_url = base_url + 'api/search/{query}.json?position={position}&format=html' +search_url = base_url + 'api/search/?{query}&from={position}&size=20&format=html' # specific xpath variables results_xpath = '//article' @@ -38,9 +39,9 @@ digg_cookie_chars = string.ascii_uppercase + string.ascii_lowercase +\ # do search-request def request(query, params): - offset = (params['pageno'] - 1) * 10 + offset = (params['pageno'] - 1) * 20 params['url'] = search_url.format(position=offset, - query=quote_plus(query)) + query=urlencode({'q': query})) params['cookies']['frontend.auid'] = ''.join(random.choice( digg_cookie_chars) for _ in range(22)) return params @@ -52,30 +53,17 @@ def response(resp): search_result = loads(resp.text) - if 'html' not in search_result or search_result['html'] == '': - return results - - dom = html.fromstring(search_result['html']) - # parse results - for result in dom.xpath(results_xpath): - url = result.attrib.get('data-contenturl') - thumbnail = result.xpath('.//img')[0].attrib.get('src') - title = ''.join(result.xpath(title_xpath)) - content = ''.join(result.xpath(content_xpath)) - pubdate = result.xpath(pubdate_xpath)[0].attrib.get('datetime') - publishedDate = parser.parse(pubdate) - - # http to https - thumbnail = thumbnail.replace("http://static.digg.com", "https://static.digg.com") + for result in search_result['mapped']: + published = datetime.strptime(result['created']['ISO'], "%Y-%m-%d %H:%M:%S") # append result - results.append({'url': url, - 'title': title, - 'content': content, + results.append({'url': result['url'], + 'title': result['title'], + 'content': result['excerpt'], 'template': 'videos.html', - 'publishedDate': publishedDate, - 'thumbnail': thumbnail}) + 'publishedDate': published, + 'thumbnail': result['images']['thumbImage']}) # return results return results diff --git a/searx/engines/doku.py b/searx/engines/doku.py index a391be444..d20e66026 100644 --- a/searx/engines/doku.py +++ b/searx/engines/doku.py @@ -11,6 +11,7 @@ from lxml.html import fromstring from searx.engines.xpath import extract_text +from searx.utils import eval_xpath from searx.url_utils import urlencode # engine dependent config @@ -45,16 +46,16 @@ def response(resp): # parse results # Quickhits - for r in 
doc.xpath('//div[@class="search_quickresult"]/ul/li'): + for r in eval_xpath(doc, '//div[@class="search_quickresult"]/ul/li'): try: - res_url = r.xpath('.//a[@class="wikilink1"]/@href')[-1] + res_url = eval_xpath(r, './/a[@class="wikilink1"]/@href')[-1] except: continue if not res_url: continue - title = extract_text(r.xpath('.//a[@class="wikilink1"]/@title')) + title = extract_text(eval_xpath(r, './/a[@class="wikilink1"]/@title')) # append result results.append({'title': title, @@ -62,13 +63,13 @@ def response(resp): 'url': base_url + res_url}) # Search results - for r in doc.xpath('//dl[@class="search_results"]/*'): + for r in eval_xpath(doc, '//dl[@class="search_results"]/*'): try: if r.tag == "dt": - res_url = r.xpath('.//a[@class="wikilink1"]/@href')[-1] - title = extract_text(r.xpath('.//a[@class="wikilink1"]/@title')) + res_url = eval_xpath(r, './/a[@class="wikilink1"]/@href')[-1] + title = extract_text(eval_xpath(r, './/a[@class="wikilink1"]/@title')) elif r.tag == "dd": - content = extract_text(r.xpath('.')) + content = extract_text(eval_xpath(r, '.')) # append result results.append({'title': title, diff --git a/searx/engines/duckduckgo.py b/searx/engines/duckduckgo.py index fb8f523ac..0d2c0af2d 100644 --- a/searx/engines/duckduckgo.py +++ b/searx/engines/duckduckgo.py @@ -18,7 +18,7 @@ from json import loads from searx.engines.xpath import extract_text from searx.poolrequests import get from searx.url_utils import urlencode -from searx.utils import match_language +from searx.utils import match_language, eval_xpath # engine dependent config categories = ['general'] @@ -65,21 +65,36 @@ def get_region_code(lang, lang_list=[]): def request(query, params): - if params['time_range'] and params['time_range'] not in time_range_dict: + if params['time_range'] not in (None, 'None', '') and params['time_range'] not in time_range_dict: return params offset = (params['pageno'] - 1) * 30 region_code = get_region_code(params['language'], supported_languages) - if region_code: - params['url'] = url.format( - query=urlencode({'q': query, 'kl': region_code}), offset=offset, dc_param=offset) + params['url'] = 'https://duckduckgo.com/html/' + if params['pageno'] > 1: + params['method'] = 'POST' + params['data']['q'] = query + params['data']['s'] = offset + params['data']['dc'] = 30 + params['data']['nextParams'] = '' + params['data']['v'] = 'l' + params['data']['o'] = 'json' + params['data']['api'] = '/d.js' + if params['time_range'] in time_range_dict: + params['data']['df'] = time_range_dict[params['time_range']] + if region_code: + params['data']['kl'] = region_code else: - params['url'] = url.format( - query=urlencode({'q': query}), offset=offset, dc_param=offset) + if region_code: + params['url'] = url.format( + query=urlencode({'q': query, 'kl': region_code}), offset=offset, dc_param=offset) + else: + params['url'] = url.format( + query=urlencode({'q': query}), offset=offset, dc_param=offset) - if params['time_range'] in time_range_dict: - params['url'] += time_range_url.format(range=time_range_dict[params['time_range']]) + if params['time_range'] in time_range_dict: + params['url'] += time_range_url.format(range=time_range_dict[params['time_range']]) return params @@ -91,17 +106,19 @@ def response(resp): doc = fromstring(resp.text) # parse results - for r in doc.xpath(result_xpath): + for i, r in enumerate(eval_xpath(doc, result_xpath)): + if i >= 30: + break try: - res_url = r.xpath(url_xpath)[-1] + res_url = eval_xpath(r, url_xpath)[-1] except: continue if not res_url: continue - title = 
extract_text(r.xpath(title_xpath)) - content = extract_text(r.xpath(content_xpath)) + title = extract_text(eval_xpath(r, title_xpath)) + content = extract_text(eval_xpath(r, content_xpath)) # append result results.append({'title': title, diff --git a/searx/engines/duckduckgo_definitions.py b/searx/engines/duckduckgo_definitions.py index 957a13ea6..79d10c303 100644 --- a/searx/engines/duckduckgo_definitions.py +++ b/searx/engines/duckduckgo_definitions.py @@ -1,3 +1,14 @@ +""" +DuckDuckGo (definitions) + +- `Instant Answer API`_ +- `DuckDuckGo query`_ + +.. _Instant Answer API: https://duckduckgo.com/api +.. _DuckDuckGo query: https://api.duckduckgo.com/?q=DuckDuckGo&format=json&pretty=1 + +""" + import json from lxml import html from re import compile @@ -25,7 +36,8 @@ def result_to_text(url, text, htmlResult): def request(query, params): params['url'] = url.format(query=urlencode({'q': query})) language = match_language(params['language'], supported_languages, language_aliases) - params['headers']['Accept-Language'] = language.split('-')[0] + language = language.split('-')[0] + params['headers']['Accept-Language'] = language return params @@ -43,8 +55,9 @@ def response(resp): # add answer if there is one answer = search_res.get('Answer', '') - if answer != '': - results.append({'answer': html_to_text(answer)}) + if answer: + if search_res.get('AnswerType', '') not in ['calc']: + results.append({'answer': html_to_text(answer)}) # add infobox if 'Definition' in search_res: diff --git a/searx/engines/duden.py b/searx/engines/duden.py index 881ff9d9c..cf2f1a278 100644 --- a/searx/engines/duden.py +++ b/searx/engines/duden.py @@ -11,7 +11,8 @@ from lxml import html, etree import re from searx.engines.xpath import extract_text -from searx.url_utils import quote +from searx.utils import eval_xpath +from searx.url_utils import quote, urljoin from searx import logger categories = ['general'] @@ -20,7 +21,7 @@ language_support = False # search-url base_url = 'https://www.duden.de/' -search_url = base_url + 'suchen/dudenonline/{query}?page={offset}' +search_url = base_url + 'suchen/dudenonline/{query}?search_api_fulltext=&page={offset}' def request(query, params): @@ -35,7 +36,11 @@ def request(query, params): ''' offset = (params['pageno'] - 1) - params['url'] = search_url.format(offset=offset, query=quote(query)) + if offset == 0: + search_url_fmt = base_url + 'suchen/dudenonline/{query}' + params['url'] = search_url_fmt.format(query=quote(query)) + else: + params['url'] = search_url.format(offset=offset, query=quote(query)) return params @@ -48,9 +53,9 @@ def response(resp): dom = html.fromstring(resp.text) try: - number_of_results_string = re.sub('[^0-9]', '', dom.xpath( - '//a[@class="active" and contains(@href,"/suchen/dudenonline")]/span/text()')[0] - ) + number_of_results_string =\ + re.sub('[^0-9]', '', + eval_xpath(dom, '//a[@class="active" and contains(@href,"/suchen/dudenonline")]/span/text()')[0]) results.append({'number_of_results': int(number_of_results_string)}) @@ -58,13 +63,12 @@ def response(resp): logger.debug("Couldn't read number of results.") pass - for result in dom.xpath('//section[@class="wide" and not(contains(@style,"overflow:hidden"))]'): + for result in eval_xpath(dom, '//section[not(contains(@class, "essay"))]'): try: - logger.debug("running for %s" % str(result)) - link = result.xpath('.//h2/a')[0] - url = link.attrib.get('href') - title = result.xpath('string(.//h2/a)') - content = extract_text(result.xpath('.//p')) + url = eval_xpath(result, 
'.//h2/a')[0].get('href') + url = urljoin(base_url, url) + title = eval_xpath(result, 'string(.//h2/a)').strip() + content = extract_text(eval_xpath(result, './/p')) # append result results.append({'url': url, 'title': title, diff --git a/searx/engines/dummy-offline.py b/searx/engines/dummy-offline.py new file mode 100644 index 000000000..13a9ecc01 --- /dev/null +++ b/searx/engines/dummy-offline.py @@ -0,0 +1,12 @@ +""" + Dummy Offline + + @results one result + @stable yes +""" + + +def search(query, request_params): + return [{ + 'result': 'this is what you get', + }] diff --git a/searx/engines/fdroid.py b/searx/engines/fdroid.py index a6b01a8ee..4066dc716 100644 --- a/searx/engines/fdroid.py +++ b/searx/engines/fdroid.py @@ -18,13 +18,13 @@ categories = ['files'] paging = True # search-url -base_url = 'https://f-droid.org/' -search_url = base_url + 'repository/browse/?{query}' +base_url = 'https://search.f-droid.org/' +search_url = base_url + '?{query}' # do search-request def request(query, params): - query = urlencode({'fdfilter': query, 'fdpage': params['pageno']}) + query = urlencode({'q': query, 'page': params['pageno'], 'lang': ''}) params['url'] = search_url.format(query=query) return params @@ -35,17 +35,16 @@ def response(resp): dom = html.fromstring(resp.text) - for app in dom.xpath('//div[@id="appheader"]'): - url = app.xpath('./ancestor::a/@href')[0] - title = app.xpath('./p/span/text()')[0] - img_src = app.xpath('.//img/@src')[0] - - content = extract_text(app.xpath('./p')[0]) - content = content.replace(title, '', 1).strip() - - results.append({'url': url, - 'title': title, - 'content': content, - 'img_src': img_src}) + for app in dom.xpath('//a[@class="package-header"]'): + app_url = app.xpath('./@href')[0] + app_title = extract_text(app.xpath('./div/h4[@class="package-name"]/text()')) + app_content = extract_text(app.xpath('./div/div/span[@class="package-summary"]')).strip() \ + + ' - ' + extract_text(app.xpath('./div/div/span[@class="package-license"]')).strip() + app_img_src = app.xpath('./img[@class="package-icon"]/@src')[0] + + results.append({'url': app_url, + 'title': app_title, + 'content': app_content, + 'img_src': app_img_src}) return results diff --git a/searx/engines/flickr_noapi.py b/searx/engines/flickr_noapi.py index 08f07f7ce..c8ee34f7a 100644 --- a/searx/engines/flickr_noapi.py +++ b/searx/engines/flickr_noapi.py @@ -17,7 +17,7 @@ from time import time import re from searx.engines import logger from searx.url_utils import urlencode - +from searx.utils import ecma_unescape, html_to_text logger = logger.getChild('flickr-noapi') @@ -27,7 +27,7 @@ url = 'https://www.flickr.com/' search_url = url + 'search?{query}&page={page}' time_range_url = '&min_upload_date={start}&max_upload_date={end}' photo_url = 'https://www.flickr.com/photos/{userid}/{photoid}' -regex = re.compile(r"\"search-photos-lite-models\",\"photos\":(.*}),\"totalItems\":", re.DOTALL) +modelexport_re = re.compile(r"^\s*modelExport:\s*({.*}),$", re.M) image_sizes = ('o', 'k', 'h', 'b', 'c', 'z', 'n', 'm', 't', 'q', 's') paging = True @@ -57,40 +57,44 @@ def request(query, params): def response(resp): results = [] - matches = regex.search(resp.text) + matches = modelexport_re.search(resp.text) if matches is None: return results match = matches.group(1) - search_results = loads(match) - - if '_data' not in search_results: - return [] + model_export = loads(match) - photos = search_results['_data'] + if 'legend' not in model_export: + return results - for photo in photos: + legend = 
model_export['legend'] - # In paged configuration, the first pages' photos - # are represented by a None object - if photo is None: - continue + # handle empty page + if not legend or not legend[0]: + return results + for index in legend: + photo = model_export['main'][index[0]][int(index[1])][index[2]][index[3]][int(index[4])] + author = ecma_unescape(photo.get('realname', '')) + source = ecma_unescape(photo.get('username', '')) + ' @ Flickr' + title = ecma_unescape(photo.get('title', '')) + content = html_to_text(ecma_unescape(photo.get('description', ''))) img_src = None # From the biggest to the lowest format for image_size in image_sizes: if image_size in photo['sizes']: img_src = photo['sizes'][image_size]['url'] + img_format = 'jpg ' \ + + str(photo['sizes'][image_size]['width']) \ + + 'x' \ + + str(photo['sizes'][image_size]['height']) break if not img_src: logger.debug('cannot find valid image size: {0}'.format(repr(photo))) continue - if 'ownerNsid' not in photo: - continue - # For a bigger thumbnail, keep only the url_z, not the url_n if 'n' in photo['sizes']: thumbnail_src = photo['sizes']['n']['url'] @@ -99,19 +103,28 @@ def response(resp): else: thumbnail_src = img_src - url = build_flickr_url(photo['ownerNsid'], photo['id']) - - title = photo.get('title', '') - - author = photo['username'] - - # append result - results.append({'url': url, - 'title': title, - 'img_src': img_src, - 'thumbnail_src': thumbnail_src, - 'content': '', - 'author': author, - 'template': 'images.html'}) + if 'ownerNsid' not in photo: + # should not happen, disowned photo? Show it anyway + url = img_src + else: + url = build_flickr_url(photo['ownerNsid'], photo['id']) + + result = { + 'url': url, + 'img_src': img_src, + 'thumbnail_src': thumbnail_src, + 'source': source, + 'img_format': img_format, + 'template': 'images.html' + } + try: + result['author'] = author + result['title'] = title + result['content'] = content + except: + result['author'] = '' + result['title'] = '' + result['content'] = '' + results.append(result) return results diff --git a/searx/engines/framalibre.py b/searx/engines/framalibre.py index 146cdaeec..f3441fa5f 100644 --- a/searx/engines/framalibre.py +++ b/searx/engines/framalibre.py @@ -10,7 +10,10 @@ @parse url, title, content, thumbnail, img_src """ -from cgi import escape +try: + from cgi import escape +except: + from html import escape from lxml import html from searx.engines.xpath import extract_text from searx.url_utils import urljoin, urlencode diff --git a/searx/engines/genius.py b/searx/engines/genius.py index b265e9d76..aa5afad9b 100644 --- a/searx/engines/genius.py +++ b/searx/engines/genius.py @@ -72,6 +72,7 @@ def parse_album(hit): result.update({'content': 'Released: {}'.format(year)}) return result + parse = {'lyric': parse_lyric, 'song': parse_lyric, 'artist': parse_artist, 'album': parse_album} diff --git a/searx/engines/gigablast.py b/searx/engines/gigablast.py index a6aa5d718..2bb29a9fe 100644 --- a/searx/engines/gigablast.py +++ b/searx/engines/gigablast.py @@ -14,7 +14,9 @@ import random from json import loads from time import time from lxml.html import fromstring +from searx.poolrequests import get from searx.url_utils import urlencode +from searx.utils import eval_xpath # engine dependent config categories = ['general'] @@ -30,13 +32,9 @@ search_string = 'search?{query}'\ '&c=main'\ '&s={offset}'\ '&format=json'\ - '&qh=0'\ - '&qlang={lang}'\ + '&langcountry={lang}'\ '&ff={safesearch}'\ - '&rxiec={rxieu}'\ - '&ulse={ulse}'\ - '&rand={rxikd}' # current 
unix timestamp - + '&rand={rxikd}' # specific xpath variables results_xpath = '//response//result' url_xpath = './/url' @@ -45,9 +43,26 @@ content_xpath = './/sum' supported_languages_url = 'https://gigablast.com/search?&rxikd=1' +extra_param = '' # gigablast requires a random extra parameter +# which can be extracted from the source code of the search page + + +def parse_extra_param(text): + global extra_param + param_lines = [x for x in text.splitlines() if x.startswith('var url=') or x.startswith('url=url+')] + extra_param = '' + for l in param_lines: + extra_param += l.split("'")[1] + extra_param = extra_param.split('&')[-1] + + +def init(engine_settings=None): + parse_extra_param(get('http://gigablast.com/search?c=main&qlangcountry=en-us&q=south&s=10').text) + # do search-request def request(query, params): + print("EXTRAPARAM:", extra_param) offset = (params['pageno'] - 1) * number_of_results if params['language'] == 'all': @@ -66,13 +81,11 @@ def request(query, params): search_path = search_string.format(query=urlencode({'q': query}), offset=offset, number_of_results=number_of_results, - rxikd=int(time() * 1000), - rxieu=random.randint(1000000000, 9999999999), - ulse=random.randint(100000000, 999999999), lang=language, + rxikd=int(time() * 1000), safesearch=safesearch) - params['url'] = base_url + search_path + params['url'] = base_url + search_path + '&' + extra_param return params @@ -82,7 +95,11 @@ def response(resp): results = [] # parse results - response_json = loads(resp.text) + try: + response_json = loads(resp.text) + except: + parse_extra_param(resp.text) + raise Exception('extra param expired, please reload') for result in response_json['results']: # append result @@ -98,9 +115,9 @@ def response(resp): def _fetch_supported_languages(resp): supported_languages = [] dom = fromstring(resp.text) - links = dom.xpath('//span[@id="menu2"]/a') + links = eval_xpath(dom, '//span[@id="menu2"]/a') for link in links: - href = link.xpath('./@href')[0].split('lang%3A') + href = eval_xpath(link, './@href')[0].split('lang%3A') if len(href) == 2: code = href[1].split('_') if len(code) == 2: diff --git a/searx/engines/google.py b/searx/engines/google.py index 03f0523e7..eed3a044e 100644 --- a/searx/engines/google.py +++ b/searx/engines/google.py @@ -14,7 +14,7 @@ from lxml import html, etree from searx.engines.xpath import extract_text, extract_url from searx import logger from searx.url_utils import urlencode, urlparse, parse_qsl -from searx.utils import match_language +from searx.utils import match_language, eval_xpath logger = logger.getChild('google engine') @@ -107,13 +107,12 @@ images_path = '/images' supported_languages_url = 'https://www.google.com/preferences?#languages' # specific xpath variables -results_xpath = '//div[@class="g"]' -url_xpath = './/h3/a/@href' -title_xpath = './/h3' -content_xpath = './/span[@class="st"]' -content_misc_xpath = './/div[@class="f slp"]' -suggestion_xpath = '//p[@class="_Bmc"]' -spelling_suggestion_xpath = '//a[@class="spell"]' +results_xpath = '//div[contains(@class, "ZINbbc")]' +url_xpath = './/div[@class="kCrYT"][1]/a/@href' +title_xpath = './/div[@class="kCrYT"][1]/a/div[1]' +content_xpath = './/div[@class="kCrYT"][2]//div[contains(@class, "BNeawe")]//div[contains(@class, "BNeawe")]' +suggestion_xpath = '//div[contains(@class, "ZINbbc")][last()]//div[@class="rVLSBd"]/a//div[contains(@class, "BNeawe")]' +spelling_suggestion_xpath = '//div[@id="scc"]//a' # map : detail location map_address_xpath = './/div[@class="s"]//table//td[2]/span/text()' @@ 
-156,7 +155,7 @@ def parse_url(url_string, google_hostname): # returns extract_text on the first result selected by the xpath or None def extract_text_from_dom(result, xpath): - r = result.xpath(xpath) + r = eval_xpath(result, xpath) if len(r) > 0: return extract_text(r[0]) return None @@ -199,9 +198,6 @@ def request(query, params): params['headers']['Accept-Language'] = language + ',' + language + '-' + country params['headers']['Accept'] = 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8' - # Force Internet Explorer 12 user agent to avoid loading the new UI that Searx can't parse - params['headers']['User-Agent'] = "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)" - params['google_hostname'] = google_hostname return params @@ -226,21 +222,21 @@ def response(resp): # convert the text to dom dom = html.fromstring(resp.text) - instant_answer = dom.xpath('//div[@id="_vBb"]//text()') + instant_answer = eval_xpath(dom, '//div[@id="_vBb"]//text()') if instant_answer: results.append({'answer': u' '.join(instant_answer)}) try: - results_num = int(dom.xpath('//div[@id="resultStats"]//text()')[0] + results_num = int(eval_xpath(dom, '//div[@id="resultStats"]//text()')[0] .split()[1].replace(',', '')) results.append({'number_of_results': results_num}) except: pass # parse results - for result in dom.xpath(results_xpath): + for result in eval_xpath(dom, results_xpath): try: - title = extract_text(result.xpath(title_xpath)[0]) - url = parse_url(extract_url(result.xpath(url_xpath), google_url), google_hostname) + title = extract_text(eval_xpath(result, title_xpath)[0]) + url = parse_url(extract_url(eval_xpath(result, url_xpath), google_url), google_hostname) parsed_url = urlparse(url, google_hostname) # map result @@ -249,7 +245,7 @@ def response(resp): continue # if parsed_url.path.startswith(maps_path) or parsed_url.netloc.startswith(map_hostname_start): # print "yooooo"*30 - # x = result.xpath(map_near) + # x = eval_xpath(result, map_near) # if len(x) > 0: # # map : near the location # results = results + parse_map_near(parsed_url, x, google_hostname) @@ -273,9 +269,7 @@ def response(resp): content = extract_text_from_dom(result, content_xpath) if content is None: continue - content_misc = extract_text_from_dom(result, content_misc_xpath) - if content_misc is not None: - content = content_misc + "<br />" + content + # append result results.append({'url': url, 'title': title, @@ -286,11 +280,11 @@ def response(resp): continue # parse suggestion - for suggestion in dom.xpath(suggestion_xpath): + for suggestion in eval_xpath(dom, suggestion_xpath): # append suggestion results.append({'suggestion': extract_text(suggestion)}) - for correction in dom.xpath(spelling_suggestion_xpath): + for correction in eval_xpath(dom, spelling_suggestion_xpath): results.append({'correction': extract_text(correction)}) # return results @@ -299,9 +293,9 @@ def response(resp): def parse_images(result, google_hostname): results = [] - for image in result.xpath(images_xpath): - url = parse_url(extract_text(image.xpath(image_url_xpath)[0]), google_hostname) - img_src = extract_text(image.xpath(image_img_src_xpath)[0]) + for image in eval_xpath(result, images_xpath): + url = parse_url(extract_text(eval_xpath(image, image_url_xpath)[0]), google_hostname) + img_src = extract_text(eval_xpath(image, image_img_src_xpath)[0]) # append result results.append({'url': url, @@ -388,10 +382,10 @@ def attributes_to_html(attributes): def _fetch_supported_languages(resp): supported_languages = {} dom = 
html.fromstring(resp.text) - options = dom.xpath('//*[@id="langSec"]//input[@name="lr"]') + options = eval_xpath(dom, '//*[@id="langSec"]//input[@name="lr"]') for option in options: - code = option.xpath('./@value')[0].split('_')[-1] - name = option.xpath('./@data-name')[0].title() + code = eval_xpath(option, './@value')[0].split('_')[-1] + name = eval_xpath(option, './@data-name')[0].title() supported_languages[code] = {"name": name} return supported_languages diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py index d9a49e9cc..636913114 100644 --- a/searx/engines/google_images.py +++ b/searx/engines/google_images.py @@ -70,11 +70,21 @@ def response(resp): try: metadata = loads(result) - img_format = "{0} {1}x{2}".format(metadata['ity'], str(metadata['ow']), str(metadata['oh'])) - source = "{0} ({1})".format(metadata['st'], metadata['isu']) + + img_format = metadata.get('ity', '') + img_width = metadata.get('ow', '') + img_height = metadata.get('oh', '') + if img_width and img_height: + img_format += " {0}x{1}".format(img_width, img_height) + + source = metadata.get('st', '') + source_url = metadata.get('isu', '') + if source_url: + source += " ({0})".format(source_url) + results.append({'url': metadata['ru'], 'title': metadata['pt'], - 'content': metadata['s'], + 'content': metadata.get('s', ''), 'source': source, 'img_format': img_format, 'thumbnail_src': metadata['tu'], diff --git a/searx/engines/google_videos.py b/searx/engines/google_videos.py index 9a41b2dfa..fd6b2e3be 100644 --- a/searx/engines/google_videos.py +++ b/searx/engines/google_videos.py @@ -75,15 +75,17 @@ def response(resp): # get thumbnails script = str(dom.xpath('//script[contains(., "_setImagesSrc")]')[0].text) - id = result.xpath('.//div[@class="s"]//img/@id')[0] - thumbnails_data = re.findall('s=\'(.*?)(?:\\\\[a-z,1-9,\\\\]+\'|\')\;var ii=\[(?:|[\'vidthumb\d+\',]+)\'' + id, - script) - tmp = [] - if len(thumbnails_data) != 0: - tmp = re.findall('(data:image/jpeg;base64,[a-z,A-Z,0-9,/,\+]+)', thumbnails_data[0]) - thumbnail = '' - if len(tmp) != 0: - thumbnail = tmp[-1] + ids = result.xpath('.//div[@class="s"]//img/@id') + if len(ids) > 0: + thumbnails_data = \ + re.findall('s=\'(.*?)(?:\\\\[a-z,1-9,\\\\]+\'|\')\;var ii=\[(?:|[\'vidthumb\d+\',]+)\'' + ids[0], + script) + tmp = [] + if len(thumbnails_data) != 0: + tmp = re.findall('(data:image/jpeg;base64,[a-z,A-Z,0-9,/,\+]+)', thumbnails_data[0]) + thumbnail = '' + if len(tmp) != 0: + thumbnail = tmp[-1] # append result results.append({'url': url, diff --git a/searx/engines/ina.py b/searx/engines/ina.py index 37a05f099..ea509649f 100644 --- a/searx/engines/ina.py +++ b/searx/engines/ina.py @@ -32,7 +32,7 @@ base_url = 'https://www.ina.fr' search_url = base_url + '/layout/set/ajax/recherche/result?autopromote=&hf={ps}&b={start}&type=Video&r=&{query}' # specific xpath variables -results_xpath = '//div[contains(@class,"search-results--list")]/div[@class="media"]' +results_xpath = '//div[contains(@class,"search-results--list")]//div[@class="media-body"]' url_xpath = './/a/@href' title_xpath = './/h3[@class="h3--title media-heading"]' thumbnail_xpath = './/img/@src' @@ -65,8 +65,11 @@ def response(resp): videoid = result.xpath(url_xpath)[0] url = base_url + videoid title = p.unescape(extract_text(result.xpath(title_xpath))) - thumbnail = extract_text(result.xpath(thumbnail_xpath)[0]) - if thumbnail[0] == '/': + try: + thumbnail = extract_text(result.xpath(thumbnail_xpath)[0]) + except: + thumbnail = '' + if thumbnail and thumbnail[0] == '/': 
thumbnail = base_url + thumbnail d = extract_text(result.xpath(publishedDate_xpath)[0]) d = d.split('/') diff --git a/searx/engines/invidious.py b/searx/engines/invidious.py new file mode 100644 index 000000000..8d81691fc --- /dev/null +++ b/searx/engines/invidious.py @@ -0,0 +1,100 @@ +# Invidious (Videos) +# +# @website https://invidio.us/ +# @provide-api yes (https://github.com/omarroth/invidious/wiki/API) +# +# @using-api yes +# @results JSON +# @stable yes +# @parse url, title, content, publishedDate, thumbnail, embedded + +from searx.url_utils import quote_plus +from dateutil import parser +import time + +# engine dependent config +categories = ["videos", "music"] +paging = True +language_support = True +time_range_support = True + +# search-url +base_url = "https://invidio.us/" + + +# do search-request +def request(query, params): + time_range_dict = { + "day": "today", + "week": "week", + "month": "month", + "year": "year", + } + search_url = base_url + "api/v1/search?q={query}" + params["url"] = search_url.format( + query=quote_plus(query) + ) + "&page={pageno}".format(pageno=params["pageno"]) + + if params["time_range"] in time_range_dict: + params["url"] += "&date={timerange}".format( + timerange=time_range_dict[params["time_range"]] + ) + + if params["language"] != "all": + lang = params["language"].split("-") + if len(lang) == 2: + params["url"] += "&range={lrange}".format(lrange=lang[1]) + + return params + + +# get response from search-request +def response(resp): + results = [] + + search_results = resp.json() + embedded_url = ( + '<iframe width="540" height="304" ' + + 'data-src="' + + base_url + + 'embed/{videoid}" ' + + 'frameborder="0" allowfullscreen></iframe>' + ) + + base_invidious_url = base_url + "watch?v=" + + for result in search_results: + rtype = result.get("type", None) + if rtype == "video": + videoid = result.get("videoId", None) + if not videoid: + continue + + url = base_invidious_url + videoid + embedded = embedded_url.format(videoid=videoid) + thumbs = result.get("videoThumbnails", []) + thumb = next( + (th for th in thumbs if th["quality"] == "sddefault"), None + ) + if thumb: + thumbnail = thumb.get("url", "") + else: + thumbnail = "" + + publishedDate = parser.parse( + time.ctime(result.get("published", 0)) + ) + + results.append( + { + "url": url, + "title": result.get("title", ""), + "content": result.get("description", ""), + "template": "videos.html", + "publishedDate": publishedDate, + "embedded": embedded, + "thumbnail": thumbnail, + } + ) + + return results diff --git a/searx/engines/microsoft_academic.py b/searx/engines/microsoft_academic.py index 9387b08d0..9bac0069c 100644 --- a/searx/engines/microsoft_academic.py +++ b/searx/engines/microsoft_academic.py @@ -45,6 +45,8 @@ def request(query, params): def response(resp): results = [] response_data = loads(resp.text) + if not response_data: + return results for result in response_data['results']: url = _get_url(result) diff --git a/searx/engines/openstreetmap.py b/searx/engines/openstreetmap.py index 733ba6203..cec10a3c7 100644 --- a/searx/engines/openstreetmap.py +++ b/searx/engines/openstreetmap.py @@ -24,7 +24,7 @@ result_base_url = 'https://openstreetmap.org/{osm_type}/{osm_id}' # do search-request def request(query, params): - params['url'] = base_url + search_string.format(query=query) + params['url'] = base_url + search_string.format(query=query.decode('utf-8')) return params diff --git a/searx/engines/qwant.py b/searx/engines/qwant.py index de12955c6..54e9dafad 100644 --- 
a/searx/engines/qwant.py +++ b/searx/engines/qwant.py @@ -50,6 +50,7 @@ def request(query, params): language = match_language(params['language'], supported_languages, language_aliases) params['url'] += '&locale=' + language.replace('-', '_').lower() + params['headers']['User-Agent'] = 'Mozilla/5.0 (X11; Linux x86_64; rv:69.0) Gecko/20100101 Firefox/69.0' return params diff --git a/searx/engines/scanr_structures.py b/searx/engines/scanr_structures.py index 72fd2b3c9..7208dcb70 100644 --- a/searx/engines/scanr_structures.py +++ b/searx/engines/scanr_structures.py @@ -29,7 +29,7 @@ def request(query, params): params['url'] = search_url params['method'] = 'POST' params['headers']['Content-type'] = "application/json" - params['data'] = dumps({"query": query, + params['data'] = dumps({"query": query.decode('utf-8'), "searchField": "ALL", "sortDirection": "ASC", "sortOrder": "RELEVANCY", diff --git a/searx/engines/seedpeer.py b/searx/engines/seedpeer.py new file mode 100644 index 000000000..f9b1f99c8 --- /dev/null +++ b/searx/engines/seedpeer.py @@ -0,0 +1,78 @@ +# Seedpeer (Videos, Music, Files) +# +# @website https://seedpeer.me +# @provide-api no (nothing found) +# +# @using-api no +# @results HTML (using search portal) +# @stable yes (HTML can change) +# @parse url, title, content, seed, leech, magnetlink + +from lxml import html +from json import loads +from operator import itemgetter +from searx.url_utils import quote, urljoin +from searx.engines.xpath import extract_text + + +url = 'https://seedpeer.me/' +search_url = url + 'search/{search_term}?page={page_no}' +torrent_file_url = url + 'torrent/{torrent_hash}' + +# specific xpath variables +script_xpath = '//script[@type="text/javascript"][not(@src)]' +torrent_xpath = '(//table)[2]/tbody/tr' +link_xpath = '(./td)[1]/a/@href' +age_xpath = '(./td)[2]' +size_xpath = '(./td)[3]' + + +# do search-request +def request(query, params): + params['url'] = search_url.format(search_term=quote(query), + page_no=params['pageno']) + return params + + +# get response from search-request +def response(resp): + results = [] + dom = html.fromstring(resp.text) + result_rows = dom.xpath(torrent_xpath) + + try: + script_element = dom.xpath(script_xpath)[0] + json_string = script_element.text[script_element.text.find('{'):] + torrents_json = loads(json_string) + except: + return [] + + # parse results + for torrent_row, torrent_json in zip(result_rows, torrents_json['data']['list']): + title = torrent_json['name'] + seed = int(torrent_json['seeds']) + leech = int(torrent_json['peers']) + size = int(torrent_json['size']) + torrent_hash = torrent_json['hash'] + + torrentfile = torrent_file_url.format(torrent_hash=torrent_hash) + magnetlink = 'magnet:?xt=urn:btih:{}'.format(torrent_hash) + + age = extract_text(torrent_row.xpath(age_xpath)) + link = torrent_row.xpath(link_xpath)[0] + + href = urljoin(url, link) + + # append result + results.append({'url': href, + 'title': title, + 'content': age, + 'seed': seed, + 'leech': leech, + 'filesize': size, + 'torrentfile': torrentfile, + 'magnetlink': magnetlink, + 'template': 'torrent.html'}) + + # return results sorted by seeder + return sorted(results, key=itemgetter('seed'), reverse=True) diff --git a/searx/engines/soundcloud.py b/searx/engines/soundcloud.py index d59755e04..284689bf6 100644 --- a/searx/engines/soundcloud.py +++ b/searx/engines/soundcloud.py @@ -28,8 +28,10 @@ categories = ['music'] paging = True # search-url -url = 'https://api.soundcloud.com/' +# missing attribute: user_id, app_version, app_locale 
+url = 'https://api-v2.soundcloud.com/' search_url = url + 'search?{query}'\ + '&variant_ids='\ '&facet=model'\ '&limit=20'\ '&offset={offset}'\ @@ -49,7 +51,9 @@ def get_client_id(): if response.ok: tree = html.fromstring(response.content) - script_tags = tree.xpath("//script[contains(@src, '/assets/app')]") + # script_tags has been moved from /assets/app/ to /assets/ path. I + # found client_id in https://a-v2.sndcdn.com/assets/49-a0c01933-3.js + script_tags = tree.xpath("//script[contains(@src, '/assets/')]") app_js_urls = [script_tag.get('src') for script_tag in script_tags if script_tag is not None] # extracts valid app_js urls from soundcloud.com content @@ -57,14 +61,14 @@ def get_client_id(): # gets app_js and searches for the clientid response = http_get(app_js_url) if response.ok: - cids = cid_re.search(response.text) + cids = cid_re.search(response.content.decode("utf-8")) if cids is not None and len(cids.groups()): return cids.groups()[0] logger.warning("Unable to fetch guest client_id from SoundCloud, check parser!") return "" -def init(): +def init(engine_settings=None): global guest_client_id # api-key guest_client_id = get_client_id() diff --git a/searx/engines/spotify.py b/searx/engines/spotify.py index aed756be3..00c395706 100644 --- a/searx/engines/spotify.py +++ b/searx/engines/spotify.py @@ -12,10 +12,14 @@ from json import loads from searx.url_utils import urlencode +import requests +import base64 # engine dependent config categories = ['music'] paging = True +api_client_id = None +api_client_secret = None # search-url url = 'https://api.spotify.com/' @@ -31,6 +35,16 @@ def request(query, params): params['url'] = search_url.format(query=urlencode({'q': query}), offset=offset) + r = requests.post( + 'https://accounts.spotify.com/api/token', + data={'grant_type': 'client_credentials'}, + headers={'Authorization': 'Basic ' + base64.b64encode( + "{}:{}".format(api_client_id, api_client_secret).encode('utf-8') + ).decode('utf-8')} + ) + j = loads(r.text) + params['headers'] = {'Authorization': 'Bearer {}'.format(j.get('access_token'))} + return params diff --git a/searx/engines/startpage.py b/searx/engines/startpage.py index 6638f3d83..76567396f 100644 --- a/searx/engines/startpage.py +++ b/searx/engines/startpage.py @@ -15,6 +15,8 @@ from dateutil import parser from datetime import datetime, timedelta import re from searx.engines.xpath import extract_text +from searx.languages import language_codes +from searx.utils import eval_xpath # engine dependent config categories = ['general'] @@ -22,7 +24,7 @@ categories = ['general'] # (probably the parameter qid), require # storing of qid's between mulitble search-calls -# paging = False +paging = True language_support = True # search-url @@ -32,23 +34,32 @@ search_url = base_url + 'do/search' # specific xpath variables # ads xpath //div[@id="results"]/div[@id="sponsored"]//div[@class="result"] # not ads: div[@class="result"] are the direct childs of div[@id="results"] -results_xpath = '//li[contains(@class, "search-result") and contains(@class, "search-item")]' -link_xpath = './/h3/a' -content_xpath = './p[@class="search-item__body"]' +results_xpath = '//div[@class="w-gl__result"]' +link_xpath = './/a[@class="w-gl__result-title"]' +content_xpath = './/p[@class="w-gl__description"]' # do search-request def request(query, params): - offset = (params['pageno'] - 1) * 10 params['url'] = search_url params['method'] = 'POST' - params['data'] = {'query': query, - 'startat': offset} + params['data'] = { + 'query': query, + 'page': 
+        'cat': 'web',
+        'cmd': 'process_search',
+        'engine0': 'v1all',
+    }
 
     # set language if specified
     if params['language'] != 'all':
-        params['data']['with_language'] = ('lang_' + params['language'].split('-')[0])
+        language = 'english'
+        for lc, _, _, lang in language_codes:
+            if lc == params['language']:
+                language = lang
+        params['data']['language'] = language
+        params['data']['lui'] = language
 
     return params
 
diff --git a/searx/engines/vimeo.py b/searx/engines/vimeo.py
index 1408be8df..a92271019 100644
--- a/searx/engines/vimeo.py
+++ b/searx/engines/vimeo.py
@@ -24,7 +24,7 @@ paging = True
 base_url = 'https://vimeo.com/'
 search_url = base_url + '/search/page:{pageno}?{query}'
 
-embedded_url = '<iframe data-src="//player.vimeo.com/video/{videoid}" ' +\
+embedded_url = '<iframe data-src="https://player.vimeo.com/video/{videoid}" ' +\
     'width="540" height="304" frameborder="0" ' +\
     'webkitallowfullscreen mozallowfullscreen allowfullscreen></iframe>'
 
diff --git a/searx/engines/wikidata.py b/searx/engines/wikidata.py
index 03a58a31a..e913b3915 100644
--- a/searx/engines/wikidata.py
+++ b/searx/engines/wikidata.py
@@ -16,10 +16,11 @@ from searx.poolrequests import get
 from searx.engines.xpath import extract_text
 from searx.engines.wikipedia import _fetch_supported_languages, supported_languages_url
 from searx.url_utils import urlencode
-from searx.utils import match_language
+from searx.utils import match_language, eval_xpath
 
 from json import loads
 from lxml.html import fromstring
+from lxml import etree
 
 logger = logger.getChild('wikidata')
 result_count = 1
@@ -27,23 +28,23 @@ result_count = 1
 # urls
 wikidata_host = 'https://www.wikidata.org'
 url_search = wikidata_host \
-    + '/w/index.php?{query}'
+    + '/w/index.php?{query}&ns0=1'
 
 wikidata_api = wikidata_host + '/w/api.php'
 url_detail = wikidata_api\
     + '?action=parse&format=json&{query}'\
-    + '&redirects=1&prop=text%7Cdisplaytitle%7Clanglinks%7Crevid'\
-    + '&disableeditsection=1&disabletidy=1&preview=1&sectionpreview=1&disabletoc=1&utf8=1&formatversion=2'
+    + '&redirects=1&prop=text%7Cdisplaytitle%7Cparsewarnings'\
+    + '&disableeditsection=1&preview=1&sectionpreview=1&disabletoc=1&utf8=1&formatversion=2'
 
 url_map = 'https://www.openstreetmap.org/'\
     + '?lat={latitude}&lon={longitude}&zoom={zoom}&layers=M'
 url_image = 'https://commons.wikimedia.org/wiki/Special:FilePath/{filename}?width=500&height=400'
 
 # xpaths
+div_ids_xpath = '//div[@id]'
 wikidata_ids_xpath = '//ul[@class="mw-search-results"]/li//a/@href'
 title_xpath = '//*[contains(@class,"wikibase-title-label")]'
 description_xpath = '//div[contains(@class,"wikibase-entitytermsview-heading-description")]'
-property_xpath = '//div[@id="{propertyid}"]'
 label_xpath = './/div[contains(@class,"wikibase-statementgroupview-property-label")]/a'
 url_xpath = './/a[contains(@class,"external free") or contains(@class, "wb-external-id")]'
 wikilink_xpath = './/ul[contains(@class,"wikibase-sitelinklistview-listview")]'\
@@ -57,6 +58,15 @@
 calendar_name_xpath = './/sup[contains(@class,"wb-calendar-name")]'
 media_xpath = value_xpath + '//div[contains(@class,"commons-media-caption")]//a'
 
 
+def get_id_cache(result):
+    id_cache = {}
+    for e in eval_xpath(result, div_ids_xpath):
+        id = e.get('id')
+        if id.startswith('P'):
+            id_cache[id] = e
+    return id_cache
+
+
 def request(query, params):
     params['url'] = url_search.format(
         query=urlencode({'search': query}))
@@ -65,8 +75,9 @@ def request(query, params):
 
 def response(resp):
     results = []
-    html = fromstring(resp.text)
-    search_results = html.xpath(wikidata_ids_xpath)
+    htmlparser = etree.HTMLParser()
+    html = fromstring(resp.content.decode("utf-8"), parser=htmlparser)
+    search_results = eval_xpath(html, wikidata_ids_xpath)
 
     if resp.search_params['language'].split('-')[0] == 'all':
         language = 'en'
@@ -78,13 +89,13 @@ def response(resp):
         wikidata_id = search_result.split('/')[-1]
         url = url_detail.format(query=urlencode({'page': wikidata_id, 'uselang': language}))
         htmlresponse = get(url)
-        jsonresponse = loads(htmlresponse.text)
-        results += getDetail(jsonresponse, wikidata_id, language, resp.search_params['language'])
+        jsonresponse = loads(htmlresponse.content.decode("utf-8"))
+        results += getDetail(jsonresponse, wikidata_id, language, resp.search_params['language'], htmlparser)
 
     return results
 
 
-def getDetail(jsonresponse, wikidata_id, language, locale):
+def getDetail(jsonresponse, wikidata_id, language, locale, htmlparser):
     results = []
     urls = []
     attributes = []
@@ -95,21 +106,23 @@ def getDetail(jsonresponse, wikidata_id, language, locale):
     if not title or not result:
         return results
 
-    title = fromstring(title)
-    for elem in title.xpath(language_fallback_xpath):
+    title = fromstring(title, parser=htmlparser)
+    for elem in eval_xpath(title, language_fallback_xpath):
         elem.getparent().remove(elem)
-    title = extract_text(title.xpath(title_xpath))
+    title = extract_text(eval_xpath(title, title_xpath))
 
-    result = fromstring(result)
-    for elem in result.xpath(language_fallback_xpath):
+    result = fromstring(result, parser=htmlparser)
+    for elem in eval_xpath(result, language_fallback_xpath):
         elem.getparent().remove(elem)
-    description = extract_text(result.xpath(description_xpath))
+    description = extract_text(eval_xpath(result, description_xpath))
+
+    id_cache = get_id_cache(result)
 
     # URLS
     # official website
-    add_url(urls, result, 'P856', results=results)
+    add_url(urls, result, id_cache, 'P856', results=results)
 
     # wikipedia
     wikipedia_link_count = 0
@@ -130,30 +143,30 @@ def getDetail(jsonresponse, wikidata_id, language, locale):
     # if wikipedia_link_count == 0:
 
     # more wikis
-    add_url(urls, result, default_label='Wikivoyage (' + language + ')', link_type=language + 'wikivoyage')
-    add_url(urls, result, default_label='Wikiquote (' + language + ')', link_type=language + 'wikiquote')
-    add_url(urls, result, default_label='Wikimedia Commons', link_type='commonswiki')
+    add_url(urls, result, id_cache, default_label='Wikivoyage (' + language + ')', link_type=language + 'wikivoyage')
+    add_url(urls, result, id_cache, default_label='Wikiquote (' + language + ')', link_type=language + 'wikiquote')
+    add_url(urls, result, id_cache, default_label='Wikimedia Commons', link_type='commonswiki')
 
-    add_url(urls, result, 'P625', 'OpenStreetMap', link_type='geo')
+    add_url(urls, result, id_cache, 'P625', 'OpenStreetMap', link_type='geo')
 
     # musicbrainz
-    add_url(urls, result, 'P434', 'MusicBrainz', 'http://musicbrainz.org/artist/')
-    add_url(urls, result, 'P435', 'MusicBrainz', 'http://musicbrainz.org/work/')
-    add_url(urls, result, 'P436', 'MusicBrainz', 'http://musicbrainz.org/release-group/')
-    add_url(urls, result, 'P966', 'MusicBrainz', 'http://musicbrainz.org/label/')
+    add_url(urls, result, id_cache, 'P434', 'MusicBrainz', 'http://musicbrainz.org/artist/')
+    add_url(urls, result, id_cache, 'P435', 'MusicBrainz', 'http://musicbrainz.org/work/')
+    add_url(urls, result, id_cache, 'P436', 'MusicBrainz', 'http://musicbrainz.org/release-group/')
+    add_url(urls, result, id_cache, 'P966', 'MusicBrainz', 'http://musicbrainz.org/label/')
 
     # IMDb
-    add_url(urls, result, 'P345', 'IMDb', 'https://www.imdb.com/', link_type='imdb')
+    add_url(urls, result, id_cache, 'P345', 'IMDb', 'https://www.imdb.com/', link_type='imdb')
     # source code repository
-    add_url(urls, result, 'P1324')
+    add_url(urls, result, id_cache, 'P1324')
     # blog
-    add_url(urls, result, 'P1581')
+    add_url(urls, result, id_cache, 'P1581')
 
     # social media links
-    add_url(urls, result, 'P2397', 'YouTube', 'https://www.youtube.com/channel/')
-    add_url(urls, result, 'P1651', 'YouTube', 'https://www.youtube.com/watch?v=')
-    add_url(urls, result, 'P2002', 'Twitter', 'https://twitter.com/')
-    add_url(urls, result, 'P2013', 'Facebook', 'https://facebook.com/')
-    add_url(urls, result, 'P2003', 'Instagram', 'https://instagram.com/')
+    add_url(urls, result, id_cache, 'P2397', 'YouTube', 'https://www.youtube.com/channel/')
+    add_url(urls, result, id_cache, 'P1651', 'YouTube', 'https://www.youtube.com/watch?v=')
+    add_url(urls, result, id_cache, 'P2002', 'Twitter', 'https://twitter.com/')
+    add_url(urls, result, id_cache, 'P2013', 'Facebook', 'https://facebook.com/')
+    add_url(urls, result, id_cache, 'P2003', 'Instagram', 'https://instagram.com/')
 
     urls.append({'title': 'Wikidata',
                  'url': 'https://www.wikidata.org/wiki/'
@@ -163,132 +176,132 @@ def getDetail(jsonresponse, wikidata_id, language, locale):
 
     # DATES
     # inception date
-    add_attribute(attributes, result, 'P571', date=True)
+    add_attribute(attributes, id_cache, 'P571', date=True)
     # dissolution date
-    add_attribute(attributes, result, 'P576', date=True)
+    add_attribute(attributes, id_cache, 'P576', date=True)
     # start date
-    add_attribute(attributes, result, 'P580', date=True)
+    add_attribute(attributes, id_cache, 'P580', date=True)
     # end date
-    add_attribute(attributes, result, 'P582', date=True)
+    add_attribute(attributes, id_cache, 'P582', date=True)
     # date of birth
-    add_attribute(attributes, result, 'P569', date=True)
+    add_attribute(attributes, id_cache, 'P569', date=True)
     # date of death
-    add_attribute(attributes, result, 'P570', date=True)
+    add_attribute(attributes, id_cache, 'P570', date=True)
     # date of spacecraft launch
-    add_attribute(attributes, result, 'P619', date=True)
+    add_attribute(attributes, id_cache, 'P619', date=True)
     # date of spacecraft landing
-    add_attribute(attributes, result, 'P620', date=True)
+    add_attribute(attributes, id_cache, 'P620', date=True)
     # nationality
-    add_attribute(attributes, result, 'P27')
+    add_attribute(attributes, id_cache, 'P27')
     # country of origin
-    add_attribute(attributes, result, 'P495')
+    add_attribute(attributes, id_cache, 'P495')
     # country
-    add_attribute(attributes, result, 'P17')
+    add_attribute(attributes, id_cache, 'P17')
     # headquarters
-    add_attribute(attributes, result, 'Q180')
+    add_attribute(attributes, id_cache, 'Q180')
 
     # PLACES
     # capital
-    add_attribute(attributes, result, 'P36', trim=True)
+    add_attribute(attributes, id_cache, 'P36', trim=True)
     # head of state
-    add_attribute(attributes, result, 'P35', trim=True)
+    add_attribute(attributes, id_cache, 'P35', trim=True)
     # head of government
-    add_attribute(attributes, result, 'P6', trim=True)
+    add_attribute(attributes, id_cache, 'P6', trim=True)
     # type of government
-    add_attribute(attributes, result, 'P122')
+    add_attribute(attributes, id_cache, 'P122')
     # official language
-    add_attribute(attributes, result, 'P37')
+    add_attribute(attributes, id_cache, 'P37')
     # population
-    add_attribute(attributes, result, 'P1082', trim=True)
+    add_attribute(attributes, id_cache, 'P1082', trim=True)
     # area
-    add_attribute(attributes, result, 'P2046')
+    add_attribute(attributes, id_cache, 'P2046')
     # currency
-    add_attribute(attributes, result, 'P38', trim=True)
+    add_attribute(attributes, id_cache, 'P38', trim=True)
     # height (building)
-    add_attribute(attributes, result, 'P2048')
+    add_attribute(attributes, id_cache, 'P2048')
 
     # MEDIA
     # platform (videogames)
-    add_attribute(attributes, result, 'P400')
+    add_attribute(attributes, id_cache, 'P400')
     # author
-    add_attribute(attributes, result, 'P50')
+    add_attribute(attributes, id_cache, 'P50')
     # creator
-    add_attribute(attributes, result, 'P170')
+    add_attribute(attributes, id_cache, 'P170')
     # director
-    add_attribute(attributes, result, 'P57')
+    add_attribute(attributes, id_cache, 'P57')
     # performer
-    add_attribute(attributes, result, 'P175')
+    add_attribute(attributes, id_cache, 'P175')
     # developer
-    add_attribute(attributes, result, 'P178')
+    add_attribute(attributes, id_cache, 'P178')
     # producer
-    add_attribute(attributes, result, 'P162')
+    add_attribute(attributes, id_cache, 'P162')
     # manufacturer
-    add_attribute(attributes, result, 'P176')
+    add_attribute(attributes, id_cache, 'P176')
     # screenwriter
-    add_attribute(attributes, result, 'P58')
+    add_attribute(attributes, id_cache, 'P58')
     # production company
-    add_attribute(attributes, result, 'P272')
+    add_attribute(attributes, id_cache, 'P272')
     # record label
-    add_attribute(attributes, result, 'P264')
+    add_attribute(attributes, id_cache, 'P264')
     # publisher
-    add_attribute(attributes, result, 'P123')
+    add_attribute(attributes, id_cache, 'P123')
     # original network
-    add_attribute(attributes, result, 'P449')
+    add_attribute(attributes, id_cache, 'P449')
     # distributor
-    add_attribute(attributes, result, 'P750')
+    add_attribute(attributes, id_cache, 'P750')
     # composer
-    add_attribute(attributes, result, 'P86')
+    add_attribute(attributes, id_cache, 'P86')
     # publication date
-    add_attribute(attributes, result, 'P577', date=True)
+    add_attribute(attributes, id_cache, 'P577', date=True)
     # genre
-    add_attribute(attributes, result, 'P136')
+    add_attribute(attributes, id_cache, 'P136')
     # original language
-    add_attribute(attributes, result, 'P364')
+    add_attribute(attributes, id_cache, 'P364')
     # isbn
-    add_attribute(attributes, result, 'Q33057')
+    add_attribute(attributes, id_cache, 'Q33057')
     # software license
-    add_attribute(attributes, result, 'P275')
+    add_attribute(attributes, id_cache, 'P275')
     # programming language
-    add_attribute(attributes, result, 'P277')
+    add_attribute(attributes, id_cache, 'P277')
     # version
-    add_attribute(attributes, result, 'P348', trim=True)
+    add_attribute(attributes, id_cache, 'P348', trim=True)
     # narrative location
-    add_attribute(attributes, result, 'P840')
+    add_attribute(attributes, id_cache, 'P840')
 
     # LANGUAGES
     # number of speakers
-    add_attribute(attributes, result, 'P1098')
+    add_attribute(attributes, id_cache, 'P1098')
     # writing system
-    add_attribute(attributes, result, 'P282')
+    add_attribute(attributes, id_cache, 'P282')
     # regulatory body
-    add_attribute(attributes, result, 'P1018')
+    add_attribute(attributes, id_cache, 'P1018')
     # language code
-    add_attribute(attributes, result, 'P218')
+    add_attribute(attributes, id_cache, 'P218')
 
     # OTHER
     # ceo
-    add_attribute(attributes, result, 'P169', trim=True)
+    add_attribute(attributes, id_cache, 'P169', trim=True)
     # founder
-    add_attribute(attributes, result, 'P112')
+    add_attribute(attributes, id_cache, 'P112')
     # legal form (company/organization)
-    add_attribute(attributes, result, 'P1454')
+    add_attribute(attributes, id_cache, 'P1454')
     # operator
-    add_attribute(attributes, result, 'P137')
+    add_attribute(attributes, id_cache, 'P137')
     # crew members
-    add_attribute(attributes, result, 'P1029')
+    add_attribute(attributes, id_cache, 'P1029')
     # taxon
-    add_attribute(attributes, result, 'P225')
+    add_attribute(attributes, id_cache, 'P225')
     # chemical formula
-    add_attribute(attributes, result, 'P274')
+    add_attribute(attributes, id_cache, 'P274')
     # winner (sports/contests)
-    add_attribute(attributes, result, 'P1346')
+    add_attribute(attributes, id_cache, 'P1346')
     # number of deaths
-    add_attribute(attributes, result, 'P1120')
+    add_attribute(attributes, id_cache, 'P1120')
     # currency code
-    add_attribute(attributes, result, 'P498')
+    add_attribute(attributes, id_cache, 'P498')
 
-    image = add_image(result)
+    image = add_image(id_cache)
 
     if len(attributes) == 0 and len(urls) == 2 and len(description) == 0:
         results.append({
@@ -310,43 +323,42 @@ def getDetail(jsonresponse, wikidata_id, language, locale):
 
 # only returns first match
-def add_image(result):
+def add_image(id_cache):
     # P15: route map, P242: locator map, P154: logo, P18: image, P242: map, P41: flag, P2716: collage, P2910: icon
     property_ids = ['P15', 'P242', 'P154', 'P18', 'P242', 'P41', 'P2716', 'P2910']
 
     for property_id in property_ids:
-        image = result.xpath(property_xpath.replace('{propertyid}', property_id))
-        if image:
-            image_name = image[0].xpath(media_xpath)
+        image = id_cache.get(property_id, None)
+        if image is not None:
+            image_name = eval_xpath(image, media_xpath)
             image_src = url_image.replace('{filename}', extract_text(image_name[0]))
             return image_src
 
 
 # setting trim will only return high ranked rows OR the first row
-def add_attribute(attributes, result, property_id, default_label=None, date=False, trim=False):
-    attribute = result.xpath(property_xpath.replace('{propertyid}', property_id))
-    if attribute:
+def add_attribute(attributes, id_cache, property_id, default_label=None, date=False, trim=False):
+    attribute = id_cache.get(property_id, None)
+    if attribute is not None:
         if default_label:
             label = default_label
         else:
-            label = extract_text(attribute[0].xpath(label_xpath))
+            label = extract_text(eval_xpath(attribute, label_xpath))
             label = label[0].upper() + label[1:]
 
         if date:
             trim = True
             # remove calendar name
-            calendar_name = attribute[0].xpath(calendar_name_xpath)
+            calendar_name = eval_xpath(attribute, calendar_name_xpath)
             for calendar in calendar_name:
                 calendar.getparent().remove(calendar)
 
         concat_values = ""
         values = []
         first_value = None
-        for row in attribute[0].xpath(property_row_xpath):
-            if not first_value or not trim or row.xpath(preferred_rank_xpath):
-
-                value = row.xpath(value_xpath)
+        for row in eval_xpath(attribute, property_row_xpath):
+            if not first_value or not trim or eval_xpath(row, preferred_rank_xpath):
+                value = eval_xpath(row, value_xpath)
                 if not value:
                     continue
 
                 value = extract_text(value)
@@ -369,18 +381,18 @@ def add_attribute(attributes, result, property_id, default_label=None, date=False, trim=False):
 # requires property_id unless it's a wiki link (defined in link_type)
-def add_url(urls, result, property_id=None, default_label=None, url_prefix=None, results=None, link_type=None):
+def add_url(urls, result, id_cache, property_id=None, default_label=None, url_prefix=None, results=None,
+            link_type=None):
     links = []
 
     # wiki links don't have property in wikidata page
     if link_type and 'wiki' in link_type:
         links.append(get_wikilink(result, link_type))
     else:
-        dom_element = result.xpath(property_xpath.replace('{propertyid}', property_id))
-        if dom_element:
-            dom_element = dom_element[0]
+        dom_element = id_cache.get(property_id, None)
+        if dom_element is not None:
             if not default_label:
-                label = extract_text(dom_element.xpath(label_xpath))
+                label = extract_text(eval_xpath(dom_element, label_xpath))
                 label = label[0].upper() + label[1:]
 
             if link_type == 'geo':
@@ -390,7 +402,7 @@ def add_url(urls, result, property_id=None, default_label=None, url_prefix=None,
                 links.append(get_imdblink(dom_element, url_prefix))
 
             else:
-                url_results = dom_element.xpath(url_xpath)
+                url_results = eval_xpath(dom_element, url_xpath)
                 for link in url_results:
                     if link is not None:
                         if url_prefix:
@@ -410,7 +422,7 @@ def add_url(urls, result, property_id=None, default_label=None, url_prefix=None,
 
 def get_imdblink(result, url_prefix):
-    imdb_id = result.xpath(value_xpath)
+    imdb_id = eval_xpath(result, value_xpath)
     if imdb_id:
         imdb_id = extract_text(imdb_id)
         id_prefix = imdb_id[:2]
@@ -430,7 +442,7 @@ def get_imdblink(result, url_prefix):
 
 def get_geolink(result):
-    coordinates = result.xpath(value_xpath)
+    coordinates = eval_xpath(result, value_xpath)
     if not coordinates:
         return None
     coordinates = extract_text(coordinates[0])
@@ -477,7 +489,7 @@ def get_geolink(result):
 
 def get_wikilink(result, wikiid):
-    url = result.xpath(wikilink_xpath.replace('{wikiid}', wikiid))
+    url = eval_xpath(result, wikilink_xpath.replace('{wikiid}', wikiid))
     if not url:
         return None
     url = url[0]
diff --git a/searx/engines/wikipedia.py b/searx/engines/wikipedia.py
index 4dae735d1..a216ba886 100644
--- a/searx/engines/wikipedia.py
+++ b/searx/engines/wikipedia.py
@@ -21,7 +21,8 @@ search_url = base_url + u'w/api.php?'\
     'action=query'\
     '&format=json'\
     '&{query}'\
-    '&prop=extracts|pageimages'\
+    '&prop=extracts|pageimages|pageprops'\
+    '&ppprop=disambiguation'\
     '&exintro'\
     '&explaintext'\
     '&pithumbsize=300'\
@@ -79,12 +80,15 @@ def response(resp):
 
     # wikipedia article's unique id
     # first valid id is assumed to be the requested article
+    if 'pages' not in search_result['query']:
+        return results
+
     for article_id in search_result['query']['pages']:
         page = search_result['query']['pages'][article_id]
         if int(article_id) > 0:
             break
 
-    if int(article_id) < 0:
+    if int(article_id) < 0 or 'disambiguation' in page.get('pageprops', {}):
         return []
 
     title = page.get('title')
@@ -96,6 +100,7 @@ def response(resp):
 
     extract = page.get('extract')
     summary = extract_first_paragraph(extract, title, image)
+    summary = summary.replace('() ', '')
 
     # link to wikipedia article
     wikipedia_link = base_url.format(language=url_lang(resp.search_params['language'])) \
diff --git a/searx/engines/wolframalpha_noapi.py b/searx/engines/wolframalpha_noapi.py
index 2cbbc5adc..387c9fa17 100644
--- a/searx/engines/wolframalpha_noapi.py
+++ b/searx/engines/wolframalpha_noapi.py
@@ -55,7 +55,7 @@ def obtain_token():
     return token
 
 
-def init():
+def init(engine_settings=None):
     obtain_token()
 
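A note on the wikipedia.py hunk above: the new replace() presumably cleans up extracts whose first sentence is left with an empty parenthetical once pronunciation/IPA markup has been stripped by the API. An illustrative (made-up) example of the effect:

    summary = u'Example Town () is a town in Example County.'
    summary = summary.replace('() ', '')   # -> u'Example Town is a town in Example County.'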
diff --git a/searx/engines/www1x.py b/searx/engines/www1x.py
index 508803240..f1154b16d 100644
--- a/searx/engines/www1x.py
+++ b/searx/engines/www1x.py
@@ -11,8 +11,8 @@
 """
 
 from lxml import html
-import re
 from searx.url_utils import urlencode, urljoin
+from searx.engines.xpath import extract_text
 
 # engine dependent config
 categories = ['images']
@@ -34,41 +34,18 @@ def request(query, params):
 
 def response(resp):
     results = []
-    # get links from result-text
-    regex = re.compile('(</a>|<a)')
-    results_parts = re.split(regex, resp.text)
-
-    cur_element = ''
-
-    # iterate over link parts
-    for result_part in results_parts:
+    dom = html.fromstring(resp.text)
+    for res in dom.xpath('//div[@class="List-item MainListing"]'):
         # processed start and end of link
-        if result_part == '<a':
-            cur_element = result_part
-            continue
-        elif result_part != '</a>':
-            cur_element += result_part
-            continue
-
-        cur_element += result_part
-
-        # fix xml-error
-        cur_element = cur_element.replace('"></a>', '"/></a>')
-
-        dom = html.fromstring(cur_element)
-        link = dom.xpath('//a')[0]
+        link = res.xpath('//a')[0]
 
         url = urljoin(base_url, link.attrib.get('href'))
-        title = link.attrib.get('title', '')
+        title = extract_text(link)
 
-        thumbnail_src = urljoin(base_url, link.xpath('.//img')[0].attrib['src'])
+        thumbnail_src = urljoin(base_url, res.xpath('.//img')[0].attrib['src'])
         # TODO: get image with higher resolution
         img_src = thumbnail_src
 
-        # check if url is showing to a photo
-        if '/photo/' not in url:
-            continue
-
         # append result
         results.append({'url': url,
                         'title': title,
diff --git a/searx/engines/xpath.py b/searx/engines/xpath.py
index 50f98d935..b75896cc7 100644
--- a/searx/engines/xpath.py
+++ b/searx/engines/xpath.py
@@ -1,12 +1,13 @@
 from lxml import html
 from lxml.etree import _ElementStringResult, _ElementUnicodeResult
-from searx.utils import html_to_text
+from searx.utils import html_to_text, eval_xpath
 from searx.url_utils import unquote, urlencode, urljoin, urlparse
 
 search_url = None
 url_xpath = None
 content_xpath = None
 title_xpath = None
+thumbnail_xpath = False
 paging = False
 suggestion_xpath = ''
 results_xpath = ''
@@ -40,7 +41,9 @@ def extract_text(xpath_results):
         return ''.join(xpath_results)
     else:
         # it's an element
-        text = html.tostring(xpath_results, encoding='unicode', method='text', with_tail=False)
+        text = html.tostring(
+            xpath_results, encoding='unicode', method='text', with_tail=False
+        )
         text = text.strip().replace('\n', ' ')
         return ' '.join(text.split())
 
@@ -101,22 +104,30 @@ def response(resp):
     results = []
     dom = html.fromstring(resp.text)
     if results_xpath:
-        for result in dom.xpath(results_xpath):
-            url = extract_url(result.xpath(url_xpath), search_url)
-            title = extract_text(result.xpath(title_xpath))
-            content = extract_text(result.xpath(content_xpath))
-            results.append({'url': url, 'title': title, 'content': content})
+        for result in eval_xpath(dom, results_xpath):
+            url = extract_url(eval_xpath(result, url_xpath), search_url)
+            title = extract_text(eval_xpath(result, title_xpath))
+            content = extract_text(eval_xpath(result, content_xpath))
+            tmp_result = {'url': url, 'title': title, 'content': content}
+
+            # add thumbnail if available
+            if thumbnail_xpath:
+                thumbnail_xpath_result = eval_xpath(result, thumbnail_xpath)
+                if len(thumbnail_xpath_result) > 0:
+                    tmp_result['img_src'] = extract_url(thumbnail_xpath_result, search_url)
+
+            results.append(tmp_result)
     else:
         for url, title, content in zip(
             (extract_url(x, search_url) for
-             x in dom.xpath(url_xpath)),
-            map(extract_text, dom.xpath(title_xpath)),
-            map(extract_text, dom.xpath(content_xpath))
+             x in eval_xpath(dom, url_xpath)),
+            map(extract_text, eval_xpath(dom, title_xpath)),
+            map(extract_text, eval_xpath(dom, content_xpath))
         ):
             results.append({'url': url, 'title': title, 'content': content})
 
     if not suggestion_xpath:
         return results
 
-    for suggestion in dom.xpath(suggestion_xpath):
+    for suggestion in eval_xpath(dom, suggestion_xpath):
         results.append({'suggestion': extract_text(suggestion)})
 
     return results
diff --git a/searx/engines/yahoo.py b/searx/engines/yahoo.py
index 73b78bcf7..36c1a11f8 100644
--- a/searx/engines/yahoo.py
+++ b/searx/engines/yahoo.py
@@ -14,7 +14,7 @@ from lxml import html
 from searx.engines.xpath import extract_text, extract_url
 from searx.url_utils import unquote, urlencode
-from searx.utils import match_language
+from searx.utils import match_language, eval_xpath
 
 # engine dependent config
 categories = ['general']
@@ -109,21 +109,21 @@ def response(resp):
     dom = html.fromstring(resp.text)
 
     try:
-        results_num = int(dom.xpath('//div[@class="compPagination"]/span[last()]/text()')[0]
+        results_num = int(eval_xpath(dom, '//div[@class="compPagination"]/span[last()]/text()')[0]
                           .split()[0].replace(',', ''))
         results.append({'number_of_results': results_num})
     except:
         pass
 
     # parse results
-    for result in dom.xpath(results_xpath):
+    for result in eval_xpath(dom, results_xpath):
         try:
-            url = parse_url(extract_url(result.xpath(url_xpath), search_url))
-            title = extract_text(result.xpath(title_xpath)[0])
+            url = parse_url(extract_url(eval_xpath(result, url_xpath), search_url))
+            title = extract_text(eval_xpath(result, title_xpath)[0])
         except:
             continue
 
-        content = extract_text(result.xpath(content_xpath)[0])
+        content = extract_text(eval_xpath(result, content_xpath)[0])
 
         # append result
         results.append({'url': url,
@@ -131,7 +131,7 @@ def response(resp):
                         'content': content})
 
     # if no suggestion found, return results
-    suggestions = dom.xpath(suggestion_xpath)
+    suggestions = eval_xpath(dom, suggestion_xpath)
     if not suggestions:
         return results
 
@@ -148,9 +148,9 @@ def response(resp):
 def _fetch_supported_languages(resp):
     supported_languages = []
     dom = html.fromstring(resp.text)
-    options = dom.xpath('//div[@id="yschlang"]/span/label/input')
+    options = eval_xpath(dom, '//div[@id="yschlang"]/span/label/input')
     for option in options:
-        code_parts = option.xpath('./@value')[0][5:].split('_')
+        code_parts = eval_xpath(option, './@value')[0][5:].split('_')
         if len(code_parts) == 2:
             code = code_parts[0] + '-' + code_parts[1].upper()
         else:
diff --git a/searx/engines/youtube_api.py b/searx/engines/youtube_api.py
index 6de18aa2c..bc4c0d58e 100644
--- a/searx/engines/youtube_api.py
+++ b/searx/engines/youtube_api.py
@@ -23,7 +23,7 @@ base_url = 'https://www.googleapis.com/youtube/v3/search'
 search_url = base_url + '?part=snippet&{query}&maxResults=20&key={api_key}'
 
 embedded_url = '<iframe width="540" height="304" ' +\
-    'data-src="//www.youtube-nocookie.com/embed/{videoid}" ' +\
+    'data-src="https://www.youtube-nocookie.com/embed/{videoid}" ' +\
     'frameborder="0" allowfullscreen></iframe>'
 
 base_youtube_url = 'https://www.youtube.com/watch?v='
diff --git a/searx/engines/youtube_noapi.py b/searx/engines/youtube_noapi.py
index 3bf25932b..49d0ae604 100644
--- a/searx/engines/youtube_noapi.py
+++ b/searx/engines/youtube_noapi.py
@@ -30,7 +30,7 @@ time_range_dict = {'day': 'Ag',
                    'year': 'BQ'}
 
 embedded_url = '<iframe width="540" height="304" ' +\
-    'data-src="//www.youtube-nocookie.com/embed/{videoid}" ' +\
+    'data-src="https://www.youtube-nocookie.com/embed/{videoid}" ' +\
     'frameborder="0" allowfullscreen></iframe>'
 
 base_youtube_url = 'https://www.youtube.com/watch?v='
@@ -67,12 +67,8 @@ def response(resp):
         if videoid is not None:
             url = base_youtube_url + videoid
             thumbnail = 'https://i.ytimg.com/vi/' + videoid + '/hqdefault.jpg'
-            title = video.get('title', {}).get('simpleText', videoid)
-            description_snippet = video.get('descriptionSnippet', {})
-            if 'runs' in description_snippet:
-                content = reduce(lambda a, b: a + b.get('text', ''), description_snippet.get('runs'), '')
-            else:
-                content = description_snippet.get('simpleText', '')
+            title = get_text_from_json(video.get('title', {}))
+            content = get_text_from_json(video.get('descriptionSnippet', {}))
             embedded = embedded_url.format(videoid=videoid)
 
             # append result
@@ -85,3 +81,10 @@ def response(resp):
 
     # return results
     return results
+
+
+def get_text_from_json(element):
+    if 'runs' in element:
+        return reduce(lambda a, b: a + b.get('text', ''), element.get('runs'), '')
+    else:
+        return element.get('simpleText', '')
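For reference, the new get_text_from_json() helper added to youtube_noapi.py above simply concatenates the 'runs' fragments of a YouTube JSON text object and falls back to 'simpleText'. A small standalone demonstration (the sample dictionaries are made up):

    from functools import reduce

    def get_text_from_json(element):
        if 'runs' in element:
            return reduce(lambda a, b: a + b.get('text', ''), element.get('runs'), '')
        else:
            return element.get('simpleText', '')

    print(get_text_from_json({'runs': [{'text': 'openSUSE '}, {'text': 'review'}]}))  # openSUSE review
    print(get_text_from_json({'simpleText': 'openSUSE review'}))                      # openSUSE review
    print(get_text_from_json({}))                                                     # empty string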