summaryrefslogtreecommitdiff
path: root/searx/engines
diff options
context:
space:
mode:
Diffstat (limited to 'searx/engines')
-rw-r--r--searx/engines/bandcamp.py2
-rw-r--r--searx/engines/genius.py58
-rw-r--r--searx/engines/google_images.py65
3 files changed, 85 insertions, 40 deletions
diff --git a/searx/engines/bandcamp.py b/searx/engines/bandcamp.py
index f83ca6d4f..8feff1fe0 100644
--- a/searx/engines/bandcamp.py
+++ b/searx/engines/bandcamp.py
@@ -82,7 +82,7 @@ def response(resp):
thumbnail = result.xpath('.//div[@class="art"]/img/@src')
if thumbnail:
- new_result['thumbnail'] = thumbnail[0]
+ new_result['img_src'] = thumbnail[0]
result_id = parse_qs(urlparse(link.get('href')).query)["search_item_id"][0]
itemtype = extract_text(result.xpath('.//div[@class="itemtype"]')).lower()
diff --git a/searx/engines/genius.py b/searx/engines/genius.py
index 1f4b4b03e..db1f66603 100644
--- a/searx/engines/genius.py
+++ b/searx/engines/genius.py
@@ -5,7 +5,6 @@
"""
-from json import loads
from urllib.parse import urlencode
from datetime import datetime
@@ -26,6 +25,7 @@ page_size = 5
url = 'https://genius.com/api/'
search_url = url + 'search/{index}?{query}&page={pageno}&per_page={page_size}'
+music_player = 'https://genius.com{api_path}/apple_music_player'
def request(query, params):
@@ -39,20 +39,28 @@ def request(query, params):
def parse_lyric(hit):
- try:
+ content = ''
+ highlights = hit['highlights']
+ if highlights:
content = hit['highlights'][0]['value']
- except Exception as e: # pylint: disable=broad-except
- logger.error(e, exc_info=True)
- content = ''
+ else:
+ content = hit['result'].get('title_with_featured', '')
+
timestamp = hit['result']['lyrics_updated_at']
result = {
'url': hit['result']['url'],
'title': hit['result']['full_title'],
'content': content,
- 'thumbnail': hit['result']['song_art_image_thumbnail_url'],
+ 'img_src': hit['result']['song_art_image_thumbnail_url'],
}
if timestamp:
result.update({'publishedDate': datetime.fromtimestamp(timestamp)})
+ api_path = hit['result'].get('api_path')
+ if api_path:
+        # The players only play a 30 sec sample of the title. Some of the players
+        # will be blocked because of a cross-origin request, and some players will
+        # link to Apple when you press the play button.
+ result['iframe_src'] = music_player.format(api_path=api_path)
return result
@@ -61,26 +69,25 @@ def parse_artist(hit):
'url': hit['result']['url'],
'title': hit['result']['name'],
'content': '',
- 'thumbnail': hit['result']['image_url'],
+ 'img_src': hit['result']['image_url'],
}
return result
def parse_album(hit):
- result = {
- 'url': hit['result']['url'],
- 'title': hit['result']['full_title'],
- 'thumbnail': hit['result']['cover_art_url'],
- 'content': '',
+ res = hit['result']
+ content = res.get('name_with_artist', res.get('name', ''))
+ x = res.get('release_date_components')
+ if x:
+ x = x.get('year')
+ if x:
+ content = "%s / %s" % (x, content)
+ return {
+ 'url': res['url'],
+ 'title': res['full_title'],
+ 'img_src': res['cover_art_url'],
+ 'content': content.strip(),
}
- try:
- year = hit['result']['release_date_components']['year']
- except Exception as e: # pylint: disable=broad-except
- logger.error(e, exc_info=True)
- else:
- if year:
- result.update({'content': 'Released: {}'.format(year)})
- return result
parse = {'lyric': parse_lyric, 'song': parse_lyric, 'artist': parse_artist, 'album': parse_album}
@@ -88,10 +95,9 @@ parse = {'lyric': parse_lyric, 'song': parse_lyric, 'artist': parse_artist, 'alb
def response(resp):
results = []
- json = loads(resp.text)
- hits = [hit for section in json['response']['sections'] for hit in section['hits']]
- for hit in hits:
- func = parse.get(hit['type'])
- if func:
- results.append(func(hit))
+ for section in resp.json()['response']['sections']:
+ for hit in section['hits']:
+ func = parse.get(hit['type'])
+ if func:
+ results.append(func(hit))
return results
diff --git a/searx/engines/google_images.py b/searx/engines/google_images.py
index 2855860d8..fc192d62d 100644
--- a/searx/engines/google_images.py
+++ b/searx/engines/google_images.py
@@ -13,6 +13,7 @@
https://developer.mozilla.org/en-US/docs/Web/HTTP/Basics_of_HTTP/Data_URIs
"""
+import re
from urllib.parse import urlencode, unquote
from lxml import html
@@ -68,15 +69,55 @@ def scrap_out_thumbs(dom):
return ret_val
-def scrap_img_by_id(script, data_id):
- """Get full image URL by data-id in parent element"""
- img_url = ''
- _script = script.split('\n')
- for i, line in enumerate(_script):
- if 'gstatic.com/images' in line and data_id in line and i + 1 < len(_script):
- url_line = _script[i + 1]
- img_url = url_line.split('"')[1]
- img_url = unquote(img_url.replace(r'\u00', r'%'))
+# [0, "-H96xjSoW5DsgM", ["https://encrypted-tbn0.gstatic.com/images?q...", 155, 324]
+# , ["https://assets.cdn.moviepilot.de/files/d3bf..", 576, 1200],
+_RE_JS_IMAGE_URL = re.compile(
+ r'"'
+ r'([^"]*)' # -H96xjSoW5DsgM
+ r'",\s*\["'
+ r'https://[^\.]*\.gstatic.com/images[^"]*' # https://encrypted-tbn0.gstatic.com/images?q...
+ r'[^\[]*\["'
+ r'(https?://[^"]*)' # https://assets.cdn.moviepilot.de/files/d3bf...
+)
+
+
+def parse_urls_img_from_js(dom):
+
+ # There are two HTML script tags starting with a JS function
+ # 'AF_initDataCallback(...)'
+ #
+ # <script nonce="zscm+Ab/JzBk1Qd4GY6wGQ">
+ # AF_initDataCallback({key: 'ds:0', hash: '1', data:[], sideChannel: {}});
+ # </script>
+ # <script nonce="zscm+Ab/JzBk1Qd4GY6wGQ">
+ # AF_initDataCallback({key: 'ds:1', hash: '2', data:[null,[[["online_chips",[["the big",
+ # ["https://encrypted-tbn0.gstatic.com/images?q...",null,null,true,[null,0],f
+ # ...
+ # </script>
+ #
+ # The second script contains the URLs of the images.
+
+    # The AF_initDataCallback(..) is called with a very large dictionary that
+    # looks like JSON but is not valid JSON, since it contains JS variables and
+    # constants like 'null' (so we can't use a JSON parser for it).
+ #
+    # The alternative is to parse the entire <script> and find all image URLs
+    # with a regular expression.
+
+ img_src_script = eval_xpath_getindex(dom, '//script[contains(., "AF_initDataCallback({key: ")]', 1).text
+ data_id_to_img_url = {}
+ for data_id, url in _RE_JS_IMAGE_URL.findall(img_src_script):
+ data_id_to_img_url[data_id] = url
+ return data_id_to_img_url
+
+
+def get_img_url_by_data_id(data_id_to_img_url, img_node):
+ """Get full image URL by @data-id from parent element."""
+
+ data_id = eval_xpath_getindex(img_node, '../../../@data-id', 0)
+ img_url = data_id_to_img_url.get(data_id, '')
+ img_url = unquote(img_url.replace(r'\u00', r'%'))
+
return img_url
@@ -123,7 +164,7 @@ def response(resp):
# convert the text to dom
dom = html.fromstring(resp.text)
img_bas64_map = scrap_out_thumbs(dom)
- img_src_script = eval_xpath_getindex(dom, '//script[contains(., "AF_initDataCallback({key: ")]', 1).text
+ data_id_to_img_url = parse_urls_img_from_js(dom)
# parse results
#
@@ -178,8 +219,7 @@ def response(resp):
pub_descr = extract_text(pub_nodes[0])
pub_source = extract_text(pub_nodes[1])
- img_src_id = eval_xpath_getindex(img_node, '../../../@data-id', 0)
- src_url = scrap_img_by_id(img_src_script, img_src_id)
+ src_url = get_img_url_by_data_id(data_id_to_img_url, img_node)
if not src_url:
src_url = thumbnail_src
@@ -190,7 +230,6 @@ def response(resp):
'content': pub_descr,
'source': pub_source,
'img_src': src_url,
- # 'img_format': img_format,
'thumbnail_src': thumbnail_src,
'template': 'images.html',
}