diff options
| author | Adam Tauber <asciimoo@gmail.com> | 2016-12-11 03:31:33 +0100 |
|---|---|---|
| committer | Adam Tauber <asciimoo@gmail.com> | 2016-12-11 03:31:33 +0100 |
| commit | e12ea9a5106a8b58080c0395c2fcf3f2a84808fe (patch) | |
| tree | fa08a5f4ee6c0a1971400faaa63d57a674d060ea /searx/engines | |
| parent | f6e9c074bbe8b4237ee361befa8dcb2c6d31a11a (diff) | |
| parent | 9b2bd6847f707e76d10e1ff8b80d68c56b4956f3 (diff) | |
Merge branch '500px_rewrite' of github.com:asciimoo/searx
Diffstat (limited to 'searx/engines')
| -rw-r--r-- | searx/engines/www500px.py | 42 |
1 file changed, 25 insertions, 17 deletions
diff --git a/searx/engines/www500px.py b/searx/engines/www500px.py index f1bc6c583..546521ba3 100644 --- a/searx/engines/www500px.py +++ b/searx/engines/www500px.py @@ -12,12 +12,9 @@ @todo rewrite to api """ - +from json import loads from urllib import urlencode from urlparse import urljoin -from lxml import html -import re -from searx.engines.xpath import extract_text # engine dependent config categories = ['images'] @@ -25,13 +22,27 @@ paging = True # search-url base_url = 'https://500px.com' -search_url = base_url + '/search?search?page={pageno}&type=photos&{query}' +search_url = 'https://api.500px.com/v1/photos/search?type=photos'\ + '&{query}'\ + '&image_size%5B%5D=4'\ + '&image_size%5B%5D=20'\ + '&image_size%5B%5D=21'\ + '&image_size%5B%5D=1080'\ + '&image_size%5B%5D=1600'\ + '&image_size%5B%5D=2048'\ + '&include_states=true'\ + '&formats=jpeg%2Clytro'\ + '&include_tags=true'\ + '&exclude_nude=true'\ + '&page={pageno}'\ + '&rpp=50'\ + '&sdk_key=b68e60cff4c929bedea36ca978830c5caca790c3' # do search-request def request(query, params): params['url'] = search_url.format(pageno=params['pageno'], - query=urlencode({'q': query})) + query=urlencode({'term': query})) return params @@ -40,19 +51,16 @@ def request(query, params): def response(resp): results = [] - dom = html.fromstring(resp.text) - regex = re.compile(r'3\.jpg.*$') + response_json = loads(resp.text) # parse results - for result in dom.xpath('//div[@class="photo"]'): - link = result.xpath('.//a')[0] - url = urljoin(base_url, link.attrib.get('href')) - title = extract_text(result.xpath('.//div[@class="title"]')) - thumbnail_src = link.xpath('.//img')[0].attrib.get('src') - # To have a bigger thumbnail, uncomment the next line - # thumbnail_src = regex.sub('4.jpg', thumbnail_src) - content = extract_text(result.xpath('.//div[@class="info"]')) - img_src = regex.sub('2048.jpg', thumbnail_src) + for result in response_json['photos']: + url = urljoin(base_url, result['url']) + title = result['name'] + # last index is the biggest resolution + img_src = result['image_url'][-1] + thumbnail_src = result['image_url'][0] + content = result['description'] or '' # append result results.append({'url': url, |