diff options
Diffstat (limited to 'searx/engines')
-rw-r--r--  searx/engines/tineye.py  103
-rw-r--r--  searx/engines/www1x.py  17
2 files changed, 113 insertions, 7 deletions
diff --git a/searx/engines/tineye.py b/searx/engines/tineye.py
new file mode 100644
index 000000000..fe5b60393
--- /dev/null
+++ b/searx/engines/tineye.py
@@ -0,0 +1,103 @@
+# SPDX-License-Identifier: AGPL-3.0-or-later
+# lint: pylint
+"""This engine implements *Tineye - reverse image search*
+
+Using TinEye, you can search by image or perform what we call a reverse image
+search. You can do that by uploading an image or searching by URL. You can also
+simply drag and drop your images to start your search. TinEye constantly crawls
+the web and adds images to its index. Today, the TinEye index is over 50.2
+billion images `[tineye.com] <https://tineye.com/how>`_.
+
+.. hint::
+
+   This SearXNG engine only supports *'searching by URL'* and it does not use
+   the official API `[api.tineye.com] <https://api.tineye.com/python/docs/>`_.
+
+"""
+
+from urllib.parse import urlencode
+from datetime import datetime
+
+about = {
+    "website": 'https://tineye.com',
+    "wikidata_id": 'Q2382535',
+    "official_api_documentation": 'https://api.tineye.com/python/docs/',
+    "use_official_api": False,
+    "require_api_key": False,
+    "results": 'JSON',
+}
+
+engine_type = 'online_url_search'
+categories = ['general']
+paging = True
+safesearch = False
+base_url = 'https://tineye.com'
+search_string = '/result_json/?page={page}&{query}'
+
+
+def request(query, params):
+
+    if params['search_urls']['data:image']:
+        query = params['search_urls']['data:image']
+    elif params['search_urls']['http']:
+        query = params['search_urls']['http']
+
+    query = urlencode({'url': query})
+
+    # see https://github.com/TinEye/pytineye/blob/main/pytineye/api.py
+    params['url'] = base_url + search_string.format(query=query, page=params['pageno'])
+
+    params['headers'].update(
+        {
+            'Connection': 'keep-alive',
+            'Accept-Encoding': 'gzip, deflate, br',
+            'Host': 'tineye.com',
+            'DNT': '1',
+            'TE': 'trailers',
+        }
+    )
+    return params
+
+
+def response(resp):
+    results = []
+
+    # Define wanted results
+    json_data = 
resp.json()
+    number_of_results = json_data['num_matches']
+
+    for i in json_data['matches']:
+        image_format = i['format']
+        width = i['width']
+        height = i['height']
+        thumbnail_src = i['image_url']
+        backlink = i['domains'][0]['backlinks'][0]
+        url = backlink['backlink']
+        source = backlink['url']
+        title = backlink['image_name']
+        img_src = backlink['url']
+
+        # Get and convert published date
+        api_date = backlink['crawl_date'][:-3]
+        publishedDate = datetime.fromisoformat(api_date)
+
+        # Append results
+        results.append(
+            {
+                'template': 'images.html',
+                'url': url,
+                'thumbnail_src': thumbnail_src,
+                'source': source,
+                'title': title,
+                'img_src': img_src,
+                'format': image_format,
+                'width': width,
+                'height': height,
+                'publishedDate': publishedDate,
+            }
+        )
+
+    # Append number of results
+    results.append({'number_of_results': number_of_results})
+
+    return results
diff --git a/searx/engines/www1x.py b/searx/engines/www1x.py
index f6b82944d..a7ec06f18 100644
--- a/searx/engines/www1x.py
+++ b/searx/engines/www1x.py
@@ -1,10 +1,12 @@
 # SPDX-License-Identifier: AGPL-3.0-or-later
-"""
-    1x (Images)
+# lint: pylint
+"""1x (Images)
+
 """
-from lxml import html, etree
 from urllib.parse import urlencode, urljoin
+from lxml import html, etree
+
 from searx.utils import extract_text, eval_xpath_list, eval_xpath_getindex
 
 # about
@@ -38,13 +40,14 @@ def request(query, params):
 def response(resp):
     results = []
     xmldom = etree.fromstring(resp.content)
-    xmlsearchresult = eval_xpath_getindex(xmldom, '//searchresult', 0)
+    xmlsearchresult = eval_xpath_getindex(xmldom, '//data', 0)
     dom = html.fragment_fromstring(xmlsearchresult.text, create_parent='div')
-    for link in eval_xpath_list(dom, '/div/table/tr/td/div[2]//a'):
+    for link in eval_xpath_list(dom, '//a'):
         url = urljoin(base_url, link.attrib.get('href'))
         title = extract_text(link)
-        thumbnail_src = urljoin(gallery_url, eval_xpath_getindex(link, './/img', 0).attrib['src'])
-
+        thumbnail_src = urljoin(
+            gallery_url, 
(eval_xpath_getindex(link, './/img', 0).attrib['src']).replace(base_url, '')
+        )
         # append result
         results.append(
             {