summary | refs | log | tree | commit | diff
path: root/searx
diff options
context:
space:
mode:
Diffstat (limited to 'searx')
-rw-r--r--  searx/engines/digg.py     |  7
-rw-r--r--  searx/engines/faroo.py    | 58
-rw-r--r--  searx/engines/torrentz.py | 38
-rw-r--r--  searx/settings.yml        | 15
4 files changed, 58 insertions, 60 deletions
diff --git a/searx/engines/digg.py b/searx/engines/digg.py
index 606747a4d..4369ccb84 100644
--- a/searx/engines/digg.py
+++ b/searx/engines/digg.py
@@ -10,6 +10,8 @@
@parse url, title, content, publishedDate, thumbnail
"""
+import random
+import string
from dateutil import parser
from json import loads
from lxml import html
@@ -30,12 +32,17 @@ title_xpath = './/h2//a//text()'
content_xpath = './/p//text()'
pubdate_xpath = './/time'
+digg_cookie_chars = string.ascii_uppercase + string.ascii_lowercase +\
+ string.digits + "+_"
+
# do search-request
def request(query, params):
offset = (params['pageno'] - 1) * 10
params['url'] = search_url.format(position=offset,
query=quote_plus(query))
+ params['cookies']['frontend.auid'] = ''.join(random.choice(
+ digg_cookie_chars) for _ in range(22))
return params
diff --git a/searx/engines/faroo.py b/searx/engines/faroo.py
index e24d1b7dc..7ce3a6ce8 100644
--- a/searx/engines/faroo.py
+++ b/searx/engines/faroo.py
@@ -4,7 +4,7 @@
@website http://www.faroo.com
@provide-api yes (http://www.faroo.com/hp/api/api.html), require API-key
- @using-api yes
+ @using-api no
@results JSON
@stable yes
@parse url, title, content, publishedDate, img_src
@@ -20,18 +20,16 @@ categories = ['general', 'news']
paging = True
language_support = True
number_of_results = 10
-api_key = None
# search-url
url = 'http://www.faroo.com/'
-search_url = url + 'api?{query}'\
- '&start={offset}'\
- '&length={number_of_results}'\
- '&l={language}'\
- '&src={categorie}'\
- '&i=false'\
- '&f=json'\
- '&key={api_key}' # noqa
+search_url = url + 'instant.json?{query}'\
+ '&start={offset}'\
+ '&length={number_of_results}'\
+ '&l={language}'\
+ '&src={categorie}'\
+ '&i=false'\
+ '&c=false'
search_category = {'general': 'web',
'news': 'news'}
@@ -57,21 +55,15 @@ def request(query, params):
number_of_results=number_of_results,
query=urlencode({'q': query}),
language=language,
- categorie=categorie,
- api_key=api_key)
+ categorie=categorie)
- # using searx User-Agent
- params['headers']['User-Agent'] = searx_useragent()
+ params['headers']['Referer'] = url
return params
# get response from search-request
def response(resp):
- # HTTP-Code 401: api-key is not valide
- if resp.status_code == 401:
- raise Exception("API key is not valide")
-
# HTTP-Code 429: rate limit exceeded
if resp.status_code == 429:
raise Exception("rate limit has been exceeded!")
@@ -86,31 +78,19 @@ def response(resp):
# parse results
for result in search_res['results']:
+ publishedDate = None
+ result_json = {'url': result['url'], 'title': result['title'],
+ 'content': result['kwic']}
if result['news']:
- # timestamp (milliseconds since 1970)
- publishedDate = datetime.datetime.fromtimestamp(result['date'] / 1000.0) # noqa
-
- # append news result
- results.append({'url': result['url'],
- 'title': result['title'],
- 'publishedDate': publishedDate,
- 'content': result['kwic']})
-
- else:
- # append general result
- # TODO, publishedDate correct?
- results.append({'url': result['url'],
- 'title': result['title'],
- 'content': result['kwic']})
+ result_json['publishedDate'] = \
+ datetime.datetime.fromtimestamp(result['date'] / 1000.0)
# append image result if image url is set
- # TODO, show results with an image like in faroo
if result['iurl']:
- results.append({'template': 'images.html',
- 'url': result['url'],
- 'title': result['title'],
- 'content': result['kwic'],
- 'img_src': result['iurl']})
+ result_json['template'] = 'videos.html'
+ result_json['thumbnail'] = result['iurl']
+
+ results.append(result_json)
# return results
return results
diff --git a/searx/engines/torrentz.py b/searx/engines/torrentz.py
index dda56fc22..fd4164a66 100644
--- a/searx/engines/torrentz.py
+++ b/searx/engines/torrentz.py
@@ -1,7 +1,7 @@
"""
- Torrentz.eu (BitTorrent meta-search engine)
+ Torrentz2.eu (BitTorrent meta-search engine)
- @website https://torrentz.eu/
+ @website https://torrentz2.eu/
@provide-api no
@using-api no
@@ -14,24 +14,24 @@
import re
from lxml import html
from datetime import datetime
-from searx.engines.nyaa import int_or_zero, get_filesize_mul
from searx.engines.xpath import extract_text
from searx.url_utils import urlencode
+from searx.utils import get_torrent_size
# engine dependent config
categories = ['files', 'videos', 'music']
paging = True
# search-url
-# https://torrentz.eu/search?f=EXAMPLE&p=6
-base_url = 'https://torrentz.eu/'
+# https://torrentz2.eu/search?f=EXAMPLE&p=6
+base_url = 'https://torrentz2.eu/'
search_url = base_url + 'search?{query}'
# do search-request
def request(query, params):
page = params['pageno'] - 1
- query = urlencode({'q': query, 'p': page})
+ query = urlencode({'f': query, 'p': page})
params['url'] = search_url.format(query=query)
return params
@@ -54,22 +54,29 @@ def response(resp):
# extract url and remove a slash in the beginning
link = links[0].attrib.get('href').lstrip('/')
- seed = result.xpath('./dd/span[@class="u"]/text()')[0].replace(',', '')
- leech = result.xpath('./dd/span[@class="d"]/text()')[0].replace(',', '')
+ seed = 0
+ leech = 0
+ try:
+ seed = int(result.xpath('./dd/span[4]/text()')[0].replace(',', ''))
+ leech = int(result.xpath('./dd/span[5]/text()')[0].replace(',', ''))
+ except:
+ pass
params = {
'url': base_url + link,
'title': title,
- 'seed': int_or_zero(seed),
- 'leech': int_or_zero(leech),
+ 'seed': seed,
+ 'leech': leech,
'template': 'torrent.html'
}
# let's try to calculate the torrent size
try:
- size_str = result.xpath('./dd/span[@class="s"]/text()')[0]
- size, suffix = size_str.split()
- params['filesize'] = int(size) * get_filesize_mul(suffix)
+ filesize_info = result.xpath('./dd/span[3]/text()')[0]
+ filesize, filesize_multiplier = filesize_info.split()
+ filesize = get_torrent_size(filesize, filesize_multiplier)
+
+ params['filesize'] = filesize
except:
pass
@@ -80,9 +87,8 @@ def response(resp):
# extract and convert creation date
try:
- date_str = result.xpath('./dd/span[@class="a"]/span')[0].attrib.get('title')
- # Fri, 25 Mar 2016 16:29:01
- date = datetime.strptime(date_str, '%a, %d %b %Y %H:%M:%S')
+ date_ts = result.xpath('./dd/span[2]')[0].attrib.get('title')
+ date = datetime.fromtimestamp(float(date_ts))
params['publishedDate'] = date
except:
pass
diff --git a/searx/settings.yml b/searx/settings.yml
index 7822c6483..8f57e2e84 100644
--- a/searx/settings.yml
+++ b/searx/settings.yml
@@ -189,11 +189,10 @@ engines:
shortcut : et
disabled : True
-# api-key required: http://www.faroo.com/hp/api/api.html#key
-# - name : faroo
-# engine : faroo
-# shortcut : fa
-# api_key : 'apikey' # required!
+ - name : faroo
+ engine : faroo
+ shortcut : fa
+ disabled : True
- name : 500px
engine : www500px
@@ -552,6 +551,12 @@ engines:
timeout : 6.0
disabled : True
+ - name : torrentz
+ engine : torrentz
+ shortcut : tor
+ url: https://torrentz2.eu/
+ timeout : 3.0
+
- name : twitter
engine : twitter
shortcut : tw