summary refs log tree commit diff
path: root/searx/engines
diff options
context:
space:
mode:
authorAdam Tauber <asciimoo@gmail.com>2015-02-01 14:07:34 +0100
committerAdam Tauber <asciimoo@gmail.com>2015-02-01 14:07:34 +0100
commit03137eebd9fdfaa57452cb364c1bc9f31b243f67 (patch)
treeb95f6f124cb9f2574e0835ec3f182b0d222719e7 /searx/engines
parent4a20fc202e886eaf7778481c403106e6243f49b7 (diff)
parenta605d0ae698e8a5555935780d83df50b06727f24 (diff)
Merge pull request #208 from pointhi/new_engines
add 1x.com engine, improve yacy-engine
Diffstat (limited to 'searx/engines')
-rw-r--r--searx/engines/www1x.py82
-rw-r--r--searx/engines/yacy.py25
2 files changed, 94 insertions, 13 deletions
diff --git a/searx/engines/www1x.py b/searx/engines/www1x.py
new file mode 100644
index 000000000..a68c105ce
--- /dev/null
+++ b/searx/engines/www1x.py
@@ -0,0 +1,82 @@
+## 1x (Images)
+#
+# @website http://1x.com/
+# @provide-api no
+#
+# @using-api no
+# @results HTML
+# @stable no (HTML can change)
+# @parse url, title, thumbnail, img_src, content
+
+
+from urllib import urlencode
+from urlparse import urljoin
+from lxml import html
+import string
+import re
+
+# engine dependent config
+categories = ['images']
+paging = False
+
+# search-url
+base_url = 'http://1x.com'
+search_url = base_url+'/backend/search.php?{query}'
+
+
+# do search-request
+def request(query, params):
+ params['url'] = search_url.format(query=urlencode({'q': query}))
+
+ return params
+
+
+# get response from search-request
+def response(resp):
+ results = []
+
+ # get links from result-text
+ regex = re.compile('(</a>|<a)')
+ results_parts = re.split(regex, resp.text)
+
+ cur_element = ''
+
+ # iterate over link parts
+ for result_part in results_parts:
+ # processed start and end of link
+ if result_part == '<a':
+ cur_element = result_part
+ continue
+ elif result_part != '</a>':
+ cur_element += result_part
+ continue
+
+ cur_element += result_part
+
+ # fix xml-error
+ cur_element = string.replace(cur_element, '"></a>', '"/></a>')
+
+ dom = html.fromstring(cur_element)
+ link = dom.xpath('//a')[0]
+
+ url = urljoin(base_url, link.attrib.get('href'))
+ title = link.attrib.get('title', '')
+
+ thumbnail_src = urljoin(base_url, link.xpath('.//img')[0].attrib['src'])
+ # TODO: get image with higher resolution
+ img_src = thumbnail_src
+
+ # check if url is showing to a photo
+ if '/photo/' not in url:
+ continue
+
+ # append result
+ results.append({'url': url,
+ 'title': title,
+ 'img_src': img_src,
+ 'content': '',
+ 'thumbnail_src': thumbnail_src,
+ 'template': 'images.html'})
+
+ # return results
+ return results
diff --git a/searx/engines/yacy.py b/searx/engines/yacy.py
index 4c4fac7df..17e2a7aab 100644
--- a/searx/engines/yacy.py
+++ b/searx/engines/yacy.py
@@ -68,9 +68,18 @@ def response(resp):
search_results = raw_search_results.get('channels', {})[0].get('items', [])
- if resp.search_params['category'] == 'general':
+ for result in search_results:
+ # parse image results
+ if result.get('image'):
+ # append result
+ results.append({'url': result['url'],
+ 'title': result['title'],
+ 'content': '',
+ 'img_src': result['image'],
+ 'template': 'images.html'})
+
# parse general results
- for result in search_results:
+ else:
publishedDate = parser.parse(result['pubDate'])
# append result
@@ -79,17 +88,7 @@ def response(resp):
'content': result['description'],
'publishedDate': publishedDate})
- elif resp.search_params['category'] == 'images':
- # parse image results
- for result in search_results:
- # append result
- results.append({'url': result['url'],
- 'title': result['title'],
- 'content': '',
- 'img_src': result['image'],
- 'template': 'images.html'})
-
- #TODO parse video, audio and file results
+ #TODO parse video, audio and file results
# return results
return results