summary refs log tree commit diff
path: root/searx/engines
diff options
context:
space:
mode:
Diffstat (limited to 'searx/engines')
-rw-r--r--  searx/engines/google_news.py  9
-rw-r--r--  searx/engines/yahoo_news.py   20
2 files changed, 28 insertions, 1 deletion
diff --git a/searx/engines/google_news.py b/searx/engines/google_news.py
index 935718609..afda3e756 100644
--- a/searx/engines/google_news.py
+++ b/searx/engines/google_news.py
@@ -2,6 +2,7 @@
from urllib import urlencode
from json import loads
+from datetime import datetime, timedelta
categories = ['news']
@@ -31,7 +32,15 @@ def response(resp):
return []
for result in search_res['responseData']['results']:
+# S.149 (159), library.pdf
+# datetime.strptime("Mon, 10 Mar 2014 16:26:15 -0700", "%a, %d %b %Y %H:%M:%S %z")
+# publishedDate = parse(result['publishedDate'])
+ publishedDate = datetime.strptime(str.join(' ',result['publishedDate'].split(None)[0:5]), "%a, %d %b %Y %H:%M:%S")
+ #utc_offset = timedelta(result['publishedDate'].split(None)[5]) # local = utc + offset
+ #publishedDate = publishedDate + utc_offset
+
results.append({'url': result['unescapedUrl'],
'title': result['titleNoFormatting'],
+ 'publishedDate': publishedDate,
'content': result['content']})
return results
diff --git a/searx/engines/yahoo_news.py b/searx/engines/yahoo_news.py
index 35e323917..13a8a6024 100644
--- a/searx/engines/yahoo_news.py
+++ b/searx/engines/yahoo_news.py
@@ -4,6 +4,8 @@ from urllib import urlencode
from lxml import html
from searx.engines.xpath import extract_text, extract_url
from searx.engines.yahoo import parse_url
+from datetime import datetime, timedelta
+import re
categories = ['news']
search_url = 'http://news.search.yahoo.com/search?{query}&b={offset}'
@@ -11,6 +13,7 @@ results_xpath = '//div[@class="res"]'
url_xpath = './/h3/a/@href'
title_xpath = './/h3/a'
content_xpath = './/div[@class="abstr"]'
+publishedDate_xpath = './/span[@class="timestamp"]'
suggestion_xpath = '//div[@id="satat"]//a'
paging = True
@@ -37,7 +40,22 @@ def response(resp):
url = parse_url(extract_url(result.xpath(url_xpath), search_url))
title = extract_text(result.xpath(title_xpath)[0])
content = extract_text(result.xpath(content_xpath)[0])
- results.append({'url': url, 'title': title, 'content': content})
+ publishedDate = extract_text(result.xpath(publishedDate_xpath)[0])
+
+ if re.match("^[0-9]+ minute(s|) ago$", publishedDate):
+ publishedDate = datetime.now() - timedelta(minutes=int(re.match(r'\d+', publishedDate).group()))
+ else:
+ if re.match("^[0-9]+ hour(s|), [0-9]+ minute(s|) ago$", publishedDate):
+ timeNumbers = re.findall(r'\d+', publishedDate)
+ publishedDate = datetime.now() - timedelta(hours=int(timeNumbers[0])) - timedelta(minutes=int(timeNumbers[1]))
+ else:
+ # TODO year in string possible?
+ publishedDate = datetime.strptime(publishedDate,"%b %d %H:%M%p")
+
+ if publishedDate.year == 1900:
+ publishedDate = publishedDate.replace(year=datetime.now().year)
+
+ results.append({'url': url, 'title': title, 'content': content,'publishedDate':publishedDate})
if not suggestion_xpath:
return results