summaryrefslogtreecommitdiff
path: root/searx/engines/bing_news.py
diff options
context:
space:
mode:
authorAdam Tauber <adam.tauber@balabit.com>2015-02-02 09:37:12 +0100
committerAdam Tauber <adam.tauber@balabit.com>2015-02-02 09:37:12 +0100
commit0e6f8393ab8b29b2e85d1fafdc7442455767f753 (patch)
tree60e9acb27577968a41136c04f248c24871e83860 /searx/engines/bing_news.py
parent03137eebd9fdfaa57452cb364c1bc9f31b243f67 (diff)
parent7f865356f9a6c1b40d0c668c59b3d081de618bac (diff)
Merge branch 'Cqoicebordel-unit-tests'
Diffstat (limited to 'searx/engines/bing_news.py')
-rw-r--r--searx/engines/bing_news.py31
1 file changed, 12 insertions, 19 deletions
diff --git a/searx/engines/bing_news.py b/searx/engines/bing_news.py
index 789a23b89..e6adb2644 100644
--- a/searx/engines/bing_news.py
+++ b/searx/engines/bing_news.py
@@ -15,6 +15,7 @@ from lxml import html
from datetime import datetime, timedelta
from dateutil import parser
import re
+from searx.engines.xpath import extract_text
# engine dependent config
categories = ['news']
@@ -42,6 +43,7 @@ def request(query, params):
params['cookies']['_FP'] = "ui=en-US"
params['url'] = base_url + search_path
+
return params
@@ -55,44 +57,35 @@ def response(resp):
for result in dom.xpath('//div[@class="sn_r"]'):
link = result.xpath('.//div[@class="newstitle"]/a')[0]
url = link.attrib.get('href')
- title = ' '.join(link.xpath('.//text()'))
- contentXPath = result.xpath('.//div[@class="sn_txt"]/div'
- '//span[@class="sn_snip"]//text()')
- if contentXPath is not None:
- content = escape(' '.join(contentXPath))
+ title = extract_text(link)
+ contentXPath = result.xpath('.//div[@class="sn_txt"]/div//span[@class="sn_snip"]')
+ content = escape(extract_text(contentXPath))
# parse publishedDate
publishedDateXPath = result.xpath('.//div[@class="sn_txt"]/div'
'//span[contains(@class,"sn_ST")]'
- '//span[contains(@class,"sn_tm")]'
- '//text()')
- if publishedDateXPath is not None:
- publishedDate = escape(' '.join(publishedDateXPath))
+ '//span[contains(@class,"sn_tm")]')
+
+ publishedDate = escape(extract_text(publishedDateXPath))
if re.match("^[0-9]+ minute(s|) ago$", publishedDate):
timeNumbers = re.findall(r'\d+', publishedDate)
- publishedDate = datetime.now()\
- - timedelta(minutes=int(timeNumbers[0]))
+ publishedDate = datetime.now() - timedelta(minutes=int(timeNumbers[0]))
elif re.match("^[0-9]+ hour(s|) ago$", publishedDate):
timeNumbers = re.findall(r'\d+', publishedDate)
- publishedDate = datetime.now()\
- - timedelta(hours=int(timeNumbers[0]))
- elif re.match("^[0-9]+ hour(s|),"
- " [0-9]+ minute(s|) ago$", publishedDate):
+ publishedDate = datetime.now() - timedelta(hours=int(timeNumbers[0]))
+ elif re.match("^[0-9]+ hour(s|), [0-9]+ minute(s|) ago$", publishedDate):
timeNumbers = re.findall(r'\d+', publishedDate)
publishedDate = datetime.now()\
- timedelta(hours=int(timeNumbers[0]))\
- timedelta(minutes=int(timeNumbers[1]))
elif re.match("^[0-9]+ day(s|) ago$", publishedDate):
timeNumbers = re.findall(r'\d+', publishedDate)
- publishedDate = datetime.now()\
- - timedelta(days=int(timeNumbers[0]))
+ publishedDate = datetime.now() - timedelta(days=int(timeNumbers[0]))
else:
try:
- # FIXME use params['language'] to parse either mm/dd or dd/mm
publishedDate = parser.parse(publishedDate, dayfirst=False)
except TypeError:
- # FIXME
publishedDate = datetime.now()
# append result