summaryrefslogtreecommitdiff
path: root/searx/engines/sogou_wechat.py
blob: c9d240ec77a88521e28e76f0e2db026d88d8bf35 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Sogou-WeChat search engine for retrieving WeChat Article from Sogou"""

from urllib.parse import urlencode
from datetime import datetime
import re
from lxml import html

from searx.utils import extract_text

# Metadata
# NOTE: standard searx engine `about` block — presumably consumed by the
# engine registry for the preferences page; verify against searx docs.
about = {
    "website": "https://weixin.sogou.com/",
    "use_official_api": False,  # results are scraped from the public HTML page
    "require_api_key": False,
    "results": "HTML",
    "language": "zh",
}

# Engine Configuration
categories = ["news"]
paging = True

# Base URL
base_url = "https://weixin.sogou.com"


def request(query, params):
    """Assemble the Sogou-WeChat search URL and store it in ``params``.

    ``query`` is the user's search string; ``params`` is the searx request
    dict (read for ``pageno``, mutated with ``url``) and is returned.
    """
    # "type": 2 selects WeChat article search on weixin.sogou.com.
    encoded = urlencode(
        {
            "query": query,
            "page": params["pageno"],
            "type": 2,
        }
    )
    params["url"] = f"{base_url}/weixin?{encoded}"
    return params


def response(resp):
    """Parse a Sogou-WeChat HTML result page into searx result dicts.

    Each result carries ``title``, ``url``, ``content``, ``thumbnail`` and,
    when the item embeds a ``timeConvert('<unix-ts>')`` script snippet, a
    ``publishedDate``.
    """
    dom = html.fromstring(resp.text)
    results = []

    # Compile the timestamp pattern once per response instead of
    # re-parsing it on every item in the loop.
    time_pattern = re.compile(r"timeConvert\('(\d+)'\)")

    for item in dom.xpath('//li[contains(@id, "sogou_vr_")]'):
        title = extract_text(item.xpath('.//h3/a'))
        url = extract_text(item.xpath('.//h3/a/@href'))

        # Sogou emits relative redirect links; make them absolute.
        if url.startswith("/link?url="):
            url = f"{base_url}{url}"

        content = extract_text(item.xpath('.//p[@class="txt-info"]'))
        if not content:
            # Fallback: the class attribute may carry additional classes
            # beside "txt-info", which the exact-match XPath above misses.
            content = extract_text(item.xpath('.//p[contains(@class, "txt-info")]'))

        thumbnail = extract_text(item.xpath('.//div[@class="img-box"]/a/img/@src'))
        if thumbnail and thumbnail.startswith("//"):
            # Protocol-relative thumbnail URL -> force https.
            thumbnail = f"https:{thumbnail}"

        published_date = None
        timestamp = extract_text(item.xpath('.//script[contains(text(), "timeConvert")]'))
        if timestamp:
            match = time_pattern.search(timestamp)
            if match:
                # NOTE(review): naive local-time conversion of a unix
                # timestamp; a timezone-aware datetime may be more correct —
                # confirm against searx engine conventions.
                published_date = datetime.fromtimestamp(int(match.group(1)))

        # Skip malformed entries that lack a title or URL.
        if title and url:
            results.append(
                {
                    "title": title,
                    "url": url,
                    "content": content,
                    "thumbnail": thumbnail,
                    "publishedDate": published_date,
                }
            )

    return results