| field | value | date |
|---|---|---|
| author | Alexandre Flament <alex@al-f.net> | 2020-10-04 09:06:20 +0200 |
| committer | GitHub <noreply@github.com> | 2020-10-04 09:06:20 +0200 |
| commit | b728cb610b92161609b1c40babff25749720fc25 (patch) | |
| tree | 733b665dd897cc8e4cd7e37f7d64a052c260d6f0 /searx/utils.py | |
| parent | e2cd9b65bb2b2e1f1085cf48442632da0d52077e (diff) | |
| parent | 8f914a28facec314a2b98b11d3cc1207eb8ee8ab (diff) | |
Merge pull request #2241 from dalf/move-extract-text-and-url
Move the extract_text and extract_url functions to searx.utils
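In practice, engines now import these helpers from `searx.utils`. A hypothetical before/after sketch (the old import path is an assumption, since this view is limited to the `searx/utils.py` side of the move):

```python
# Hypothetical engine module, for illustration only.
# Before the merge (old location assumed, not shown in this view):
#   from searx.engines.xpath import extract_text, extract_url

# After the merge:
from searx.utils import extract_text, extract_url
```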
Diffstat (limited to 'searx/utils.py')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | searx/utils.py | 232 |

1 file changed, 219 insertions(+), 13 deletions(-)
```diff
diff --git a/searx/utils.py b/searx/utils.py
index f74f2ac88..0be3c5b00 100644
--- a/searx/utils.py
+++ b/searx/utils.py
@@ -10,9 +10,13 @@ from os.path import splitext, join
 from io import open
 from random import choice
 from html.parser import HTMLParser
-from lxml.etree import XPath
+from urllib.parse import urljoin, urlparse, unquote
+
+from lxml import html
+from lxml.etree import XPath, _ElementStringResult, _ElementUnicodeResult
 from babel.core import get_global
+
 from searx import settings
 from searx.version import VERSION_STRING
 from searx.languages import language_codes
 
@@ -35,12 +39,17 @@ lang_to_lc_cache = dict()
 
 
 def searx_useragent():
+    """Return the searx User Agent"""
     return 'searx/{searx_version} {suffix}'.format(
         searx_version=VERSION_STRING,
         suffix=settings['outgoing'].get('useragent_suffix', ''))
 
 
 def gen_useragent(os=None):
+    """Return a random browser User Agent
+
+    See searx/data/useragents.json
+    """
     return str(useragents['ua'].format(os=os or choice(useragents['os']), version=choice(useragents['versions'])))
 
 
@@ -95,18 +104,156 @@ class HTMLTextExtractor(HTMLParser):
         return ''.join(self.result).strip()
 
 
-def html_to_text(html):
-    html = html.replace('\n', ' ')
-    html = ' '.join(html.split())
+def html_to_text(html_str):
+    """Extract text from a HTML string
+
+    Args:
+        * html_str (str): string HTML
+
+    Returns:
+        * str: extracted text
+
+    Examples:
+        >>> html_to_text('Example <span id="42">#2</span>')
+        'Example #2'
+
+        >>> html_to_text('<style>.span { color: red; }</style><span>Example</span>')
+        'Example'
+    """
+    html_str = html_str.replace('\n', ' ')
+    html_str = ' '.join(html_str.split())
     s = HTMLTextExtractor()
     try:
-        s.feed(html)
+        s.feed(html_str)
     except HTMLTextExtractorException:
-        logger.debug("HTMLTextExtractor: invalid HTML\n%s", html)
+        logger.debug("HTMLTextExtractor: invalid HTML\n%s", html_str)
     return s.get_text()
 
 
+def extract_text(xpath_results):
+    """Extract text from a lxml result
+
+    * if xpath_results is list, extract the text from each result and concat the list
+    * if xpath_results is a xml element, extract all the text node from it
+      ( text_content() method from lxml )
+    * if xpath_results is a string element, then it's already done
+    """
+    if type(xpath_results) == list:
+        # it's list of result : concat everything using recursive call
+        result = ''
+        for e in xpath_results:
+            result = result + extract_text(e)
+        return result.strip()
+    elif type(xpath_results) in [_ElementStringResult, _ElementUnicodeResult]:
+        # it's a string
+        return ''.join(xpath_results)
+    else:
+        # it's a element
+        text = html.tostring(
+            xpath_results, encoding='unicode', method='text', with_tail=False
+        )
+        text = text.strip().replace('\n', ' ')
+        return ' '.join(text.split())
+
+
+def normalize_url(url, base_url):
+    """Normalize URL: add protocol, join URL with base_url, add trailing slash if there is no path
+
+    Args:
+        * url (str): Relative URL
+        * base_url (str): Base URL, it must be an absolute URL.
+
+    Example:
+        >>> normalize_url('https://example.com', 'http://example.com/')
+        'https://example.com/'
+        >>> normalize_url('//example.com', 'http://example.com/')
+        'http://example.com/'
+        >>> normalize_url('//example.com', 'https://example.com/')
+        'https://example.com/'
+        >>> normalize_url('/path?a=1', 'https://example.com')
+        'https://example.com/path?a=1'
+        >>> normalize_url('', 'https://example.com')
+        'https://example.com/'
+        >>> normalize_url('/test', '/path')
+        raise Exception
+
+    Raises:
+        * lxml.etree.ParserError
+
+    Returns:
+        * str: normalized URL
+    """
+    if url.startswith('//'):
+        # add http or https to this kind of url //example.com/
+        parsed_search_url = urlparse(base_url)
+        url = '{0}:{1}'.format(parsed_search_url.scheme or 'http', url)
+    elif url.startswith('/'):
+        # fix relative url to the search engine
+        url = urljoin(base_url, url)
+
+    # fix relative urls that fall through the crack
+    if '://' not in url:
+        url = urljoin(base_url, url)
+
+    parsed_url = urlparse(url)
+
+    # add a / at this end of the url if there is no path
+    if not parsed_url.netloc:
+        raise Exception('Cannot parse url')
+    if not parsed_url.path:
+        url += '/'
+
+    return url
+
+
+def extract_url(xpath_results, base_url):
+    """Extract and normalize URL from lxml Element
+
+    Args:
+        * xpath_results (Union[List[html.HtmlElement], html.HtmlElement]): lxml Element(s)
+        * base_url (str): Base URL
+
+    Example:
+        >>> def f(s, search_url):
+        >>>    return searx.utils.extract_url(html.fromstring(s), search_url)
+        >>> f('<span id="42">https://example.com</span>', 'http://example.com/')
+        'https://example.com/'
+        >>> f('https://example.com', 'http://example.com/')
+        'https://example.com/'
+        >>> f('//example.com', 'http://example.com/')
+        'http://example.com/'
+        >>> f('//example.com', 'https://example.com/')
+        'https://example.com/'
+        >>> f('/path?a=1', 'https://example.com')
+        'https://example.com/path?a=1'
+        >>> f('', 'https://example.com')
+        raise lxml.etree.ParserError
+        >>> searx.utils.extract_url([], 'https://example.com')
+        raise Exception
+
+    Raises:
+        * Exception
+        * lxml.etree.ParserError
+
+    Returns:
+        * str: normalized URL
+    """
+    if xpath_results == []:
+        raise Exception('Empty url resultset')
+
+    url = extract_text(xpath_results)
+    return normalize_url(url, base_url)
+
+
 def dict_subset(d, properties):
+    """Extract a subset of a dict
+
+    Examples:
+        >>> dict_subset({'A': 'a', 'B': 'b', 'C': 'c'}, ['A', 'C'])
+        {'A': 'a', 'C': 'c'}
+        >>> dict_subset({'A': 'a', 'B': 'b', 'C': 'c'}, ['A', 'D'])
+        {'A': 'a'}
+    """
     result = {}
     for k in properties:
         if k in d:
@@ -114,8 +261,19 @@ def dict_subset(d, properties):
     return result
 
 
-# get element in list or default value
 def list_get(a_list, index, default=None):
+    """Get element in list or default value
+
+    Examples:
+        >>> list_get(['A', 'B', 'C'], 0)
+        'A'
+        >>> list_get(['A', 'B', 'C'], 3)
+        None
+        >>> list_get(['A', 'B', 'C'], 3, 'default')
+        'default'
+        >>> list_get(['A', 'B', 'C'], -1)
+        'C'
+    """
     if len(a_list) > index:
         return a_list[index]
     else:
@@ -123,6 +281,21 @@ def get_torrent_size(filesize, filesize_multiplier):
+    """
+
+    Args:
+        * filesize (str): size
+        * filesize_multiplier (str): TB, GB, .... TiB, GiB...
+
+    Returns:
+        * int: number of bytes
+
+    Example:
+        >>> get_torrent_size('5', 'GB')
+        5368709120
+        >>> get_torrent_size('3.14', 'MiB')
+        3140000
+    """
     try:
         filesize = float(filesize)
 
@@ -149,14 +322,18 @@ def convert_str_to_int(number_str):
+    """Convert number_str to int or 0 if number_str is not a number."""
     if number_str.isdigit():
         return int(number_str)
     else:
         return 0
 
 
-# convert a variable to integer or return 0 if it's not a number
 def int_or_zero(num):
+    """Convert num to int or 0. num can be either a str or a list.
+    If num is a list, the first element is converted to int (or return 0 if the list is empty).
+    If num is a str, see convert_str_to_int
+    """
     if isinstance(num, list):
         if len(num) < 1:
             return 0
@@ -165,6 +342,22 @@ def is_valid_lang(lang):
+    """Return language code and name if lang describe a language.
+
+    Examples:
+        >>> is_valid_lang('zz')
+        False
+        >>> is_valid_lang('uk')
+        (True, 'uk', 'ukrainian')
+        >>> is_valid_lang(b'uk')
+        (True, 'uk', 'ukrainian')
+        >>> is_valid_lang('en')
+        (True, 'en', 'english')
+        >>> searx.utils.is_valid_lang('Español')
+        (True, 'es', 'spanish')
+        >>> searx.utils.is_valid_lang('Spanish')
+        (True, 'es', 'spanish')
+    """
     if isinstance(lang, bytes):
         lang = lang.decode()
     is_abbr = (len(lang) == 2)
@@ -192,8 +385,8 @@
     return value
 
 
-# auxiliary function to match lang_code in lang_list
 def _match_language(lang_code, lang_list=[], custom_aliases={}):
+    """auxiliary function to match lang_code in lang_list"""
     # replace language code with a custom alias if necessary
     if lang_code in custom_aliases:
         lang_code = custom_aliases[lang_code]
@@ -215,8 +408,8 @@
     return _get_lang_to_lc_dict(lang_list).get(lang_code, None)
 
 
-# get the language code from lang_list that best matches locale_code
 def match_language(locale_code, lang_list=[], custom_aliases={}, fallback='en-US'):
+    """get the language code from lang_list that best matches locale_code"""
     # try to get language from given locale_code
     language = _match_language(locale_code, lang_list, custom_aliases)
     if language:
@@ -258,6 +451,7 @@ def load_module(filename, module_dir):
 
 
 def to_string(obj):
+    """Convert obj to its string representation."""
     if isinstance(obj, str):
         return obj
     if isinstance(obj, Number):
@@ -269,13 +463,19 @@
 def ecma_unescape(s):
-    """
-    python implementation of the unescape javascript function
+    """Python implementation of the unescape javascript function
     https://www.ecma-international.org/ecma-262/6.0/#sec-unescape-string
     https://developer.mozilla.org/fr/docs/Web/JavaScript/Reference/Objets_globaux/unescape
+
+    Examples:
+        >>> ecma_unescape('%u5409')
+        '吉'
+        >>> ecma_unescape('%20')
+        ' '
+        >>> ecma_unescape('%F3')
+        'ó'
     """
-    # s = unicode(s)
     # "%u5409" becomes "吉"
     s = ecma_unescape4_re.sub(lambda e: chr(int(e.group(1), 16)), s)
     # "%20" becomes " ", "%F3" becomes "ó"
@@ -299,6 +499,11 @@ def get_engine_from_settings(name):
 
 
 def get_xpath(xpath_str):
+    """Return cached compiled XPath
+
+    There is no thread lock.
+    Worst case scenario, xpath_str is compiled more than one time.
+    """
    result = xpath_cache.get(xpath_str, None)
     if result is None:
         result = XPath(xpath_str)
@@ -307,5 +512,6 @@
 
 
 def eval_xpath(element, xpath_str):
+    """Equivalent of element.xpath(xpath_str) but compile xpath_str once for all."""
     xpath = get_xpath(xpath_str)
     return xpath(element)
```
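Taken together, `eval_xpath`, `extract_text`, and `extract_url` cover the typical scraping pattern in an engine's response parser. A minimal sketch of that pattern, assuming a searx checkout on `PYTHONPATH` so `searx.utils` imports; the result markup and base URL are made up for illustration:

```python
from lxml import html

from searx.utils import eval_xpath, extract_text, extract_url

# Made-up result markup, standing in for an engine's HTTP response body.
doc = html.fromstring(
    '<div class="result">'
    '<a href="/path?a=1">Example <span id="42">#2</span></a>'
    '</div>'
)

for result in eval_xpath(doc, '//div[@class="result"]'):
    # extract_text concatenates all text nodes of the matched element(s)
    title = extract_text(eval_xpath(result, './/a'))
    # extract_url extracts the text, then normalizes it against base_url
    url = extract_url(eval_xpath(result, './/a/@href'), 'https://example.com')
    print(title, '->', url)
    # Example #2 -> https://example.com/path?a=1
```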
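The `ecma_unescape` hunk references two precompiled regexes (`ecma_unescape4_re`, `ecma_unescape2_re`) that fall outside the lines shown. A standalone re-statement of the visible logic; the exact patterns are assumptions inferred from the substitutions and docstring examples, not copied from the source:

```python
import re

# Patterns assumed from the visible substitutions, not copied from the source:
# %uXXXX -> chr(0xXXXX), then %XX -> chr(0xXX). The 4-digit form must run first
# so that '%u0041' is not partially consumed by the 2-digit pattern.
ecma_unescape4_re = re.compile(r'%u([0-9a-fA-F]{4})')
ecma_unescape2_re = re.compile(r'%([0-9a-fA-F]{2})')

def ecma_unescape(s):
    """Python take on JavaScript's unescape(), per the diff's docstring examples."""
    s = ecma_unescape4_re.sub(lambda e: chr(int(e.group(1), 16)), s)
    s = ecma_unescape2_re.sub(lambda e: chr(int(e.group(1), 16)), s)
    return s

assert ecma_unescape('%u5409') == '吉'
assert ecma_unescape('%20') == ' '
assert ecma_unescape('%F3') == 'ó'
```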
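One detail worth flagging in the new `get_torrent_size` docstring: by its own examples, the plain suffix `GB` is treated as a binary multiplier (1024³) while `MiB` is decimal (1000²). A quick check that mirrors the docstring values, again assuming `searx.utils` is importable:

```python
from searx.utils import get_torrent_size

# Both expected values are copied from the docstring examples in the diff:
# '5' GB -> 5 * 1024**3 = 5368709120 (binary multiplier)
# '3.14' MiB -> 3140000 (decimal multiplier)
assert get_torrent_size('5', 'GB') == 5 * 1024 ** 3
assert get_torrent_size('3.14', 'MiB') == 3140000
```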