diff --git a/searx/engines/sjp.py b/searx/engines/sjp.py
new file mode 100644
index 00000000..e5b4ad2f
--- /dev/null
+++ b/searx/engines/sjp.py
@@ -0,0 +1,82 @@
+# SPDX-License-Identifier: AGPL-3.0-or-later
+"""Słownik Języka Polskiego (general)
+
+"""
+
+from lxml.html import fromstring
+from searx import logger
+from searx.utils import extract_text
+from searx.raise_for_httperror import raise_for_httperror
+
+logger = logger.getChild('sjp engine')
+
+# about
+about = {
+    "website": 'https://sjp.pwn.pl',
+    "wikidata_id": 'Q55117369',
+    "official_api_documentation": None,
+    "use_official_api": False,
+    "require_api_key": False,
+    "results": 'HTML',
+}
+
+categories = ['general']
+paging = False
+
+URL = 'https://sjp.pwn.pl'
+SEARCH_URL = URL + '/szukaj/{query}.html'
+
+
+def request(query, params):
+    params['url'] = SEARCH_URL.format(query=query)
+    logger.debug(f"query_url --> {params['url']}")
+    return params
+
+
+def response(resp):
+    results = []
+
+    raise_for_httperror(resp)
+    dom = fromstring(resp.text)
+    word = extract_text(dom.xpath('//*[@id="content"]/div/div[1]/div/div[1]/div[1]/div[2]/div/div/div[2]/div/div'))
+
+    definitions = []
+    for src in dom.xpath('//*[@id="content"]/div/div[1]/div/div[1]/div[1]/div[2]/div/div/div/div/div/div'):
+        src_text = extract_text(src.xpath('./h1/span[@class="entry-head-title"]/text()')).strip()
+
+        src_defs = []
+        for def_item in src.xpath('./div/div[contains(@class, "ribbon-element")]'):
+            if def_item.xpath('./div[@class="znacz"]'):
+                sub_defs = []
+                for def_sub_item in def_item.xpath('./div[@class="znacz"]'):
+                    def_sub_text = extract_text(def_sub_item).lstrip('0123456789. ')
+                    sub_defs.append(def_sub_text)
+                src_defs.append((word, sub_defs))
+            else:
+                def_text = extract_text(def_item).strip()
+                src_defs.append((def_text, ''))
+
+        definitions.append((src_text, src_defs))
+
+    if not definitions:
+        return results
+
+    infobox = ''
+    for src in definitions:
+        infobox += f"