mirror of https://github.com/searx/searx
[mod] bing_news: use eval_xpath_getindex
remove unused function searx.utils.list_get
parent 1d0c368746
commit de887c6347
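For context, eval_xpath_getindex evaluates an XPath expression against an element and returns the match at the given index, falling back to the supplied default when the result list has no such position; that is why the explicit [0] indexing and the list_get calls below can be dropped. The snippet that follows is a minimal standalone sketch of that pattern using plain lxml; xpath_getindex is an illustrative stand-in, not the actual searx.utils implementation.

# Illustrative stand-in for searx.utils.eval_xpath_getindex -- a toy helper
# assuming the same (element, xpath_spec, index, default) calling convention
# seen in the diff below; the real implementation lives in searx.utils.
from lxml import etree


def xpath_getindex(element, xpath_spec, index, default=None):
    """Evaluate xpath_spec on element and return the result at ``index``,
    or ``default`` when the result list has no such position."""
    if isinstance(xpath_spec, str):
        results = element.xpath(xpath_spec)
    else:
        # assume a precompiled, callable lxml.etree.XPath object
        results = xpath_spec(element)
    if -len(results) <= index < len(results):
        return results[index]
    return default


# Usage mirroring the bing_news changes: missing nodes yield the default
# instead of raising IndexError.
item = etree.fromstring(
    '<item><title>Example</title><link>https://example.org</link></item>'
)
url = xpath_getindex(item, './link/text()', 0, default=None)
title = xpath_getindex(item, './title/text()', 0, default=url)
content = xpath_getindex(item, './description/text()', 0, default='')
print(url, title, repr(content))   # https://example.org Example ''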
--- a/searx/engines/bing_news.py
+++ b/searx/engines/bing_news.py
@@ -15,7 +15,8 @@ from datetime import datetime
 from dateutil import parser
 from urllib.parse import urlencode, urlparse, parse_qsl
 from lxml import etree
-from searx.utils import list_get, match_language
+from lxml.etree import XPath
+from searx.utils import match_language, eval_xpath_getindex
 from searx.engines.bing import language_aliases
 from searx.engines.bing import _fetch_supported_languages, supported_languages_url  # NOQA # pylint: disable=unused-import

@@ -94,12 +95,12 @@ def response(resp):
     # parse results
     for item in rss.xpath('./channel/item'):
         # url / title / content
-        url = url_cleanup(item.xpath('./link/text()')[0])
-        title = list_get(item.xpath('./title/text()'), 0, url)
-        content = list_get(item.xpath('./description/text()'), 0, '')
+        url = url_cleanup(eval_xpath_getindex(item, './link/text()', 0, default=None))
+        title = eval_xpath_getindex(item, './title/text()', 0, default=url)
+        content = eval_xpath_getindex(item, './description/text()', 0, default='')

         # publishedDate
-        publishedDate = list_get(item.xpath('./pubDate/text()'), 0)
+        publishedDate = eval_xpath_getindex(item, './pubDate/text()', 0, default=None)
         try:
             publishedDate = parser.parse(publishedDate, dayfirst=False)
         except TypeError:
@@ -108,7 +109,7 @@ def response(resp):
             publishedDate = datetime.now()

         # thumbnail
-        thumbnail = list_get(item.xpath('./News:Image/text()', namespaces=ns), 0)
+        thumbnail = eval_xpath_getindex(item, XPath('./News:Image/text()', namespaces=ns), 0, default=None)
         if thumbnail is not None:
             thumbnail = image_url_cleanup(thumbnail)

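The thumbnail expression is the only call in this diff that uses the new from lxml.etree import XPath import: ./News:Image/text() relies on a namespace prefix, and precompiling it as an XPath object lets the expression carry its own namespace map, so only (element, xpath, index, default) has to be passed to the helper. A small standalone illustration follows; the namespace URI and sample XML are placeholders for this example, not Bing's actual feed.

# Standalone illustration of a namespaced, precompiled XPath expression.
# The namespace URI below is a placeholder, not Bing's real one.
from lxml import etree
from lxml.etree import XPath

ns = {'News': 'http://example.org/news-ns'}   # assumed prefix mapping
get_thumbnail = XPath('./News:Image/text()', namespaces=ns)

item = etree.fromstring(
    '<item xmlns:News="http://example.org/news-ns">'
    '<News:Image>https://example.org/thumb.jpg</News:Image></item>'
)
results = get_thumbnail(item)                 # ['https://example.org/thumb.jpg']
thumbnail = results[0] if results else None   # same defaulting idea as the helper
print(thumbnail)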
--- a/searx/utils.py
+++ b/searx/utils.py
@@ -269,24 +269,6 @@ def dict_subset(d, properties):
     return result


-def list_get(a_list, index, default=None):
-    """Get element in list or default value
-
-    Examples:
-        >>> list_get(['A', 'B', 'C'], 0)
-        'A'
-        >>> list_get(['A', 'B', 'C'], 3)
-        None
-        >>> list_get(['A', 'B', 'C'], 3, 'default')
-        'default'
-        >>> list_get(['A', 'B', 'C'], -1)
-        'C'
-    """
-    if len(a_list) > index:
-        return a_list[index]
-    else:
-        return default
-
-
 def get_torrent_size(filesize, filesize_multiplier):
     """