Mirror of https://github.com/searx/searx
Merge branch 'master' of https://github.com/asciimoo/searx
Commit 51278ee0be
@@ -16,7 +16,10 @@ update_dev_packages() {
 
 pep8_check() {
     echo '[!] Running pep8 check'
-    pep8 --max-line-length=120 "$SEARX_DIR" "$BASE_DIR/tests"
+    # ignored rules:
+    # E402 module level import not at top of file
+    # W503 line break before binary operator
+    pep8 --max-line-length=120 --ignore "E402,W503" "$SEARX_DIR" "$BASE_DIR/tests"
 }
 
 unit_tests() {
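The new pep8_check keeps the 120-character limit and additionally ignores E402 and W503. For reference, the same check can be driven from Python through the pep8 1.x StyleGuide API; the snippet below is a minimal sketch, not part of this change, and the 'searx'/'tests' paths are illustrative placeholders rather than the script's $SEARX_DIR/$BASE_DIR variables.

# Sketch: the pep8 check above, expressed through the pep8 module's API.
# The paths are placeholders; adjust them to the checkout being tested.
import pep8

style = pep8.StyleGuide(
    max_line_length=120,
    ignore=['E402', 'W503'],  # E402: import not at top of file, W503: line break before binary operator
)
report = style.check_files(['searx', 'tests'])
if report.total_errors:
    raise SystemExit('[!] pep8 check failed with %d error(s)' % report.total_errors)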
@@ -1,7 +1,7 @@
babel==2.2.0
flake8==2.5.1
mock==1.0.1
nose2[coverage-plugin]
pep8==1.7.0
plone.testing==4.0.15
robotframework-selenium2library==1.7.4
robotsuite==1.7.0
@@ -114,8 +114,7 @@ def dbpedia(query):
     # dbpedia autocompleter, no HTTPS
     autocomplete_url = 'http://lookup.dbpedia.org/api/search.asmx/KeywordSearch?'
 
-    response = get(autocomplete_url
-                   + urlencode(dict(QueryString=query)))
+    response = get(autocomplete_url + urlencode(dict(QueryString=query)))
 
     results = []
 
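The dbpedia change only joins the wrapped get() call onto one line; the request is still the autocomplete URL plus an urlencoded QueryString parameter. A self-contained sketch of that pattern follows, using requests in place of searx's internal get() helper; the XML parsing (Result/Label elements under the lookup.dbpedia.org namespace) is an assumption about the service's response, not something shown in this diff.

# Standalone sketch of a dbpedia keyword completer following the pattern
# in the hunk above. requests stands in for searx's get(); the XML layout
# is assumed, not taken from this diff.
from urllib.parse import urlencode
from xml.etree import ElementTree

import requests

def dbpedia_suggestions(query):
    autocomplete_url = 'http://lookup.dbpedia.org/api/search.asmx/KeywordSearch?'
    response = requests.get(autocomplete_url + urlencode(dict(QueryString=query)))

    results = []
    if response.ok:
        dom = ElementTree.fromstring(response.content)
        ns = {'d': 'http://lookup.dbpedia.org/'}  # assumed response namespace
        results = [label.text for label in dom.findall('.//d:Result/d:Label', ns)]
    return results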
@@ -141,8 +140,7 @@ def google(query):
     # google autocompleter
     autocomplete_url = 'https://suggestqueries.google.com/complete/search?client=toolbar&'
 
-    response = get(autocomplete_url
-                   + urlencode(dict(q=query)))
+    response = get(autocomplete_url + urlencode(dict(q=query)))
 
     results = []
 
@@ -209,29 +209,29 @@ def response(resp):
             parsed_url = urlparse(url, google_hostname)
 
-            # map result
-            if ((parsed_url.netloc == google_hostname and parsed_url.path.startswith(maps_path))
-                    or (parsed_url.netloc.startswith(map_hostname_start))):
-                x = result.xpath(map_near)
-                if len(x) > 0:
-                    # map : near the location
-                    results = results + parse_map_near(parsed_url, x, google_hostname)
-                else:
-                    # map : detail about a location
-                    results = results + parse_map_detail(parsed_url, result, google_hostname)
+            if parsed_url.netloc == google_hostname:
+                # TODO fix inside links
+                continue
+                # if parsed_url.path.startswith(maps_path) or parsed_url.netloc.startswith(map_hostname_start):
+                #     print "yooooo"*30
+                #     x = result.xpath(map_near)
+                #     if len(x) > 0:
+                #         # map : near the location
+                #         results = results + parse_map_near(parsed_url, x, google_hostname)
+                #     else:
+                #         # map : detail about a location
+                #         results = results + parse_map_detail(parsed_url, result, google_hostname)
+                # # google news
+                # elif parsed_url.path == search_path:
+                #     # skipping news results
+                #     pass
 
-            # google news
-            elif (parsed_url.netloc == google_hostname
-                    and parsed_url.path == search_path):
-                # skipping news results
-                pass
 
-            # images result
-            elif (parsed_url.netloc == google_hostname
-                    and parsed_url.path == images_path):
-                # only thumbnail image provided,
-                # so skipping image results
-                # results = results + parse_images(result, google_hostname)
-                pass
+                # # images result
+                # elif parsed_url.path == images_path:
+                #     # only thumbnail image provided,
+                #     # so skipping image results
+                #     # results = results + parse_images(result, google_hostname)
+                #     pass
 
             else:
                 # normal result
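The incoming side of this hunk skips any result whose netloc is the Google hostname (keeping the old map/news/images dispatch around as comments), while everything else falls through to the normal-result branch. The dispatch itself is just urlparse plus netloc/path prefix checks; the sketch below condenses it, with literal hostname and path values standing in for the engine's module-level constants (google_hostname, maps_path, search_path, images_path, map_hostname_start), which are not shown in this diff.

# Condensed sketch of the URL dispatch from the hunk above. The literal
# values below are placeholders for the engine's module-level constants.
from urllib.parse import urlparse

GOOGLE_HOSTNAME = 'www.google.com'   # placeholder for google_hostname
MAPS_PATH = '/maps'                  # placeholder for maps_path
SEARCH_PATH = '/search'              # placeholder for search_path
IMAGES_PATH = '/images'              # placeholder for images_path
MAP_HOSTNAME_START = 'maps.google.'  # placeholder for map_hostname_start

def classify(url):
    parsed_url = urlparse(url)
    if ((parsed_url.netloc == GOOGLE_HOSTNAME and parsed_url.path.startswith(MAPS_PATH))
            or parsed_url.netloc.startswith(MAP_HOSTNAME_START)):
        return 'map'
    elif parsed_url.netloc == GOOGLE_HOSTNAME and parsed_url.path == SEARCH_PATH:
        return 'news'    # skipped by the engine
    elif parsed_url.netloc == GOOGLE_HOSTNAME and parsed_url.path == IMAGES_PATH:
        return 'image'   # skipped: only a thumbnail would be available
    else:
        return 'normal'

print(classify('https://www.google.com/maps/place/somewhere'))  # -> map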