mirror of https://github.com/searx/searx
[fix] google & youtube - set EU consent cookie
This changes the previous bypass method for Google consent using ``ucbcb=1`` (6face215b8) to accepting the consent using ``CONSENT=YES+``. The youtube_noapi and google engines have a similar API, at least for the consent [1].

Get the CONSENT cookie from a google request::

    curl -i "https://www.google.com/search?q=time&tbm=isch" \
         -A "Mozilla/5.0 (X11; Linux i686; rv:102.0) Gecko/20100101 Firefox/102.0" \
         | grep -i consent
    ...
    location: https://consent.google.com/m?continue=https://www.google.com/search?q%3Dtime%26tbm%3Disch&gl=DE&m=0&pc=irp&uxe=eomtm&hl=en-US&src=1
    set-cookie: CONSENT=PENDING+936; expires=Wed, 24-Jul-2024 11:26:20 GMT; path=/; domain=.google.com; Secure
    ...

On PENDING & YES, from [2]:

    Google changed the way it asks for consent to YouTube cookies in EU
    countries.  Instead of showing a popup on the website, YouTube redirects
    the user to a new webpage at the consent.youtube.com domain ...  The fix
    is to send a cookie CONSENT with the value YES+ on every YouTube request.

[1] https://github.com/iv-org/invidious/pull/2207
[2] https://github.com/TeamNewPipe/NewPipeExtractor/issues/592

Closes: https://github.com/searxng/searxng/issues/1432
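The same probe can be scripted. A minimal sketch (not part of this commit; assumes the ``requests`` package and an EU exit IP, where Google answers cookie-less clients with the consent redirect)::

    # Hypothetical check mirroring the curl probe above: without a cookie the
    # answer is a redirect to consent.google.com, with CONSENT=YES+ it is the
    # results page itself.
    import requests

    URL = "https://www.google.com/search?q=time&tbm=isch"
    UA = "Mozilla/5.0 (X11; Linux i686; rv:102.0) Gecko/20100101 Firefox/102.0"

    def is_consent_redirect(cookies):
        resp = requests.get(
            URL, headers={"User-Agent": UA}, cookies=cookies, allow_redirects=False
        )
        return "consent.google.com" in resp.headers.get("location", "")

    print(is_consent_redirect({}))                   # expected: True in the EU
    print(is_consent_redirect({"CONSENT": "YES+"}))  # expected: False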
parent 86bd82d691
commit 7123aa12ec
@@ -235,6 +235,7 @@ def request(query, params):
     params['url'] = query_url
 
     logger.debug("HTTP header Accept-Language --> %s", lang_info.get('Accept-Language'))
+    params['cookies']['CONSENT'] = "YES+"
     params['headers'].update(lang_info['headers'])
     if use_mobile_ui:
         params['headers']['Accept'] = '*/*'
@@ -121,6 +121,7 @@ def request(query, params):
     params['url'] = query_url
 
     logger.debug("HTTP header Accept-Language --> %s", lang_info.get('Accept-Language'))
+    params['cookies']['CONSENT'] = "YES+"
     params['headers'].update(lang_info['headers'])
     params['headers']['Accept'] = (
         'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
@@ -111,6 +111,8 @@ def request(query, params):
     params['url'] = query_url
 
     logger.debug("HTTP header Accept-Language --> %s", lang_info.get('Accept-Language'))
+
+    params['cookies']['CONSENT'] = "YES+"
     params['headers'].update(lang_info['headers'])
     params['headers']['Accept'] = (
         'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: AGPL-3.0-or-later
+"""
+ Google Play Apps
+"""
+
+from urllib.parse import urlencode
+from lxml import html
+from searx.utils import (
+    eval_xpath,
+    extract_url,
+    extract_text,
+    eval_xpath_list,
+    eval_xpath_getindex,
+)
+
+about = {
+    "website": "https://play.google.com/",
+    "wikidata_id": "Q79576",
+    "use_official_api": False,
+    "require_api_key": False,
+    "results": "HTML",
+}
+
+categories = ["files", "apps"]
+search_url = "https://play.google.com/store/search?{query}&c=apps"
+
+
+def request(query, params):
+    params["url"] = search_url.format(query=urlencode({"q": query}))
+    params['cookies']['CONSENT'] = "YES+"
+
+    return params
+
+
+def response(resp):
+    results = []
+
+    dom = html.fromstring(resp.text)
+
+    if eval_xpath(dom, '//div[@class="v6DsQb"]'):
+        return []
+
+    spot = eval_xpath_getindex(dom, '//div[@class="ipRz4"]', 0, None)
+    if spot is not None:
+        url = extract_url(eval_xpath(spot, './a[@class="Qfxief"]/@href'), search_url)
+        title = extract_text(eval_xpath(spot, './/div[@class="vWM94c"]'))
+        content = extract_text(eval_xpath(spot, './/div[@class="LbQbAe"]'))
+        img = extract_text(eval_xpath(spot, './/img[@class="T75of bzqKMd"]/@src'))
+
+        results.append({"url": url, "title": title, "content": content, "img_src": img})
+
+    more = eval_xpath_list(dom, '//c-wiz[@jsrenderer="RBsfwb"]//div[@role="listitem"]', min_len=1)
+    for result in more:
+        url = extract_url(eval_xpath(result, ".//a/@href"), search_url)
+        title = extract_text(eval_xpath(result, './/span[@class="DdYX5"]'))
+        content = extract_text(eval_xpath(result, './/span[@class="wMUdtb"]'))
+        img = extract_text(
+            eval_xpath(
+                result,
+                './/img[@class="T75of stzEZd" or @class="T75of etjhNc Q8CSx "]/@src',
+            )
+        )
+
+        results.append({"url": url, "title": title, "content": content, "img_src": img})
+
+    for suggestion in eval_xpath_list(dom, '//c-wiz[@jsrenderer="qyd4Kb"]//div[@class="ULeU3b neq64b"]'):
+        results.append({"suggestion": extract_text(eval_xpath(suggestion, './/div[@class="Epkrse "]'))})
+
+    return results
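For orientation, this is roughly how searx's engine framework drives such a module: ``request()`` fills url, cookies and headers into the ``params`` dict, the framework performs the HTTP call, and ``response()`` parses the page into result dicts. A hand-driven sketch (not part of this commit; assumes a searx checkout on ``PYTHONPATH``, the ``requests`` package, and a hypothetical query)::

    import requests
    from searx.engines import google_play_apps as engine

    # request() mutates and returns the params dict handed in by the framework
    params = {"url": None, "cookies": {}, "headers": {}}
    engine.request("podcast player", params)

    resp = requests.get(
        params["url"], cookies=params["cookies"], headers=params["headers"]
    )

    # response() returns app results and, at the end, bare suggestion entries,
    # so use .get() instead of indexing
    for result in engine.response(resp):
        print(result.get("title"), result.get("url"))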
@@ -85,13 +85,13 @@ def request(query, params):
     # subdomain is: scholar.google.xy
     lang_info['subdomain'] = lang_info['subdomain'].replace("www.", "scholar.")
 
-    query_url = 'https://'+ lang_info['subdomain'] + '/scholar' + "?" + urlencode({
-        'q': query,
-        **lang_info['params'],
-        'ie': "utf8",
-        'oe': "utf8",
-        'start' : offset,
-    })
+    query_url = (
+        'https://'
+        + lang_info['subdomain']
+        + '/scholar'
+        + "?"
+        + urlencode({'q': query, **lang_info['params'], 'ie': "utf8", 'oe': "utf8", 'start': offset})
+    )
 
     query_url += time_range_url(params)
 
@@ -99,6 +99,7 @@ def request(query, params):
     params['url'] = query_url
 
     logger.debug("HTTP header Accept-Language --> %s", lang_info.get('Accept-Language'))
+    params['cookies']['CONSENT'] = "YES+"
     params['headers'].update(lang_info['headers'])
     params['headers']['Accept'] = (
         'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
@@ -138,6 +138,7 @@ def request(query, params):
     params['url'] = query_url
 
     logger.debug("HTTP header Accept-Language --> %s", lang_info.get('Accept-Language'))
+    params['cookies']['CONSENT'] = "YES+"
     params['headers'].update(lang_info['headers'])
     params['headers']['Accept'] = (
         'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
@@ -44,6 +44,7 @@ base_youtube_url = 'https://www.youtube.com/watch?v='
 
 # do search-request
 def request(query, params):
+    params['cookies']['CONSENT'] = "YES+"
     if not params['engine_data'].get('next_page_token'):
         params['url'] = search_url.format(query=quote_plus(query), page=params['pageno'])
     if params['time_range'] in time_range_dict: