gpiolib-acpi: Only defer request_irq for GpioInt ACPI event handlers

Commit 78d3a92edb ("gpiolib-acpi: Register GpioInt ACPI event handlers
from a late_initcall") deferred the entire acpi_gpiochip_request_interrupt
call for each event resource.

This means it also delays the gpiochip_request_own_desc(..., "ACPI:Event")
call. This is a problem if some AML code reads the GPIO pin before the
deferred acpi_gpiochip_request_interrupt() runs, because in that case
acpi_gpio_adr_space_handler() will already have called
gpiochip_request_own_desc(..., "ACPI:OpRegion"), causing the call from
acpi_gpiochip_request_interrupt() to fail with -EBUSY, so we never
register an event handler.

acpi_gpio_adr_space_handler() is prepared for
acpi_gpiochip_request_interrupt() having already claimed the pin, but
the other way around does not work.
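
Sketched out (illustration only, with the argument lists elided as in the
text above), the ordering that breaks looks like this:

    /* 1) AML reads the pin first, via the GPIO OperationRegion handler */
    desc = gpiochip_request_own_desc(..., "ACPI:OpRegion");  /* succeeds */

    /* 2) the deferred _AEI handling later tries to claim the same pin */
    desc = gpiochip_request_own_desc(..., "ACPI:Event");     /* fails with -EBUSY */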

One example of a problem this causes is the event handler for the OTG
ID pin on a Prowise PT301 tablet not registering, keeping the port stuck
in whatever mode it was in during boot and e.g. only allowing charging
after a reboot.

This commit fixes this by only deferring the request_irq call and the
initial run of edge-triggered IRQs instead of deferring all of
acpi_gpiochip_request_interrupt.
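
As a rough sketch of the resulting call flow (names taken from the patch;
see the diff below for the real code):

    acpi_gpiochip_request_interrupts(chip)
      -> acpi_walk_resources(handle, "_AEI", acpi_gpiochip_alloc_event, acpi_gpio)
           /* per event: claim the GPIO as "ACPI:Event", allocate acpi_gpio_event */
      -> before late_initcall_sync (acpi_gpio_deferred_req_irqs_done not yet set):
           put the chip on the deferred list and return
      -> otherwise, or later from the late_initcall_sync handler:
           acpi_gpiochip_request_irqs(acpi_gpio)
             /* per event: request_threaded_irq() + initial run of edge-triggered IRQs */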

Cc: stable@vger.kernel.org
Fixes: 78d3a92edb ("gpiolib-acpi: Register GpioInt ACPI event handlers from a late_initcall")
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Acked-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Hans de Goede 2018-11-28 17:57:55 +01:00 committed by Linus Walleij
parent 2e6e902d18
commit e59f5e08ec
1 changed file with 94 additions and 70 deletions


@@ -19,11 +19,28 @@
#include "gpiolib.h"
/**
* struct acpi_gpio_event - ACPI GPIO event handler data
*
* @node: list-entry of the events list of the struct acpi_gpio_chip
* @handle: handle of ACPI method to execute when the IRQ triggers
* @handler: irq_handler to pass to request_irq when requesting the IRQ
* @pin: GPIO pin number on the gpio_chip
* @irq: Linux IRQ number for the event, for request_ / free_irq
* @irqflags: flags to pass to request_irq when requesting the IRQ
* @irq_is_wake: If the ACPI flags indicate the IRQ is a wakeup source
* @irq_requested: True if request_irq has been done
* @desc: gpio_desc for the GPIO pin for this event
*/
struct acpi_gpio_event {
struct list_head node;
acpi_handle handle;
irq_handler_t handler;
unsigned int pin;
unsigned int irq;
unsigned long irqflags;
bool irq_is_wake;
bool irq_requested;
struct gpio_desc *desc;
};
@@ -49,10 +66,10 @@ struct acpi_gpio_chip {
/*
* For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
* (so builtin drivers) we register the ACPI GpioInt event handlers from a
* (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a
* late_initcall_sync handler, so that other builtin drivers can register their
* OpRegions before the event handlers can run. This list contains gpiochips
* for which the acpi_gpiochip_request_interrupts() has been deferred.
* for which the acpi_gpiochip_request_irqs() call has been deferred.
*/
static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
@@ -133,8 +150,42 @@ bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
}
EXPORT_SYMBOL_GPL(acpi_gpio_get_irq_resource);
static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
void *context)
static void acpi_gpiochip_request_irq(struct acpi_gpio_chip *acpi_gpio,
struct acpi_gpio_event *event)
{
int ret, value;
ret = request_threaded_irq(event->irq, NULL, event->handler,
event->irqflags, "ACPI:Event", event);
if (ret) {
dev_err(acpi_gpio->chip->parent,
"Failed to setup interrupt handler for %d\n",
event->irq);
return;
}
if (event->irq_is_wake)
enable_irq_wake(event->irq);
event->irq_requested = true;
/* Make sure we trigger the initial state of edge-triggered IRQs */
value = gpiod_get_raw_value_cansleep(event->desc);
if (((event->irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
((event->irqflags & IRQF_TRIGGER_FALLING) && value == 0))
event->handler(event->irq, event);
}
static void acpi_gpiochip_request_irqs(struct acpi_gpio_chip *acpi_gpio)
{
struct acpi_gpio_event *event;
list_for_each_entry(event, &acpi_gpio->events, node)
acpi_gpiochip_request_irq(acpi_gpio, event);
}
static acpi_status acpi_gpiochip_alloc_event(struct acpi_resource *ares,
void *context)
{
struct acpi_gpio_chip *acpi_gpio = context;
struct gpio_chip *chip = acpi_gpio->chip;
@@ -143,8 +194,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
struct acpi_gpio_event *event;
irq_handler_t handler = NULL;
struct gpio_desc *desc;
unsigned long irqflags;
int ret, pin, irq, value;
int ret, pin, irq;
if (!acpi_gpio_get_irq_resource(ares, &agpio))
return AE_OK;
@@ -175,8 +225,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
gpiod_direction_input(desc);
value = gpiod_get_value_cansleep(desc);
ret = gpiochip_lock_as_irq(chip, pin);
if (ret) {
dev_err(chip->parent, "Failed to lock GPIO as interrupt\n");
@@ -189,64 +237,42 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
goto fail_unlock_irq;
}
irqflags = IRQF_ONESHOT;
if (agpio->triggering == ACPI_LEVEL_SENSITIVE) {
if (agpio->polarity == ACPI_ACTIVE_HIGH)
irqflags |= IRQF_TRIGGER_HIGH;
else
irqflags |= IRQF_TRIGGER_LOW;
} else {
switch (agpio->polarity) {
case ACPI_ACTIVE_HIGH:
irqflags |= IRQF_TRIGGER_RISING;
break;
case ACPI_ACTIVE_LOW:
irqflags |= IRQF_TRIGGER_FALLING;
break;
default:
irqflags |= IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING;
break;
}
}
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (!event)
goto fail_unlock_irq;
event->irqflags = IRQF_ONESHOT;
if (agpio->triggering == ACPI_LEVEL_SENSITIVE) {
if (agpio->polarity == ACPI_ACTIVE_HIGH)
event->irqflags |= IRQF_TRIGGER_HIGH;
else
event->irqflags |= IRQF_TRIGGER_LOW;
} else {
switch (agpio->polarity) {
case ACPI_ACTIVE_HIGH:
event->irqflags |= IRQF_TRIGGER_RISING;
break;
case ACPI_ACTIVE_LOW:
event->irqflags |= IRQF_TRIGGER_FALLING;
break;
default:
event->irqflags |= IRQF_TRIGGER_RISING |
IRQF_TRIGGER_FALLING;
break;
}
}
event->handle = evt_handle;
event->handler = handler;
event->irq = irq;
event->irq_is_wake = agpio->wake_capable == ACPI_WAKE_CAPABLE;
event->pin = pin;
event->desc = desc;
ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
"ACPI:Event", event);
if (ret) {
dev_err(chip->parent,
"Failed to setup interrupt handler for %d\n",
event->irq);
goto fail_free_event;
}
if (agpio->wake_capable == ACPI_WAKE_CAPABLE)
enable_irq_wake(irq);
list_add_tail(&event->node, &acpi_gpio->events);
/*
* Make sure we trigger the initial state of the IRQ when using RISING
* or FALLING. Note we run the handlers on late_init, the AML code
* may refer to OperationRegions from other (builtin) drivers which
* may be probed after us.
*/
if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
((irqflags & IRQF_TRIGGER_FALLING) && value == 0))
handler(event->irq, event);
return AE_OK;
fail_free_event:
kfree(event);
fail_unlock_irq:
gpiochip_unlock_as_irq(chip, pin);
fail_free_desc:
@@ -283,6 +309,9 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
if (ACPI_FAILURE(status))
return;
acpi_walk_resources(handle, "_AEI",
acpi_gpiochip_alloc_event, acpi_gpio);
mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
defer = !acpi_gpio_deferred_req_irqs_done;
if (defer)
@@ -293,8 +322,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
if (defer)
return;
acpi_walk_resources(handle, "_AEI",
acpi_gpiochip_request_interrupt, acpi_gpio);
acpi_gpiochip_request_irqs(acpi_gpio);
}
EXPORT_SYMBOL_GPL(acpi_gpiochip_request_interrupts);
@@ -331,10 +359,13 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
struct gpio_desc *desc;
if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
disable_irq_wake(event->irq);
if (event->irq_requested) {
if (event->irq_is_wake)
disable_irq_wake(event->irq);
free_irq(event->irq, event);
}
free_irq(event->irq, event);
desc = event->desc;
if (WARN_ON(IS_ERR(desc)))
continue;
@@ -1200,23 +1231,16 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
return con_id == NULL;
}
/* Run deferred acpi_gpiochip_request_interrupts() */
static int acpi_gpio_handle_deferred_request_interrupts(void)
/* Run deferred acpi_gpiochip_request_irqs() */
static int acpi_gpio_handle_deferred_request_irqs(void)
{
struct acpi_gpio_chip *acpi_gpio, *tmp;
mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
list_for_each_entry_safe(acpi_gpio, tmp,
&acpi_gpio_deferred_req_irqs_list,
deferred_req_irqs_list_entry) {
acpi_handle handle;
handle = ACPI_HANDLE(acpi_gpio->chip->parent);
acpi_walk_resources(handle, "_AEI",
acpi_gpiochip_request_interrupt, acpi_gpio);
list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
}
deferred_req_irqs_list_entry)
acpi_gpiochip_request_irqs(acpi_gpio);
acpi_gpio_deferred_req_irqs_done = true;
mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
@@ -1224,4 +1248,4 @@ static int acpi_gpio_handle_deferred_request_interrupts(void)
return 0;
}
/* We must use _sync so that this runs after the first deferred_probe run */
late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts);
late_initcall_sync(acpi_gpio_handle_deferred_request_irqs);