GPIO fixes for the v4.19 cycle:

 - Atomic contexts, cansleep* calls and such fastpath/slowpath
   things.
 
 - Defer ACPI event handler registration to late_initcall()
   so IRQs do not fire in our face before other drivers have
   a chance to register handlers.
 
 - Race condition if a consumer requests a GPIO after
   gpiochip_add_data_with_key() but before of_gpiochip_add()
 
 - Probe error path in the dwapb driver.
 -----BEGIN PGP SIGNATURE-----
 
 iQIcBAABAgAGBQJbj8PeAAoJEEEQszewGV1zXN0P/1IPyvN7AvFfKiaJZGRMj7+E
 3yUgRjCQqBM4zP+jC9Fnd/dCdf9Ex/4pY5OpuuubOnayAKfBNLMItshHZ50KDGVv
 igkysabGhMydBXzlesFlOcS7Ap518FxE+Je+2Y1cveh/wPM83wBurAE8Z25Pj5KX
 sSdEKWb9gvBGMw20M2dcv5r2m44eA8ycXJr/EoOayZ829vq7th1ATaaOtprfpKam
 2sMYTBzuG0MHaWcfJeZAmVINsEkHDGbXAr98C/xurNUIK3wqEqat3PrRNUoizeED
 xZbh7mp8lzUgMz7TmCa6c/sf1sqvyEUcixO/Y7Ylsry44fiXjxh7B2CicTwClM9l
 qPL8KlPhmKPEUKtuGYyBJG1tbyt93E0EIVg983NtzlMmMSzMmQVi16rf1Jv4eZGi
 batjpwmMoS5g5MI+oA8xyoTFj6LpU6MwbFSb8QFJ0iBf0BveueCECCVfVZgRpnWa
 dGv0ENq6a+KDSVSondQ1WRQPtTWiOqNxhIWmBaepJaEwq1MYeSOZDhD3Nzi0AfLm
 ORic+QBzJHSqArc6ymyY+mEufxFzlECesEgrP7R+GuZUSQbZbQmUXtAvhrEH8BKO
 zij5cYFo//7tpi1LMuELrWTbTrgkemem8nx6pA3XUpCk9KzszGJ6AnUkbXNFNNJN
 mMFMSfNLfAd8UXfnLBAg
 =fXnt
 -----END PGP SIGNATURE-----

Merge tag 'gpio-v4.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio

Pull GPIO fixes from Linus Walleij:
 "Some GPIO fixes. The ACPI stuff is probably the most annoying for
  users that get fixed this time.

    - Atomic contexts, cansleep* calls and such fastpath/slowpath
     things.

    - Defer ACPI event handler registration to late_initcall() so IRQs do
      not fire in our face before other drivers have a chance to register
      handlers (see the deferral sketch before the diffs below).

   - Race condition if a consumer requests a GPIO after
     gpiochip_add_data_with_key() but before of_gpiochip_add()

    - Probe error path in the dwapb driver"

* tag 'gpio-v4.19-2' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio:
  gpio: Fix crash due to registration race
  gpio: dwapb: Fix error handling in dwapb_gpio_probe()
  gpiolib-acpi: Register GpioInt ACPI event handlers from a late_initcall
  gpiolib: acpi: Switch to cansleep version of GPIO library call
  gpio: adp5588: Fix sleep-in-atomic-context bug
Linus Torvalds 2018-09-05 09:27:45 -07:00
commit b36fdc6853
4 changed files with 72 additions and 40 deletions
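The late_initcall() deferral called out in the pull message is a common kernel pattern: requests that arrive before all builtin drivers have probed are parked on a mutex-protected list, and a late_initcall_sync() replays them once that is safe. The gpiolib-acpi diff below implements this; the following is only a stripped-down sketch of the pattern, with invented names (deferred_item, maybe_defer and run_deferred are illustrative, not gpiolib-acpi symbols):

#include <linux/init.h>
#include <linux/list.h>
#include <linux/mutex.h>

static LIST_HEAD(deferred_list);
static DEFINE_MUTEX(deferred_lock);
static bool deferred_done;

struct deferred_item {
        struct list_head entry;
        void (*run)(struct deferred_item *item);
};

/* Called from probe paths, possibly before late_init: either run the
 * request directly or park it on the list for later. */
static void maybe_defer(struct deferred_item *item)
{
        bool defer;

        mutex_lock(&deferred_lock);
        defer = !deferred_done;
        if (defer)
                list_add(&item->entry, &deferred_list);
        mutex_unlock(&deferred_lock);

        if (!defer)
                item->run(item);
}

/* Replay everything that was parked, then let new callers run directly. */
static int run_deferred(void)
{
        struct deferred_item *item, *tmp;

        mutex_lock(&deferred_lock);
        list_for_each_entry_safe(item, tmp, &deferred_list, entry) {
                item->run(item);
                list_del_init(&item->entry);
        }
        deferred_done = true;
        mutex_unlock(&deferred_lock);

        return 0;
}
/* The _sync variant runs after the first deferred-probe pass. */
late_initcall_sync(run_deferred);

In the real code below, the parked entries are struct acpi_gpio_chip objects and the replayed work is the acpi_walk_resources(handle, "_AEI", ...) call that registers the GpioInt event handlers.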


@@ -41,6 +41,8 @@ struct adp5588_gpio {
uint8_t int_en[3];
uint8_t irq_mask[3];
uint8_t irq_stat[3];
uint8_t int_input_en[3];
uint8_t int_lvl_cached[3];
};
static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
@@ -173,12 +175,28 @@ static void adp5588_irq_bus_sync_unlock(struct irq_data *d)
struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
int i;
for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
if (dev->int_input_en[i]) {
mutex_lock(&dev->lock);
dev->dir[i] &= ~dev->int_input_en[i];
dev->int_input_en[i] = 0;
adp5588_gpio_write(dev->client, GPIO_DIR1 + i,
dev->dir[i]);
mutex_unlock(&dev->lock);
}
if (dev->int_lvl_cached[i] != dev->int_lvl[i]) {
dev->int_lvl_cached[i] = dev->int_lvl[i];
adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i,
dev->int_lvl[i]);
}
if (dev->int_en[i] ^ dev->irq_mask[i]) {
dev->int_en[i] = dev->irq_mask[i];
adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i,
dev->int_en[i]);
}
}
mutex_unlock(&dev->irq_lock);
}
@@ -221,9 +239,7 @@ static int adp5588_irq_set_type(struct irq_data *d, unsigned int type)
else
return -EINVAL;
adp5588_gpio_direction_input(&dev->gpio_chip, gpio);
adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank,
dev->int_lvl[bank]);
dev->int_input_en[bank] |= bit;
return 0;
}
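The adp5588 hunks above follow the standard slow-bus irqchip split: .irq_set_type() is called in atomic context (with the IRQ descriptor lock held), so it may only cache the requested configuration, and the sleeping I2C register writes are flushed later from .irq_bus_sync_unlock(), which runs in process context. A rough sketch of that split under hypothetical names (example_chip, example_i2c_write and the EXAMPLE_REG_* layout are invented for illustration, not the real driver):

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/mutex.h>
#include <linux/types.h>

#define EXAMPLE_NUM_BANKS       3
#define EXAMPLE_REG_DIR(b)      (0x00 + (b))    /* hypothetical register map */
#define EXAMPLE_REG_INT_LVL(b)  (0x10 + (b))

struct example_chip {                           /* hypothetical I2C expander state */
        struct mutex irq_lock;
        u8 dir[EXAMPLE_NUM_BANKS];
        u8 int_lvl[EXAMPLE_NUM_BANKS];
        u8 int_lvl_cached[EXAMPLE_NUM_BANKS];
        u8 int_input_en[EXAMPLE_NUM_BANKS];
};

/* Stub standing in for a real (sleeping) I2C register write. */
static void example_i2c_write(struct example_chip *chip, unsigned int reg, u8 val)
{
        /* a real driver would issue an i2c/regmap write here; this may sleep */
}

/* Atomic context: only update cached state, never touch the bus. */
static int example_irq_set_type(struct irq_data *d, unsigned int type)
{
        struct example_chip *chip = irq_data_get_irq_chip_data(d);
        unsigned int bank = d->hwirq / 8;
        u8 bit = BIT(d->hwirq % 8);

        if (type == IRQ_TYPE_LEVEL_HIGH)
                chip->int_lvl[bank] |= bit;
        else if (type == IRQ_TYPE_LEVEL_LOW)
                chip->int_lvl[bank] &= ~bit;
        else
                return -EINVAL;

        chip->int_input_en[bank] |= bit;        /* defer the direction change too */
        return 0;
}

/* Taken around irqchip updates; the matching unlock side may sleep. */
static void example_irq_bus_lock(struct irq_data *d)
{
        struct example_chip *chip = irq_data_get_irq_chip_data(d);

        mutex_lock(&chip->irq_lock);
}

static void example_irq_bus_sync_unlock(struct irq_data *d)
{
        struct example_chip *chip = irq_data_get_irq_chip_data(d);
        int i;

        for (i = 0; i < EXAMPLE_NUM_BANKS; i++) {
                if (chip->int_input_en[i]) {
                        chip->dir[i] &= ~chip->int_input_en[i];
                        chip->int_input_en[i] = 0;
                        example_i2c_write(chip, EXAMPLE_REG_DIR(i), chip->dir[i]);
                }
                if (chip->int_lvl_cached[i] != chip->int_lvl[i]) {
                        chip->int_lvl_cached[i] = chip->int_lvl[i];
                        example_i2c_write(chip, EXAMPLE_REG_INT_LVL(i),
                                          chip->int_lvl[i]);
                }
        }
        mutex_unlock(&chip->irq_lock);
}

The irq_chip's .irq_bus_lock() / .irq_bus_sync_unlock() pair exists exactly for this case: the core calls them from sleepable context around the atomic callbacks, so the slow bus traffic can be batched at unlock time.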


@@ -728,6 +728,7 @@ static int dwapb_gpio_probe(struct platform_device *pdev)
out_unregister:
dwapb_gpio_unregister(gpio);
dwapb_irq_teardown(gpio);
clk_disable_unprepare(gpio->clk);
return err;
}
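The one added line above restores the usual probe error-path rule: whatever was acquired earlier in probe (here a prepared and enabled clock) must be released on every later failure path, in reverse order of acquisition. A generic sketch of that goto-based unwinding, with hypothetical example_* types and helpers (only the clk_*, devm_* and error-pointer calls are real kernel APIs):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct example_gpio {                   /* hypothetical driver state */
        struct clk *clk;
};

/* Stubs standing in for the real registration steps of the driver. */
static int example_register_gpiochip(struct example_gpio *gpio) { return 0; }
static void example_unregister_gpiochip(struct example_gpio *gpio) { }
static int example_setup_irq(struct example_gpio *gpio) { return 0; }

static int example_probe(struct platform_device *pdev)
{
        struct example_gpio *gpio;
        int err;

        gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
        if (!gpio)
                return -ENOMEM;

        gpio->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(gpio->clk))
                return PTR_ERR(gpio->clk);

        err = clk_prepare_enable(gpio->clk);
        if (err)
                return err;             /* nothing to unwind yet */

        err = example_register_gpiochip(gpio);
        if (err)
                goto err_clk;

        err = example_setup_irq(gpio);
        if (err)
                goto err_unregister;

        return 0;

err_unregister:
        example_unregister_gpiochip(gpio);
err_clk:
        clk_disable_unprepare(gpio->clk);       /* the kind of release the fix above adds */
        return err;
}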


@@ -25,7 +25,6 @@
struct acpi_gpio_event {
struct list_head node;
struct list_head initial_sync_list;
acpi_handle handle;
unsigned int pin;
unsigned int irq;
@@ -49,10 +48,19 @@ struct acpi_gpio_chip {
struct mutex conn_lock;
struct gpio_chip *chip;
struct list_head events;
struct list_head deferred_req_irqs_list_entry;
};
static LIST_HEAD(acpi_gpio_initial_sync_list);
static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock);
/*
* For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
* (so builtin drivers) we register the ACPI GpioInt event handlers from a
* late_initcall_sync handler, so that other builtin drivers can register their
* OpRegions before the event handlers can run. This list contains gpiochips
* for which the acpi_gpiochip_request_interrupts() has been deferred.
*/
static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
static bool acpi_gpio_deferred_req_irqs_done;
static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
{
@@ -89,21 +97,6 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
return gpiochip_get_desc(chip, pin);
}
static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event)
{
mutex_lock(&acpi_gpio_initial_sync_list_lock);
list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list);
mutex_unlock(&acpi_gpio_initial_sync_list_lock);
}
static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event)
{
mutex_lock(&acpi_gpio_initial_sync_list_lock);
if (!list_empty(&event->initial_sync_list))
list_del_init(&event->initial_sync_list);
mutex_unlock(&acpi_gpio_initial_sync_list_lock);
}
static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
{
struct acpi_gpio_event *event = data;
@@ -186,7 +179,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
gpiod_direction_input(desc);
value = gpiod_get_value(desc);
value = gpiod_get_value_cansleep(desc);
ret = gpiochip_lock_as_irq(chip, pin);
if (ret) {
@@ -229,7 +222,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
event->irq = irq;
event->pin = pin;
event->desc = desc;
INIT_LIST_HEAD(&event->initial_sync_list);
ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
"ACPI:Event", event);
@@ -251,10 +243,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
* may refer to OperationRegions from other (builtin) drivers which
* may be probed after us.
*/
if (handler == acpi_gpio_irq_handler &&
(((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
((irqflags & IRQF_TRIGGER_FALLING) && value == 0)))
acpi_gpio_add_to_initial_sync_list(event);
if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
((irqflags & IRQF_TRIGGER_FALLING) && value == 0))
handler(event->irq, event);
return AE_OK;
@@ -283,6 +274,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
struct acpi_gpio_chip *acpi_gpio;
acpi_handle handle;
acpi_status status;
bool defer;
if (!chip->parent || !chip->to_irq)
return;
@@ -295,6 +287,16 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
if (ACPI_FAILURE(status))
return;
mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
defer = !acpi_gpio_deferred_req_irqs_done;
if (defer)
list_add(&acpi_gpio->deferred_req_irqs_list_entry,
&acpi_gpio_deferred_req_irqs_list);
mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
if (defer)
return;
acpi_walk_resources(handle, "_AEI",
acpi_gpiochip_request_interrupt, acpi_gpio);
}
@@ -325,11 +327,14 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
if (ACPI_FAILURE(status))
return;
mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
if (!list_empty(&acpi_gpio->deferred_req_irqs_list_entry))
list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
struct gpio_desc *desc;
acpi_gpio_del_from_initial_sync_list(event);
if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
disable_irq_wake(event->irq);
@@ -1052,6 +1057,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
acpi_gpio->chip = chip;
INIT_LIST_HEAD(&acpi_gpio->events);
INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry);
status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio);
if (ACPI_FAILURE(status)) {
@@ -1198,20 +1204,28 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
return con_id == NULL;
}
/* Sync the initial state of handlers after all builtin drivers have probed */
static int acpi_gpio_initial_sync(void)
/* Run deferred acpi_gpiochip_request_interrupts() */
static int acpi_gpio_handle_deferred_request_interrupts(void)
{
struct acpi_gpio_event *event, *ep;
struct acpi_gpio_chip *acpi_gpio, *tmp;
mutex_lock(&acpi_gpio_initial_sync_list_lock);
list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list,
initial_sync_list) {
acpi_evaluate_object(event->handle, NULL, NULL, NULL);
list_del_init(&event->initial_sync_list);
mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
list_for_each_entry_safe(acpi_gpio, tmp,
&acpi_gpio_deferred_req_irqs_list,
deferred_req_irqs_list_entry) {
acpi_handle handle;
handle = ACPI_HANDLE(acpi_gpio->chip->parent);
acpi_walk_resources(handle, "_AEI",
acpi_gpiochip_request_interrupt, acpi_gpio);
list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
}
mutex_unlock(&acpi_gpio_initial_sync_list_lock);
acpi_gpio_deferred_req_irqs_done = true;
mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
return 0;
}
/* We must use _sync so that this runs after the first deferred_probe run */
late_initcall_sync(acpi_gpio_initial_sync);
late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts);
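The gpiod_get_value() -> gpiod_get_value_cansleep() switch earlier in this file is the "cansleep" item from the pull message: acpi_gpiochip_request_interrupt() runs in a context that is allowed to sleep, and the descriptor may sit on a GPIO expander behind a slow bus, so the sleeping accessor is the correct one. The rule of thumb, sketched with hypothetical handlers (not gpiolib-acpi code):

#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/printk.h>

/* Hard-IRQ (atomic) context: only the non-sleeping accessors are legal,
 * which in turn only work for fast, memory-mapped GPIO controllers. */
static irqreturn_t example_hardirq_handler(int irq, void *data)
{
        struct gpio_desc *desc = data;

        return gpiod_get_value(desc) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
}

/* Threaded handler (process context): the _cansleep accessors work for
 * any controller, including I2C/SPI expanders that must sleep to read. */
static irqreturn_t example_thread_handler(int irq, void *data)
{
        struct gpio_desc *desc = data;

        pr_info("pin level: %d\n", gpiod_get_value_cansleep(desc));
        return IRQ_HANDLED;
}

request_threaded_irq() pairs handlers like these: the first runs in hard-IRQ context and must not sleep, the second runs in a kernel thread and may.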


@@ -31,6 +31,7 @@ static int of_gpiochip_match_node_and_xlate(struct gpio_chip *chip, void *data)
struct of_phandle_args *gpiospec = data;
return chip->gpiodev->dev.of_node == gpiospec->np &&
chip->of_xlate &&
chip->of_xlate(chip, gpiospec, NULL) >= 0;
}
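This last hunk is the registration-race fix from the pull message: a gpiochip becomes visible to consumers at gpiochip_add_data_with_key(), but ->of_xlate is only filled in by of_gpiochip_add(), so a DT lookup landing in between used to call a NULL function pointer. The added chip->of_xlate check turns that window into a plain "no match". A stripped-down, hypothetical version of such a matcher (simplified to use chip->of_node; not the gpiolib code itself):

#include <linux/gpio/driver.h>
#include <linux/of.h>

/*
 * Hypothetical matcher for gpiochip_find(): a chip may already be
 * registered while its OF hooks are still being filled in, so never
 * call ->of_xlate before checking that it is non-NULL.
 */
static int example_match_node_and_xlate(struct gpio_chip *chip, void *data)
{
        struct of_phandle_args *gpiospec = data;

        if (chip->of_node != gpiospec->np)
                return false;           /* different controller node */

        if (!chip->of_xlate)
                return false;           /* OF part of registration not done yet */

        return chip->of_xlate(chip, gpiospec, NULL) >= 0;
}

gpiochip_find() walks every registered chip with a callback like this, so returning false for a half-registered chip just makes the lookup fail cleanly (typically ending in probe deferral) instead of crashing.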