70aedd24d2
Some interrupt chips are connected to a "slow" bus (i2c, spi ...). The bus access needs to sleep and therefore cannot be called in atomic contexts. Some of the generic interrupt management functions like disable_irq(), enable_irq() ... call interrupt chip functions with the irq_desc->lock held and interrupts disabled. This does not work for such devices. Provide a separate synchronization mechanism for such interrupt chips. The irq_chip structure is extended by two optional functions (bus_lock and bus_sync_unlock). The idea is to serialize the bus access for those operations in the core code so that drivers which are behind that bus operated interrupt controller do not have to worry about it and just can use the normal interfaces. To achieve this we add two function pointers to the irq_chip: bus_lock and bus_sync_unlock. bus_lock() is called to serialize access to the interrupt controller bus. Now the core code can issue chip->mask/unmask ... commands without changing the fast path code at all. The chip implementation merely stores that information in a chip private data structure and returns. No bus interaction as these functions are called from atomic context. After that bus_sync_unlock() is called outside the atomic context. Now the chip implementation issues the bus commands, waits for completion and unlocks the interrupt controller bus. 
The irq_chip implementation as pseudo code: struct irq_chip_data { struct mutex mutex; unsigned int irq_offset; unsigned long mask; unsigned long mask_status; } static void bus_lock(unsigned int irq) { struct irq_chip_data *data = get_irq_desc_chip_data(irq); mutex_lock(&data->mutex); } static void mask(unsigned int irq) { struct irq_chip_data *data = get_irq_desc_chip_data(irq); irq -= data->irq_offset; data->mask |= (1 << irq); } static void unmask(unsigned int irq) { struct irq_chip_data *data = get_irq_desc_chip_data(irq); irq -= data->irq_offset; data->mask &= ~(1 << irq); } static void bus_sync_unlock(unsigned int irq) { struct irq_chip_data *data = get_irq_desc_chip_data(irq); if (data->mask != data->mask_status) { do_bus_magic_to_set_mask(data->mask); data->mask_status = data->mask; } mutex_unlock(&data->mutex); } The device drivers can use request_threaded_irq, free_irq, disable_irq and enable_irq as usual with the only restriction that the calls need to come from non atomic context. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: Mark Brown <broonie@opensource.wolfsonmicro.com> Cc: Dmitry Torokhov <dmitry.torokhov@gmail.com> Cc: Trilok Soni <soni.trilok@gmail.com> Cc: Pavel Machek <pavel@ucw.cz> Cc: Brian Swetland <swetland@google.com> Cc: Joonyoung Shim <jy0922.shim@samsung.com> Cc: m.szyprowski@samsung.com Cc: t.fujak@samsung.com Cc: kyungmin.park@samsung.com, Cc: David Brownell <david-b@pacbell.net> Cc: Daniel Ribeiro <drwyrm@gmail.com> Cc: arve@android.com Cc: Barry Song <21cnbao@gmail.com>
100 lines
2.9 KiB
C
100 lines
2.9 KiB
C
/*
 * IRQ subsystem internal functions and variables:
 */

/* Set via the "noirqdebug" boot parameter; disables spurious-irq detection */
extern int noirqdebug;

/* Set default functions for irq_chip structures: */
extern void irq_chip_set_defaults(struct irq_chip *chip);

/* Set default handler: */
extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);

/* Configure the trigger type (edge/level polarity) of an interrupt line */
extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		unsigned long flags);
/* Internal disable/enable helpers; callers hold desc->lock with irqs off */
extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);

/* Shared lockdep class for all irq_desc locks */
extern struct lock_class_key irq_desc_lock_class;
/* Allocate/reset the per-cpu interrupt statistics of a descriptor */
extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
extern void clear_kstat_irqs(struct irq_desc *desc);
/* Protects the irq_desc_ptrs lookup table below */
extern spinlock_t sparse_irq_lock;

#ifdef CONFIG_SPARSE_IRQ
/* irq_desc_ptrs allocated at boot time */
extern struct irq_desc **irq_desc_ptrs;
#else
/* irq_desc_ptrs is a fixed size array */
extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
#endif

#ifdef CONFIG_PROC_FS
/* /proc/irq/ registration hooks; compiled out when procfs is disabled */
extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
extern void register_handler_proc(unsigned int irq, struct irqaction *action);
extern void unregister_handler_proc(unsigned int irq, struct irqaction *action);
#else
static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { }
static inline void register_handler_proc(unsigned int irq,
					 struct irqaction *action) { }
static inline void unregister_handler_proc(unsigned int irq,
					   struct irqaction *action) { }
#endif

/* Select an affinity mask for an irq on behalf of a /proc write */
extern int irq_select_affinity_usr(unsigned int irq);

/* Propagate a new affinity mask to the irq's handler thread(s) */
extern void irq_set_thread_affinity(struct irq_desc *desc);
/* Inline functions for support of irq chips on slow busses */
|
|
static inline void chip_bus_lock(unsigned int irq, struct irq_desc *desc)
|
|
{
|
|
if (unlikely(desc->chip->bus_lock))
|
|
desc->chip->bus_lock(irq);
|
|
}
|
|
|
|
static inline void chip_bus_sync_unlock(unsigned int irq, struct irq_desc *desc)
|
|
{
|
|
if (unlikely(desc->chip->bus_sync_unlock))
|
|
desc->chip->bus_sync_unlock(irq);
|
|
}
|
|
|
|
/*
 * Debugging printout:
 */

#include <linux/kallsyms.h>

/* Print the flag name iff the flag is set in desc->status */
#define P(f) if (desc->status & f) printk("%14s set\n", #f)

/*
 * Dump the state of an interrupt descriptor to the kernel log:
 * pointers (with symbol resolution via print_symbol), counters,
 * the installed action's handler, and all set status flags.
 * Debug-only helper; output goes to printk at default loglevel.
 */
static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
{
	printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
		irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
	printk("->handle_irq(): %p, ", desc->handle_irq);
	print_symbol("%s\n", (unsigned long)desc->handle_irq);
	printk("->chip(): %p, ", desc->chip);
	print_symbol("%s\n", (unsigned long)desc->chip);
	printk("->action(): %p\n", desc->action);
	if (desc->action) {
		printk("->action->handler(): %p, ", desc->action->handler);
		print_symbol("%s\n", (unsigned long)desc->action->handler);
	}

	/* One line per status flag that is currently set */
	P(IRQ_INPROGRESS);
	P(IRQ_DISABLED);
	P(IRQ_PENDING);
	P(IRQ_REPLAY);
	P(IRQ_AUTODETECT);
	P(IRQ_WAITING);
	P(IRQ_LEVEL);
	P(IRQ_MASKED);
#ifdef CONFIG_IRQ_PER_CPU
	P(IRQ_PER_CPU);
#endif
	P(IRQ_NOPROBE);
	P(IRQ_NOREQUEST);
	P(IRQ_NOAUTOEN);
}

#undef P