soc: ti: QMSS: Fix usage of irq_set_affinity_hint

The Keystone QMSS driver is pretty damaged, in the sense that it
does things like this:

	irq_set_affinity_hint(irq, to_cpumask(&cpu_map));

where cpu_map is a local variable. The IRQ core holds on to that pointer
after we leave the function, so it will point to nowhere-land, and things
will end up badly.
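
As an illustration of why this breaks (a hypothetical minimal example,
not code taken from the driver), the pattern amounts to:

	#include <linux/cpumask.h>
	#include <linux/interrupt.h>

	static int hypothetical_setup_irq(unsigned int irq)
	{
		unsigned long cpu_map = 0x3;	/* lives on this stack frame */
		int ret;

		/*
		 * irq_set_affinity_hint() only stores the pointer; the core
		 * dereferences it later, e.g. when userspace reads
		 * /proc/irq/<irq>/affinity_hint.
		 */
		ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));

		return ret;	/* cpu_map dies here; the stored hint now dangles */
	}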

Instead, let's use a proper cpumask that gets allocated, giving
the driver a chance to actually work with things like irqbalance,
as well as to have a hypothetical 64-bit future.
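
A minimal sketch of that allocation pattern (assuming a devm-managed
device and a hardware-provided CPU bitmap; the names here are
illustrative, not the driver's):

	#include <linux/bitops.h>
	#include <linux/cpumask.h>
	#include <linux/device.h>
	#include <linux/interrupt.h>
	#include <linux/types.h>

	static int hypothetical_setup_irq(struct device *dev, unsigned int irq,
					  u32 hw_map)
	{
		unsigned long bits = hw_map;
		struct cpumask *mask;
		int bit;

		mask = devm_kzalloc(dev, cpumask_size(), GFP_KERNEL);
		if (!mask)
			return -ENOMEM;

		/* translate the raw bitmap into a real cpumask */
		for_each_set_bit(bit, &bits, BITS_PER_LONG)
			cpumask_set_cpu(bit, mask);

		/* the mask now outlives this function, so the hint stays valid */
		return irq_set_affinity_hint(irq, mask);
	}

devm_kzalloc() ties the mask's lifetime to the device, which is the same
approach the patch below takes.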

Cc: stable@vger.kernel.org
Acked-by: Santosh Shilimkar <ssantosh@kernel.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Olof Johansson <olof@lixom.net>
Author:    Marc Zyngier <marc.zyngier@arm.com>, 2018-10-31 08:41:34 +00:00
Committer: Olof Johansson <olof@lixom.net>
commit 832ad0e3da
parent 4b42745211
3 changed files with 22 additions and 14 deletions

drivers/soc/ti/knav_qmss.h

@@ -329,8 +329,8 @@ struct knav_range_ops {
 };
 
 struct knav_irq_info {
-	int	irq;
-	u32	cpu_map;
+	int		irq;
+	struct cpumask	*cpu_mask;
 };
 
 struct knav_range_info {

drivers/soc/ti/knav_qmss_acc.c

@@ -205,18 +205,18 @@ static int knav_range_setup_acc_irq(struct knav_range_info *range,
 {
 	struct knav_device *kdev = range->kdev;
 	struct knav_acc_channel *acc;
-	unsigned long cpu_map;
+	struct cpumask *cpu_mask;
 	int ret = 0, irq;
 	u32 old, new;
 
 	if (range->flags & RANGE_MULTI_QUEUE) {
 		acc = range->acc;
 		irq = range->irqs[0].irq;
-		cpu_map = range->irqs[0].cpu_map;
+		cpu_mask = range->irqs[0].cpu_mask;
 	} else {
 		acc = range->acc + queue;
 		irq = range->irqs[queue].irq;
-		cpu_map = range->irqs[queue].cpu_map;
+		cpu_mask = range->irqs[queue].cpu_mask;
 	}
 
 	old = acc->open_mask;
@@ -239,8 +239,8 @@ static int knav_range_setup_acc_irq(struct knav_range_info *range,
 			acc->name, acc->name);
 		ret = request_irq(irq, knav_acc_int_handler, 0, acc->name,
 				  range);
-		if (!ret && cpu_map) {
-			ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
+		if (!ret && cpu_mask) {
+			ret = irq_set_affinity_hint(irq, cpu_mask);
 			if (ret) {
 				dev_warn(range->kdev->dev,
 					 "Failed to set IRQ affinity\n");

drivers/soc/ti/knav_qmss_queue.c

@@ -118,19 +118,17 @@ static int knav_queue_setup_irq(struct knav_range_info *range,
 			  struct knav_queue_inst *inst)
 {
 	unsigned queue = inst->id - range->queue_base;
-	unsigned long cpu_map;
 	int ret = 0, irq;
 
 	if (range->flags & RANGE_HAS_IRQ) {
 		irq = range->irqs[queue].irq;
-		cpu_map = range->irqs[queue].cpu_map;
 		ret = request_irq(irq, knav_queue_int_handler, 0,
 					inst->irq_name, inst);
 		if (ret)
 			return ret;
 		disable_irq(irq);
-		if (cpu_map) {
-			ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
+		if (range->irqs[queue].cpu_mask) {
+			ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
 			if (ret) {
 				dev_warn(range->kdev->dev,
 					 "Failed to set IRQ affinity\n");
@@ -1262,9 +1260,19 @@ static int knav_setup_queue_range(struct knav_device *kdev,
 
 		range->num_irqs++;
 
-		if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3)
-			range->irqs[i].cpu_map =
-				(oirq.args[2] & 0x0000ff00) >> 8;
+		if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) {
+			unsigned long mask;
+			int bit;
+
+			range->irqs[i].cpu_mask = devm_kzalloc(dev,
+						cpumask_size(), GFP_KERNEL);
+			if (!range->irqs[i].cpu_mask)
+				return -ENOMEM;
+
+			mask = (oirq.args[2] & 0x0000ff00) >> 8;
+			for_each_set_bit(bit, &mask, BITS_PER_LONG)
+				cpumask_set_cpu(bit, range->irqs[i].cpu_mask);
+		}
 	}
 
 	range->num_irqs = min(range->num_irqs, range->num_queues);