x86: put irq_2_iommu pointer into irq_desc

When CONFIG_HAVE_SPARSE_IRQ is enabled, preallocate a pool of irq_2_iommu
entries and use get_one_free_irq_2_iommu() to grab a fresh one and link it
to the irq_desc when needed.

Otherwise, keep using the dyn_array or the static array, as before.

v2: <= nr_irqs fix

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Author:    Yinghai Lu, 2008-08-19 20:50:21 -07:00 (committed by Ingo Molnar)
Commit:    e420dfb40c, parent 52b17329d6
2 changed files with 170 additions and 49 deletions
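
Editor's note: the heart of the change is that, with sparse IRQs, the remapping state no longer lives in a fixed NR_IRQS-sized array but hangs off each irq_desc as a lazily allocated pointer, so a plain lookup can return NULL while the alloc variant attaches state on first use. The following is a minimal userspace C sketch of that lookup-vs-allocate split, not the kernel code: irq_to_desc() is replaced by a toy descriptor table, the lookup helper is renamed irq_2_iommu_lookup() to keep the example self-contained, and plain calloc() stands in for the patch's pool allocator.

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy stand-ins for the kernel structures this patch touches. */
    struct irq_2_iommu {
            void *iommu;                 /* struct intel_iommu * in the real code */
            unsigned short irte_index;
            unsigned short sub_handle;
            unsigned char irte_mask;
    };

    struct irq_desc {
            struct irq_2_iommu *irq_2_iommu;   /* the field the patch adds */
    };

    #define MAX_IRQS 16
    static struct irq_desc descs[MAX_IRQS];    /* toy replacement for irq_to_desc() */

    static struct irq_desc *irq_to_desc(unsigned int irq)
    {
            return irq < MAX_IRQS ? &descs[irq] : NULL;
    }

    /* Lookup only: NULL means no remapping state was ever attached. */
    static struct irq_2_iommu *irq_2_iommu_lookup(unsigned int irq)
    {
            struct irq_desc *desc = irq_to_desc(irq);

            return desc ? desc->irq_2_iommu : NULL;
    }

    /* Lookup-or-allocate: attach a zeroed entry on first use. */
    static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
    {
            struct irq_desc *desc = irq_to_desc(irq);

            if (!desc)
                    return NULL;
            if (!desc->irq_2_iommu)
                    desc->irq_2_iommu = calloc(1, sizeof(*desc->irq_2_iommu));
            return desc->irq_2_iommu;
    }

    int main(void)
    {
            printf("before: %p\n", (void *)irq_2_iommu_lookup(5));      /* (nil) */
            irq_2_iommu_alloc(5)->irte_index = 42;
            printf("after:  irte_index=%u\n", (unsigned)irq_2_iommu_lookup(5)->irte_index);
            return 0;
    }

The non-sparse fallback in the patch keeps the old 1:1 mapping: irq_2_iommu_alloc() simply returns the array slot for the irq.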

drivers/pci/intr_remapping.c

@@ -19,41 +19,136 @@ struct irq_2_iommu {
         u8 irte_mask;
 };
-#ifdef CONFIG_HAVE_DYNA_ARRAY
-static struct irq_2_iommu *irq_2_iommu;
-DEFINE_DYN_ARRAY(irq_2_iommu, sizeof(struct irq_2_iommu), nr_irqs, PAGE_SIZE, NULL);
+#ifdef CONFIG_HAVE_SPARSE_IRQ
+static struct irq_2_iommu *irq_2_iommuX;
+/* fill one page ? */
+static int nr_irq_2_iommu = 0x100;
+static int irq_2_iommu_index;
+DEFINE_DYN_ARRAY(irq_2_iommuX, sizeof(struct irq_2_iommu), nr_irq_2_iommu, PAGE_SIZE, NULL);
+
+extern void *__alloc_bootmem_nopanic(unsigned long size,
+                                     unsigned long align,
+                                     unsigned long goal);
+
+static struct irq_2_iommu *get_one_free_irq_2_iommu(int not_used)
+{
+        struct irq_2_iommu *iommu;
+        unsigned long total_bytes;
+
+        if (irq_2_iommu_index >= nr_irq_2_iommu) {
+                /*
+                 * we run out of pre-allocate ones, allocate more
+                 */
+                printk(KERN_DEBUG "try to get more irq_2_iommu %d\n", nr_irq_2_iommu);
+
+                total_bytes = sizeof(struct irq_2_iommu)*nr_irq_2_iommu;
+
+                if (after_bootmem)
+                        iommu = kzalloc(total_bytes, GFP_ATOMIC);
+                else
+                        iommu = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);
+
+                if (!iommu)
+                        panic("can not get more irq_2_iommu\n");
+
+                irq_2_iommuX = iommu;
+                irq_2_iommu_index = 0;
+        }
+
+        iommu = &irq_2_iommuX[irq_2_iommu_index];
+        irq_2_iommu_index++;
+        return iommu;
+}
+
+static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
+{
+        struct irq_desc *desc;
+
+        desc = irq_to_desc(irq);
+
+        BUG_ON(!desc);
+
+        return desc->irq_2_iommu;
+}
+
+static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
+{
+        struct irq_desc *desc;
+        struct irq_2_iommu *irq_iommu;
+
+        desc = irq_to_desc(irq);
+
+        BUG_ON(!desc);
+
+        irq_iommu = desc->irq_2_iommu;
+
+        if (!irq_iommu)
+                desc->irq_2_iommu = get_one_free_irq_2_iommu(irq);
+
+        return desc->irq_2_iommu;
+}
+
+#else /* !CONFIG_HAVE_SPARSE_IRQ */
+
+#ifdef CONFIG_HAVE_DYN_ARRAY
+static struct irq_2_iommu *irq_2_iommuX;
+DEFINE_DYN_ARRAY(irq_2_iommuX, sizeof(struct irq_2_iommu), nr_irqs, PAGE_SIZE, NULL);
 #else
-static struct irq_2_iommu irq_2_iommu[NR_IRQS];
+static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
 #endif
+
+static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
+{
+        if (irq < nr_irqs)
+                return &irq_2_iommuX[irq];
+
+        return NULL;
+}
+
+static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
+{
+        return irq_2_iommu(irq);
+}
+#endif
 
 static DEFINE_SPINLOCK(irq_2_ir_lock);
 
+static struct irq_2_iommu *valid_irq_2_iommu(unsigned int irq)
+{
+        struct irq_2_iommu *irq_iommu;
+
+        irq_iommu = irq_2_iommu(irq);
+
+        if (!irq_iommu)
+                return NULL;
+
+        if (!irq_iommu->iommu)
+                return NULL;
+
+        return irq_iommu;
+}
+
 int irq_remapped(int irq)
 {
-        if (irq > nr_irqs)
-                return 0;
-
-        if (!irq_2_iommu[irq].iommu)
-                return 0;
-
-        return 1;
+        return valid_irq_2_iommu(irq) != NULL;
 }
 
 int get_irte(int irq, struct irte *entry)
 {
         int index;
+        struct irq_2_iommu *irq_iommu;
 
-        if (!entry || irq > nr_irqs)
+        if (!entry)
                 return -1;
 
         spin_lock(&irq_2_ir_lock);
-        if (!irq_2_iommu[irq].iommu) {
+        irq_iommu = valid_irq_2_iommu(irq);
+        if (!irq_iommu) {
                 spin_unlock(&irq_2_ir_lock);
                 return -1;
         }
 
-        index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
-        *entry = *(irq_2_iommu[irq].iommu->ir_table->base + index);
+        index = irq_iommu->irte_index + irq_iommu->sub_handle;
+        *entry = *(irq_iommu->iommu->ir_table->base + index);
 
         spin_unlock(&irq_2_ir_lock);
         return 0;
@@ -62,6 +157,7 @@ int get_irte(int irq, struct irte *entry)
 int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 {
         struct ir_table *table = iommu->ir_table;
+        struct irq_2_iommu *irq_iommu;
         u16 index, start_index;
         unsigned int mask = 0;
         int i;
@@ -69,6 +165,12 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
         if (!count)
                 return -1;
 
+#ifndef CONFIG_HAVE_SPARSE_IRQ
+        /* protect irq_2_iommu_alloc later */
+        if (irq >= nr_irqs)
+                return -1;
+#endif
+
         /*
          * start the IRTE search from index 0.
          */
@@ -108,10 +210,11 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
         for (i = index; i < index + count; i++)
                 table->base[i].present = 1;
 
-        irq_2_iommu[irq].iommu = iommu;
-        irq_2_iommu[irq].irte_index = index;
-        irq_2_iommu[irq].sub_handle = 0;
-        irq_2_iommu[irq].irte_mask = mask;
+        irq_iommu = irq_2_iommu_alloc(irq);
+        irq_iommu->iommu = iommu;
+        irq_iommu->irte_index = index;
+        irq_iommu->sub_handle = 0;
+        irq_iommu->irte_mask = mask;
 
         spin_unlock(&irq_2_ir_lock);
@@ -132,31 +235,36 @@ static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 int map_irq_to_irte_handle(int irq, u16 *sub_handle)
 {
         int index;
+        struct irq_2_iommu *irq_iommu;
 
         spin_lock(&irq_2_ir_lock);
-        if (irq >= nr_irqs || !irq_2_iommu[irq].iommu) {
+        irq_iommu = valid_irq_2_iommu(irq);
+        if (!irq_iommu) {
                 spin_unlock(&irq_2_ir_lock);
                 return -1;
         }
 
-        *sub_handle = irq_2_iommu[irq].sub_handle;
-        index = irq_2_iommu[irq].irte_index;
+        *sub_handle = irq_iommu->sub_handle;
+        index = irq_iommu->irte_index;
         spin_unlock(&irq_2_ir_lock);
         return index;
 }
 
 int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 {
+        struct irq_2_iommu *irq_iommu;
+
         spin_lock(&irq_2_ir_lock);
-        if (irq >= nr_irqs || irq_2_iommu[irq].iommu) {
+        irq_iommu = valid_irq_2_iommu(irq);
+        if (!irq_iommu) {
                 spin_unlock(&irq_2_ir_lock);
                 return -1;
         }
 
-        irq_2_iommu[irq].iommu = iommu;
-        irq_2_iommu[irq].irte_index = index;
-        irq_2_iommu[irq].sub_handle = subhandle;
-        irq_2_iommu[irq].irte_mask = 0;
+        irq_iommu->iommu = iommu;
+        irq_iommu->irte_index = index;
+        irq_iommu->sub_handle = subhandle;
+        irq_iommu->irte_mask = 0;
 
         spin_unlock(&irq_2_ir_lock);
@@ -165,16 +273,19 @@ int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
 {
+        struct irq_2_iommu *irq_iommu;
+
         spin_lock(&irq_2_ir_lock);
-        if (irq >= nr_irqs || !irq_2_iommu[irq].iommu) {
+        irq_iommu = valid_irq_2_iommu(irq);
+        if (!irq_iommu) {
                 spin_unlock(&irq_2_ir_lock);
                 return -1;
         }
 
-        irq_2_iommu[irq].iommu = NULL;
-        irq_2_iommu[irq].irte_index = 0;
-        irq_2_iommu[irq].sub_handle = 0;
-        irq_2_iommu[irq].irte_mask = 0;
+        irq_iommu->iommu = NULL;
+        irq_iommu->irte_index = 0;
+        irq_iommu->sub_handle = 0;
+        irq_2_iommu(irq)->irte_mask = 0;
 
         spin_unlock(&irq_2_ir_lock);
@@ -186,16 +297,18 @@ int modify_irte(int irq, struct irte *irte_modified)
         int index;
         struct irte *irte;
         struct intel_iommu *iommu;
+        struct irq_2_iommu *irq_iommu;
 
         spin_lock(&irq_2_ir_lock);
-        if (irq >= nr_irqs || !irq_2_iommu[irq].iommu) {
+        irq_iommu = valid_irq_2_iommu(irq);
+        if (!irq_iommu) {
                 spin_unlock(&irq_2_ir_lock);
                 return -1;
         }
 
-        iommu = irq_2_iommu[irq].iommu;
+        iommu = irq_iommu->iommu;
 
-        index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+        index = irq_iommu->irte_index + irq_iommu->sub_handle;
         irte = &iommu->ir_table->base[index];
 
         set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
@@ -211,18 +324,20 @@ int flush_irte(int irq)
 {
         int index;
         struct intel_iommu *iommu;
+        struct irq_2_iommu *irq_iommu;
 
         spin_lock(&irq_2_ir_lock);
-        if (irq >= nr_irqs || !irq_2_iommu[irq].iommu) {
+        irq_iommu = valid_irq_2_iommu(irq);
+        if (!irq_iommu) {
                 spin_unlock(&irq_2_ir_lock);
                 return -1;
         }
 
-        iommu = irq_2_iommu[irq].iommu;
+        iommu = irq_iommu->iommu;
 
-        index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+        index = irq_iommu->irte_index + irq_iommu->sub_handle;
 
-        qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask);
+        qi_flush_iec(iommu, index, irq_iommu->irte_mask);
         spin_unlock(&irq_2_ir_lock);
 
         return 0;
@@ -254,28 +369,30 @@ int free_irte(int irq)
         int index, i;
         struct irte *irte;
         struct intel_iommu *iommu;
+        struct irq_2_iommu *irq_iommu;
 
         spin_lock(&irq_2_ir_lock);
-        if (irq >= nr_irqs || !irq_2_iommu[irq].iommu) {
+        irq_iommu = valid_irq_2_iommu(irq);
+        if (!irq_iommu) {
                 spin_unlock(&irq_2_ir_lock);
                 return -1;
         }
 
-        iommu = irq_2_iommu[irq].iommu;
+        iommu = irq_iommu->iommu;
 
-        index = irq_2_iommu[irq].irte_index + irq_2_iommu[irq].sub_handle;
+        index = irq_iommu->irte_index + irq_iommu->sub_handle;
 
         irte = &iommu->ir_table->base[index];
 
-        if (!irq_2_iommu[irq].sub_handle) {
-                for (i = 0; i < (1 << irq_2_iommu[irq].irte_mask); i++)
+        if (!irq_iommu->sub_handle) {
+                for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
                         set_64bit((unsigned long *)irte, 0);
-                qi_flush_iec(iommu, index, irq_2_iommu[irq].irte_mask);
+                qi_flush_iec(iommu, index, irq_iommu->irte_mask);
         }
 
-        irq_2_iommu[irq].iommu = NULL;
-        irq_2_iommu[irq].irte_index = 0;
-        irq_2_iommu[irq].sub_handle = 0;
-        irq_2_iommu[irq].irte_mask = 0;
+        irq_iommu->iommu = NULL;
+        irq_iommu->irte_index = 0;
+        irq_iommu->sub_handle = 0;
+        irq_iommu->irte_mask = 0;
 
         spin_unlock(&irq_2_ir_lock);

include/linux/irq.h

@@ -128,6 +128,7 @@ struct irq_chip {
 };
 
 struct timer_rand_state;
+struct irq_2_iommu;
 /**
  * struct irq_desc - interrupt descriptor
  *
@@ -162,6 +163,9 @@ struct irq_desc {
         unsigned int *kstat_irqs;
 #else
         unsigned int kstat_irqs[NR_CPUS];
 #endif
+#if defined(CONFIG_INTR_REMAP) && defined(CONFIG_HAVE_SPARSE_IRQ)
+        struct irq_2_iommu *irq_2_iommu;
+#endif
         irq_flow_handler_t handle_irq;
         struct irq_chip *chip;
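
Editor's note on the allocation strategy: get_one_free_irq_2_iommu() does not allocate one entry per interrupt. It hands out entries from a preallocated pool of nr_irq_2_iommu (0x100) and, only when that pool is exhausted, grabs another pool-sized chunk -- kzalloc(GFP_ATOMIC) once the slab allocator is available, __alloc_bootmem_nopanic() before that -- and simply moves the pool pointer on; entries already handed out keep their addresses. Below is a hedged userspace C sketch of that bump-pointer pool, with calloc() standing in for both kernel allocators and a NULL return instead of panic().

    #include <stdlib.h>

    struct irq_2_iommu {
            void *iommu;
            unsigned short irte_index;
            unsigned short sub_handle;
            unsigned char irte_mask;
    };

    /* A chunk of preallocated entries, refilled one pool at a time. */
    static struct irq_2_iommu *pool;
    static int pool_size = 0x100;      /* mirrors nr_irq_2_iommu in the patch */
    static int pool_index;

    struct irq_2_iommu *get_one_free_irq_2_iommu(void)
    {
            if (!pool || pool_index >= pool_size) {
                    /*
                     * Out of preallocated entries: grab another chunk. The patch
                     * picks kzalloc() or __alloc_bootmem_nopanic() depending on
                     * after_bootmem; calloc() stands in for both here. The old
                     * chunk is abandoned, but entries already handed out stay valid.
                     */
                    pool = calloc(pool_size, sizeof(*pool));
                    if (!pool)
                            return NULL;   /* the patch panics instead */
                    pool_index = 0;
            }
            return &pool[pool_index++];
    }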