Merge git://git.infradead.org/iommu-2.6

* git://git.infradead.org/iommu-2.6:
  intel-iommu: fix endless "Unknown DMAR structure type" loop
  VT-d: handle Invalidation Queue Error to avoid system hang
  intel-iommu: fix build error with INTR_REMAP=y and DMAR=n
Linus Torvalds committed 2009-02-25 09:31:21 -08:00
commit 60042600c5
3 changed files with 70 additions and 27 deletions

diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c

@@ -330,6 +330,14 @@ parse_dmar_table(void)
 	entry_header = (struct acpi_dmar_header *)(dmar + 1);
 	while (((unsigned long)entry_header) <
 			(((unsigned long)dmar) + dmar_tbl->length)) {
+		/* Avoid looping forever on bad ACPI tables */
+		if (entry_header->length == 0) {
+			printk(KERN_WARNING PREFIX
+				"Invalid 0-length structure\n");
+			ret = -EINVAL;
+			break;
+		}
+
 		dmar_table_print_dmar_entry(entry_header);
 
 		switch (entry_header->type) {
@@ -491,7 +499,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	int map_size;
 	u32 ver;
 	static int iommu_allocated = 0;
-	int agaw;
+	int agaw = 0;
 
 	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
 	if (!iommu)
@@ -507,6 +515,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
 
+#ifdef CONFIG_DMAR
 	agaw = iommu_calculate_agaw(iommu);
 	if (agaw < 0) {
 		printk(KERN_ERR
@@ -514,6 +523,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 			iommu->seq_id);
 		goto error;
 	}
+#endif
 	iommu->agaw = agaw;
 
 	/* the registers might be more than one page */
@@ -571,19 +581,49 @@ static inline void reclaim_free_desc(struct q_inval *qi)
 	}
 }
 
+static int qi_check_fault(struct intel_iommu *iommu, int index)
+{
+	u32 fault;
+	int head;
+	struct q_inval *qi = iommu->qi;
+	int wait_index = (index + 1) % QI_LENGTH;
+
+	fault = readl(iommu->reg + DMAR_FSTS_REG);
+
+	/*
+	 * If IQE happens, the head points to the descriptor associated
+	 * with the error. No new descriptors are fetched until the IQE
+	 * is cleared.
+	 */
+	if (fault & DMA_FSTS_IQE) {
+		head = readl(iommu->reg + DMAR_IQH_REG);
+		if ((head >> 4) == index) {
+			memcpy(&qi->desc[index], &qi->desc[wait_index],
+					sizeof(struct qi_desc));
+			__iommu_flush_cache(iommu, &qi->desc[index],
+					sizeof(struct qi_desc));
+			writel(DMA_FSTS_IQE, iommu->reg + DMAR_FSTS_REG);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
 /*
  * Submit the queued invalidation descriptor to the remapping
  * hardware unit and wait for its completion.
  */
-void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
+int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 {
+	int rc = 0;
 	struct q_inval *qi = iommu->qi;
 	struct qi_desc *hw, wait_desc;
 	int wait_index, index;
 	unsigned long flags;
 
 	if (!qi)
-		return;
+		return 0;
 
 	hw = qi->desc;
@@ -601,7 +641,8 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 
 	hw[index] = *desc;
 
-	wait_desc.low = QI_IWD_STATUS_DATA(2) | QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
+	wait_desc.low = QI_IWD_STATUS_DATA(QI_DONE) |
+			QI_IWD_STATUS_WRITE | QI_IWD_TYPE;
 	wait_desc.high = virt_to_phys(&qi->desc_status[wait_index]);
 
 	hw[wait_index] = wait_desc;
@@ -612,13 +653,11 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 	qi->free_head = (qi->free_head + 2) % QI_LENGTH;
 	qi->free_cnt -= 2;
 
-	spin_lock(&iommu->register_lock);
 	/*
 	 * update the HW tail register indicating the presence of
 	 * new descriptors.
 	 */
 	writel(qi->free_head << 4, iommu->reg + DMAR_IQT_REG);
-	spin_unlock(&iommu->register_lock);
 
 	while (qi->desc_status[wait_index] != QI_DONE) {
 		/*
@@ -628,15 +667,21 @@ void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu)
 		 * a deadlock where the interrupt context can wait indefinitely
 		 * for free slots in the queue.
 		 */
+		rc = qi_check_fault(iommu, index);
+		if (rc)
+			goto out;
+
 		spin_unlock(&qi->q_lock);
 		cpu_relax();
 		spin_lock(&qi->q_lock);
 	}
-
-	qi->desc_status[index] = QI_DONE;
+out:
+	qi->desc_status[index] = qi->desc_status[wait_index] = QI_DONE;
 
 	reclaim_free_desc(qi);
 	spin_unlock_irqrestore(&qi->q_lock, flags);
+
+	return rc;
 }
 
 /*
@@ -649,13 +694,13 @@ void qi_global_iec(struct intel_iommu *iommu)
 	desc.low = QI_IEC_TYPE;
 	desc.high = 0;
 
+	/* should never fail */
 	qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
 		u64 type, int non_present_entry_flush)
 {
 	struct qi_desc desc;
 
 	if (non_present_entry_flush) {
@@ -669,10 +714,7 @@ int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
 		| QI_CC_GRAN(type) | QI_CC_TYPE;
 	desc.high = 0;
 
-	qi_submit_sync(&desc, iommu);
-
-	return 0;
+	return qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
@@ -702,10 +744,7 @@ int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 	desc.high = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
 		| QI_IOTLB_AM(size_order);
 
-	qi_submit_sync(&desc, iommu);
-
-	return 0;
+	return qi_submit_sync(&desc, iommu);
 }
 
 /*
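With qi_submit_sync() now returning an int, wrappers built on top of the
invalidation queue can propagate an Invalidation Queue Error instead of
reporting unconditional success. A minimal caller sketch, illustrative only
and not part of this commit (example_flush_domain is a hypothetical name;
the QI_CC_* and DMA_CCMD_DOMAIN_INVL macros are the existing ones from
include/linux/intel-iommu.h):

	/*
	 * Hypothetical caller: build a domain-selective context-cache
	 * invalidation descriptor and pass along -EINVAL if the
	 * hardware flagged an IQE for it.
	 */
	static int example_flush_domain(struct intel_iommu *iommu, u16 did)
	{
		struct qi_desc desc;

		desc.low = QI_CC_FM(0) | QI_CC_SID(0) | QI_CC_DID(did)
				| QI_CC_GRAN(DMA_CCMD_DOMAIN_INVL) | QI_CC_TYPE;
		desc.high = 0;

		return qi_submit_sync(&desc, iommu);	/* 0 on success */
	}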

diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c

@@ -207,7 +207,7 @@ int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	return index;
 }
 
-static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
+static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 {
 	struct qi_desc desc;
@@ -215,7 +215,7 @@ static void qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 		| QI_IEC_SELECTIVE;
 	desc.high = 0;
 
-	qi_submit_sync(&desc, iommu);
+	return qi_submit_sync(&desc, iommu);
 }
 
 int map_irq_to_irte_handle(int irq, u16 *sub_handle)
@@ -283,6 +283,7 @@ int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index)
 
 int modify_irte(int irq, struct irte *irte_modified)
 {
+	int rc;
 	int index;
 	struct irte *irte;
 	struct intel_iommu *iommu;
@@ -303,14 +304,15 @@ int modify_irte(int irq, struct irte *irte_modified)
 	set_64bit((unsigned long *)irte, irte_modified->low | (1 << 1));
 	__iommu_flush_cache(iommu, irte, sizeof(*irte));
 
-	qi_flush_iec(iommu, index, 0);
+	rc = qi_flush_iec(iommu, index, 0);
 	spin_unlock(&irq_2_ir_lock);
 
-	return 0;
+	return rc;
 }
 
 int flush_irte(int irq)
 {
+	int rc;
 	int index;
 	struct intel_iommu *iommu;
 	struct irq_2_iommu *irq_iommu;
@@ -326,10 +328,10 @@ int flush_irte(int irq)
 
 	index = irq_iommu->irte_index + irq_iommu->sub_handle;
 
-	qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+	rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	spin_unlock(&irq_2_ir_lock);
 
-	return 0;
+	return rc;
 }
 
 struct intel_iommu *map_ioapic_to_ir(int apic)
@@ -355,6 +357,7 @@ struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
 
 int free_irte(int irq)
 {
+	int rc = 0;
 	int index, i;
 	struct irte *irte;
 	struct intel_iommu *iommu;
@@ -375,7 +378,7 @@ int free_irte(int irq)
 	if (!irq_iommu->sub_handle) {
 		for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
 			set_64bit((unsigned long *)irte, 0);
-		qi_flush_iec(iommu, index, irq_iommu->irte_mask);
+		rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 	}
 
 	irq_iommu->iommu = NULL;
@@ -385,7 +388,7 @@ int free_irte(int irq)
 
 	spin_unlock(&irq_2_ir_lock);
 
-	return 0;
+	return rc;
 }
 
 static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
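Callers of the interrupt-remapping helpers can now observe a failed IEC
invalidation. An illustrative fragment, not from this commit
(example_update_irte is a hypothetical name; irte_modified is assumed to be
a populated struct irte):

	/*
	 * Hypothetical caller: modify_irte() now returns the result of
	 * qi_flush_iec(), so a faulted invalidation queue is reported
	 * instead of being silently ignored.
	 */
	static int example_update_irte(int irq, struct irte *irte_modified)
	{
		int rc = modify_irte(irq, irte_modified);

		if (rc)
			printk(KERN_ERR
				"IRTE flush failed for irq %d (%d)\n", irq, rc);
		return rc;
	}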

diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h

@@ -194,6 +194,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 /* FSTS_REG */
 #define DMA_FSTS_PPF ((u32)2)
 #define DMA_FSTS_PFO ((u32)1)
+#define DMA_FSTS_IQE (1 << 4)
 #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
 
 /* FRCD_REG, 32 bits access */
@@ -328,7 +329,7 @@ extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 			unsigned int size_order, u64 type,
 			int non_present_entry_flush);
 
-extern void qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
+extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
 extern void *intel_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
 extern void intel_free_coherent(struct device *, size_t, void *, dma_addr_t);
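DMA_FSTS_IQE mirrors the Invalidation Queue Error bit of the VT-d Fault
Status Register, whose fault bits are write-1-to-clear. A minimal sketch of
testing the bit (example_iqe_pending is a hypothetical helper; clearing the
bit is a separate store, as qi_check_fault() in this commit does):

	/*
	 * Hypothetical helper: report whether an Invalidation Queue
	 * Error is currently latched in FSTS_REG.
	 */
	static bool example_iqe_pending(struct intel_iommu *iommu)
	{
		u32 fsts = readl(iommu->reg + DMAR_FSTS_REG);

		return (fsts & DMA_FSTS_IQE) != 0;
	}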