intel-iommu: Clean up handling of "caching mode" vs. context flushing.

It really doesn't make a lot of sense to have some of the logic to
handle caching vs. non-caching mode duplicated in qi_flush_context() and
__iommu_flush_context(), while the return value indicates whether the
caller should take further action that depends on the very same check.

Especially since qi_flush_context() thought it was returning something
entirely different anyway.

This patch makes qi_flush_context() and __iommu_flush_context() both
return void, removes the 'non_present_entry_flush' argument, and makes
the only call site which _set_ that argument to 1 do the right thing.
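
In shorthand (an illustrative sketch with placeholder did/sid/fm/type
arguments, not code lifted from the patch), the call-site convention
moves from this:

	/* Before: the flush routine hides the caching-mode check, and
	 * its return value tells the caller to flush the write buffer. */
	if (iommu->flush.flush_context(iommu, did, sid, fm, type, 1))
		iommu_flush_write_buffer(iommu);

to this:

	/* After: the caller performs the caching-mode check itself; the
	 * flush routine unconditionally issues the invalidation (against
	 * domain 0, where caching-mode hardware caches non-present
	 * entries). */
	if (cap_caching_mode(iommu->cap))
		iommu->flush.flush_context(iommu, 0, sid, fm, type);
	else
		iommu_flush_write_buffer(iommu);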

Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   2009-05-10 17:16:06 +01:00
Parent: fa3b6dcd52
Commit: 4c25a2c1b9

3 changed files with 28 additions and 45 deletions

diff --git a/drivers/pci/dmar.c b/drivers/pci/dmar.c
--- a/drivers/pci/dmar.c
+++ b/drivers/pci/dmar.c

@@ -723,23 +723,16 @@ void qi_global_iec(struct intel_iommu *iommu)
 	qi_submit_sync(&desc, iommu);
 }
 
-int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
-		     u64 type, int non_present_entry_flush)
+void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
+		      u64 type)
 {
 	struct qi_desc desc;
 
-	if (non_present_entry_flush) {
-		if (!cap_caching_mode(iommu->cap))
-			return 1;
-		else
-			did = 0;
-	}
-
 	desc.low = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did)
 			| QI_CC_GRAN(type) | QI_CC_TYPE;
 	desc.high = 0;
 
-	return qi_submit_sync(&desc, iommu);
+	qi_submit_sync(&desc, iommu);
 }
 
 int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
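
For reference, the QI_CC_* helpers above pack a context-cache
invalidation descriptor for qi_submit_sync(). A hedged sketch of that
packing follows (bit positions per the VT-d spec; the helper name
cc_inv_desc_low is ours for illustration, not the kernel's):

	/* Low 64 bits of the context-cache invalidate descriptor:
	 *   bits  3:0   descriptor type (0x1 = context-cache invalidate)
	 *   bits  5:4   granularity (global / domain / device)
	 *   bits 31:16  domain-id (did)
	 *   bits 47:32  source-id (sid)
	 *   bits 49:48  function mask (fm)
	 * The high 64 bits are reserved, hence desc.high = 0 above. */
	static inline u64 cc_inv_desc_low(u16 did, u16 sid, u8 fm, u8 gran)
	{
		return ((u64)fm << 48) | ((u64)sid << 32) |
		       ((u64)did << 16) | ((u64)gran << 4) | 0x1;
	}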

diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c

@@ -857,26 +857,13 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
 }
 
 /* return value determines if we need a write buffer flush */
-static int __iommu_flush_context(struct intel_iommu *iommu,
-	u16 did, u16 source_id, u8 function_mask, u64 type,
-	int non_present_entry_flush)
+static void __iommu_flush_context(struct intel_iommu *iommu,
+				  u16 did, u16 source_id, u8 function_mask,
+				  u64 type)
 {
 	u64 val = 0;
 	unsigned long flag;
 
-	/*
-	 * In the non-present entry flush case, if hardware doesn't cache
-	 * non-present entries we do nothing; if hardware does cache
-	 * non-present entries, we flush entries of domain 0 (the domain
-	 * id used to cache any non-present entries).
-	 */
-	if (non_present_entry_flush) {
-		if (!cap_caching_mode(iommu->cap))
-			return 1;
-		else
-			did = 0;
-	}
-
 	switch (type) {
 	case DMA_CCMD_GLOBAL_INVL:
 		val = DMA_CCMD_GLOBAL_INVL;
@@ -901,9 +888,6 @@ static int __iommu_flush_context(struct intel_iommu *iommu,
 		dmar_readq, (!(val & DMA_CCMD_ICC)), val);
 
 	spin_unlock_irqrestore(&iommu->register_lock, flag);
-
-	/* flush context entry will implicitly flush write buffer */
-	return 0;
 }
 
 /* return value determines if we need a write buffer flush */
@@ -1428,14 +1412,21 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
 	context_set_present(context);
 	domain_flush_cache(domain, context, sizeof(*context));
 
-	/* it's a non-present to present mapping */
-	if (iommu->flush.flush_context(iommu, id,
-		(((u16)bus) << 8) | devfn, DMA_CCMD_MASK_NOBIT,
-		DMA_CCMD_DEVICE_INVL, 1))
-		iommu_flush_write_buffer(iommu);
-	else
+	/*
+	 * It's a non-present to present mapping. If hardware doesn't cache
+	 * non-present entries, we only need to flush the write-buffer. If
+	 * it _does_ cache non-present entries, then it does so in the
+	 * special domain #0, which we have to flush:
+	 */
+	if (cap_caching_mode(iommu->cap)) {
+		iommu->flush.flush_context(iommu, 0,
+					   (((u16)bus) << 8) | devfn,
+					   DMA_CCMD_MASK_NOBIT,
+					   DMA_CCMD_DEVICE_INVL);
 		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_DSI_FLUSH, 0);
+	} else {
+		iommu_flush_write_buffer(iommu);
+	}
 
 	spin_unlock_irqrestore(&iommu->lock, flags);
 
 	spin_lock_irqsave(&domain->iommu_lock, flags);
@@ -1566,7 +1557,7 @@ static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
 	clear_context_table(iommu, bus, devfn);
 	iommu->flush.flush_context(iommu, 0, 0, 0,
-					   DMA_CCMD_GLOBAL_INVL, 0);
+					   DMA_CCMD_GLOBAL_INVL);
 	iommu->flush.flush_iotlb(iommu, 0, 0, 0,
 					 DMA_TLB_GLOBAL_FLUSH, 0);
 }
@@ -2104,8 +2095,7 @@ static int __init init_dmars(void)
 		iommu_set_root_entry(iommu);
 
-		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL,
-					   0);
+		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
 		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH,
 					 0);
 		iommu_disable_protect_mem_regions(iommu);
@@ -2721,7 +2711,7 @@ static int init_iommu_hw(void)
 		iommu_set_root_entry(iommu);
 		iommu->flush.flush_context(iommu, 0, 0, 0,
-					   DMA_CCMD_GLOBAL_INVL, 0);
+					   DMA_CCMD_GLOBAL_INVL);
 		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
 					 DMA_TLB_GLOBAL_FLUSH, 0);
 		iommu_disable_protect_mem_regions(iommu);
@@ -2738,7 +2728,7 @@ static void iommu_flush_all(void)
 	for_each_active_iommu(iommu, drhd) {
 		iommu->flush.flush_context(iommu, 0, 0, 0,
-					   DMA_CCMD_GLOBAL_INVL, 0);
+					   DMA_CCMD_GLOBAL_INVL);
 		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
 					 DMA_TLB_GLOBAL_FLUSH, 0);
 	}
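
All of the call sites above now pivot on cap_caching_mode(), which
reads the Caching Mode (CM) bit of the VT-d capability register; in
include/linux/intel-iommu.h it is a one-liner along these lines:

	/* Caching Mode: bit 7 of the capability register. When set, the
	 * hardware may cache non-present and erroneous entries, so even
	 * a non-present to present transition needs an explicit
	 * invalidation (issued against the special domain-id 0). */
	#define cap_caching_mode(c)	(((c) >> 7) & 1)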

diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h

@@ -281,8 +281,8 @@ struct ir_table {
 #endif
 
 struct iommu_flush {
-	int (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
-		u64 type, int non_present_entry_flush);
+	void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
+			      u8 fm, u64 type);
 	int (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
 		unsigned int size_order, u64 type, int non_present_entry_flush);
 };
@@ -339,8 +339,8 @@ extern void dmar_disable_qi(struct intel_iommu *iommu);
 extern int dmar_reenable_qi(struct intel_iommu *iommu);
 extern void qi_global_iec(struct intel_iommu *iommu);
 
-extern int qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
-			    u8 fm, u64 type, int non_present_entry_flush);
+extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
+			     u8 fm, u64 type);
 extern int qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
 			  unsigned int size_order, u64 type,
 			  int non_present_entry_flush);
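
For context (a sketch condensed from init_dmars(), which this patch
does not restructure): each IOMMU is wired up with either the
register-based or the queued-invalidation implementation behind these
same function pointers, which is why qi_flush_context() and
__iommu_flush_context() had to change signature in lockstep:

	/* Pick the flush backend per IOMMU. dmar_enable_qi() returns 0
	 * on success, nonzero if queued invalidation is unavailable. */
	if (dmar_enable_qi(iommu)) {
		iommu->flush.flush_context = __iommu_flush_context;
		iommu->flush.flush_iotlb = __iommu_flush_iotlb;
	} else {
		iommu->flush.flush_context = qi_flush_context;
		iommu->flush.flush_iotlb = qi_flush_iotlb;
	}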