iommu: Add "iommu.strict" command line option

Add a generic command line option to enable lazy unmapping via IOVA
flush queues, which will initially be supported by iommu-dma. This echoes
the semantics of "intel_iommu=strict" (albeit with the opposite default
value), but in the driver-agnostic fashion of "iommu.passthrough".

Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
[rm: move handling out of SMMUv3 driver, clean up documentation]
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
[will: dropped broken printk when parsing command-line option]
Signed-off-by: Will Deacon <will.deacon@arm.com>
commit 68a6efe86f (parent 2da274cdf9)
Authored by Zhen Lei, 2018-09-20 17:10:23 +01:00; committed by Will Deacon
2 changed files with 26 additions and 0 deletions


@@ -1749,6 +1749,18 @@
 		nobypass	[PPC/POWERNV]
 			Disable IOMMU bypass, using IOMMU for PCI devices.
 
+	iommu.strict=	[ARM64] Configure TLB invalidation behaviour
+			Format: { "0" | "1" }
+			0 - Lazy mode.
+			  Request that DMA unmap operations use deferred
+			  invalidation of hardware TLBs, for increased
+			  throughput at the cost of reduced device isolation.
+			  Will fall back to strict mode if not supported by
+			  the relevant IOMMU driver.
+			1 - Strict mode (default).
+			  DMA unmap operations invalidate IOMMU hardware TLBs
+			  synchronously.
+
 	iommu.passthrough=
 			[ARM64] Configure DMA to bypass the IOMMU by default.
 			Format: { "0" | "1" }
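
For illustration only (not part of the patch): with this change applied, requesting lazy invalidation on a supported arm64 system is just a matter of appending the new option to the kernel command line, e.g.

    iommu.strict=0

Omitting the option, or passing iommu.strict=1, keeps the default synchronous (strict) invalidation, and drivers that do not support the flush-queue attribute stay in strict mode regardless.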


@@ -41,6 +41,7 @@ static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_IDENTITY;
 #else
 static unsigned int iommu_def_domain_type = IOMMU_DOMAIN_DMA;
 #endif
+static bool iommu_dma_strict __read_mostly = true;
 
 struct iommu_callback_data {
 	const struct iommu_ops *ops;
@@ -131,6 +132,12 @@ static int __init iommu_set_def_domain_type(char *str)
 }
 early_param("iommu.passthrough", iommu_set_def_domain_type);
 
+static int __init iommu_dma_setup(char *str)
+{
+	return kstrtobool(str, &iommu_dma_strict);
+}
+early_param("iommu.strict", iommu_dma_setup);
+
 static ssize_t iommu_group_attr_show(struct kobject *kobj,
 				     struct attribute *__attr, char *buf)
 {
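
The handler defers to kstrtobool(), so in practice a few more spellings than the documented "0"/"1" are accepted. A minimal user-space approximation, assuming the standard kstrtobool() semantics; the helper name parse_strict_option() is purely illustrative:

#include <stdbool.h>

/* Illustrative only: mirrors the spellings accepted by kstrtobool(). */
int parse_strict_option(const char *s, bool *strict)
{
	if (!s)
		return -1;
	switch (s[0]) {
	case 'y': case 'Y': case '1':
		*strict = true;		/* synchronous TLB invalidation */
		return 0;
	case 'n': case 'N': case '0':
		*strict = false;	/* request IOVA flush queue (lazy) */
		return 0;
	case 'o': case 'O':
		if (s[1] == 'n' || s[1] == 'N') {
			*strict = true;
			return 0;
		}
		if (s[1] == 'f' || s[1] == 'F') {
			*strict = false;
			return 0;
		}
		return -1;
	default:
		return -1;		/* unparseable: the default (strict) is kept */
	}
}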
@@ -1072,6 +1079,13 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev)
 		group->default_domain = dom;
 		if (!group->domain)
 			group->domain = dom;
+
+		if (dom && !iommu_dma_strict) {
+			int attr = 1;
+			iommu_domain_set_attr(dom,
+					      DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE,
+					      &attr);
+		}
 	}
 
 	ret = iommu_group_add_device(group, dev);
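
How a driver acts on the new attribute is outside the scope of this patch. As a rough sketch, assuming an SMMUv3-style driver of this era, the .domain_set_attr callback could simply record the request on its domain object and apply a non-strict page-table quirk when the domain is attached; all driver-side names below (my_smmu_domain, to_my_smmu_domain, non_strict) are hypothetical:

/* Hypothetical driver-side consumer of DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE. */
static int my_smmu_domain_set_attr(struct iommu_domain *domain,
				   enum iommu_attr attr, void *data)
{
	struct my_smmu_domain *my_dom = to_my_smmu_domain(domain);

	switch (attr) {
	case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE:
		/* Deferred invalidation only makes sense for DMA API domains */
		if (domain->type != IOMMU_DOMAIN_DMA)
			return -EINVAL;
		my_dom->non_strict = *(int *)data;
		return 0;
	default:
		return -ENODEV;
	}
}

With such support in place, iommu-dma can attach an IOVA flush queue to the default domain and batch TLB invalidations instead of issuing them synchronously on every unmap.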