iommu/arm-smmu: Allow building as a module

By conditionally dropping support for the legacy binding and exporting
the newly introduced 'arm_smmu_impl_init()' function, we can allow the
ARM SMMU driver to be built as a module.

Signed-off-by: Will Deacon <will@kernel.org>
Tested-by: John Garry <john.garry@huawei.com> # smmu v3
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
commit cd221bd24f (parent 7359572e1a)
Author: Will Deacon, 2019-12-19 12:03:51 +00:00
Committer: Joerg Roedel
3 changed files, 47 insertions(+), 22 deletions(-)
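A note on the mechanics before the diff: making ARM_SMMU tristate means the driver must carry module metadata and be unloadable. That boilerplate is not visible in the hunks reproduced below, so here is a minimal, hypothetical sketch of what it typically looks like for a platform driver of this shape (the identifiers follow arm-smmu.c, but these exact lines are an assumption, not quoted from the commit):

/*
 * Illustrative sketch only -- not part of the diff below.
 * module_platform_driver() expands to module init/exit functions that
 * register and unregister the platform driver, which is what makes
 * insmod/rmmod work once the Kconfig symbol is tristate.
 */
module_platform_driver(arm_smmu_driver);

MODULE_DESCRIPTION("IOMMU API for ARM architected SMMU implementations");
MODULE_LICENSE("GPL v2");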

--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig

@@ -356,7 +356,7 @@ config SPAPR_TCE_IOMMU
 # ARM IOMMU support
 config ARM_SMMU
-        bool "ARM Ltd. System MMU (SMMU) Support"
+        tristate "ARM Ltd. System MMU (SMMU) Support"
         depends on (ARM64 || ARM) && MMU
         select IOMMU_API
         select IOMMU_IO_PGTABLE_LPAE
@@ -368,6 +368,18 @@ config ARM_SMMU
           Say Y here if your SoC includes an IOMMU device implementing
           the ARM SMMU architecture.
 
+config ARM_SMMU_LEGACY_DT_BINDINGS
+        bool "Support the legacy \"mmu-masters\" devicetree bindings"
+        depends on ARM_SMMU=y && OF
+        help
+          Support for the badly designed and deprecated "mmu-masters"
+          devicetree bindings. This allows some DMA masters to attach
+          to the SMMU but does not provide any support via the DMA API.
+          If you're lucky, you might be able to get VFIO up and running.
+
+          If you say Y here then you'll make me very sad. Instead, say N
+          and move your firmware to the utopian future that was 2016.
+
 config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
         bool "Default to disabling bypass on ARM SMMU v1 and v2"
         depends on ARM_SMMU
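Note the "depends on ARM_SMMU=y && OF" line: legacy "mmu-masters" support hooks the bus ops from a boot-time initcall (see the arm-smmu.c hunks below), which can never fire for a module loaded after boot. The option is therefore only offered when the driver is built in; with ARM_SMMU=m it disappears from configuration and the legacy code paths compile out to a stub.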

--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile

@@ -14,7 +14,8 @@ obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
 obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
 obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
 obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
-obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
+obj-$(CONFIG_ARM_SMMU) += arm-smmu-mod.o
+arm-smmu-mod-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
 obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
 obj-$(CONFIG_DMAR_TABLE) += dmar.o
 obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
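Kbuild detail: the "arm-smmu-mod-objs" line turns arm-smmu-mod.o into a composite object linking the three constituent objects, so CONFIG_ARM_SMMU=m now yields a single arm-smmu-mod.ko. The "-mod" suffix is needed because kbuild cannot name a composite module after one of its own constituent objects, and arm-smmu.o is already taken.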

--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c

@@ -126,6 +126,12 @@ static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
         return container_of(dom, struct arm_smmu_domain, domain);
 }
 
+static struct platform_driver arm_smmu_driver;
+static struct iommu_ops arm_smmu_ops;
+
+#ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
+static int arm_smmu_bus_init(struct iommu_ops *ops);
+
 static struct device_node *dev_get_dev_node(struct device *dev)
 {
         if (dev_is_pci(dev)) {
@@ -161,9 +167,6 @@ static int __find_legacy_master_phandle(struct device *dev, void *data)
         return err == -ENOENT ? 0 : err;
 }
 
-static struct platform_driver arm_smmu_driver;
-static struct iommu_ops arm_smmu_ops;
-
 static int arm_smmu_register_legacy_master(struct device *dev,
                                            struct arm_smmu_device **smmu)
 {
@@ -215,6 +218,27 @@ static int arm_smmu_register_legacy_master(struct device *dev,
         return err;
 }
 
+/*
+ * With the legacy DT binding in play, we have no guarantees about
+ * probe order, but then we're also not doing default domains, so we can
+ * delay setting bus ops until we're sure every possible SMMU is ready,
+ * and that way ensure that no add_device() calls get missed.
+ */
+static int arm_smmu_legacy_bus_init(void)
+{
+        if (using_legacy_binding)
+                return arm_smmu_bus_init(&arm_smmu_ops);
+        return 0;
+}
+device_initcall_sync(arm_smmu_legacy_bus_init);
+#else
+static int arm_smmu_register_legacy_master(struct device *dev,
+                                           struct arm_smmu_device **smmu)
+{
+        return -ENODEV;
+}
+#endif /* CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS */
+
 static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
 {
         int idx;
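The device_initcall_sync() trick is what makes the deferral above safe: _sync callbacks run at initcall level 6s, strictly after every plain device_initcall() (level 6) where built-in platform drivers probe, so by the time arm_smmu_legacy_bus_init() runs, every possible SMMU really is ready. When the legacy option is compiled out, the whole block collapses to an arm_smmu_register_legacy_master() stub that simply reports -ENODEV.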
@@ -1599,6 +1623,7 @@ static struct iommu_ops arm_smmu_ops = {
         .get_resv_regions       = arm_smmu_get_resv_regions,
         .put_resv_regions       = arm_smmu_put_resv_regions,
         .pgsize_bitmap          = -1UL, /* Restricted during device attach */
+        .owner                  = THIS_MODULE,
 };
 
 static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
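Setting .owner lets core code pin the module while the ops are in use (THIS_MODULE evaluates to NULL in built-in code, where pinning is a no-op). A hedged sketch of the general pattern a consumer of such ops follows; the exact call site in the IOMMU core may differ:

/* Take a reference so the providing module cannot be unloaded while
 * its ops are live; try_module_get(NULL) succeeds, so the same code
 * works for built-in providers. */
if (!try_module_get(ops->owner))
        return -ENODEV;
/* ... use ops ... */
module_put(ops->owner);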
@@ -1993,8 +2018,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
         legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
         if (legacy_binding && !using_generic_binding) {
-                if (!using_legacy_binding)
-                        pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n");
+                if (!using_legacy_binding) {
+                        pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n",
+                                  IS_ENABLED(CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS) ? "DMA API" : "SMMU");
+                }
                 using_legacy_binding = true;
         } else if (!legacy_binding && !using_legacy_binding) {
                 using_generic_binding = true;
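The reworked message reflects the two possible outcomes: with CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS=y a legacy DT still works, minus the DMA API; without it the -ENODEV stub makes probing fail outright, hence "SMMU support unavailable".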
@@ -2028,7 +2055,6 @@ static int arm_smmu_bus_init(struct iommu_ops *ops)
 #endif
 #ifdef CONFIG_PCI
         if (!iommu_present(&pci_bus_type)) {
-                pci_request_acs();
                 err = bus_set_iommu(&pci_bus_type, ops);
                 if (err)
                         goto err_reset_amba_ops;
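The pci_request_acs() call has to go because that symbol is not exported to modules; requesting ACS appears to move out of the driver and into the firmware-parsing code in a companion patch from the same series (an inference from the series, not stated in this commit).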
@@ -2204,20 +2230,6 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
         return 0;
 }
 
-/*
- * With the legacy DT binding in play, though, we have no guarantees about
- * probe order, but then we're also not doing default domains, so we can
- * delay setting bus ops until we're sure every possible SMMU is ready,
- * and that way ensure that no add_device() calls get missed.
- */
-static int arm_smmu_legacy_bus_init(void)
-{
-        if (using_legacy_binding)
-                return arm_smmu_bus_init(&arm_smmu_ops);
-        return 0;
-}
-device_initcall_sync(arm_smmu_legacy_bus_init);
-
 static int arm_smmu_device_remove(struct platform_device *pdev)
 {
         struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
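Nothing is lost in this final hunk: arm_smmu_legacy_bus_init() and its comment are the same code added under the CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS guard earlier in the file (only the ", though" was dropped from the comment), so the legacy machinery now exists only when the legacy Kconfig option is selected.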