powerpc/dma: remove the iommu fallback for coherent allocations

All iommu-capable platforms now always use the iommu code with the
internal bypass, so there is no need for this magic anymore.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Christian Zigotzky <chzigotzky@xenosoft.de>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
commit e72849827a (parent 662acad406)
Author: Christoph Hellwig <hch@lst.de>, 2019-02-13 08:01:17 +01:00
Committed-by: Michael Ellerman <mpe@ellerman.id.au>

2 changed files with 2 additions and 70 deletions
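
The "internal bypass" the message refers to is the pattern where the powerpc
iommu DMA ops themselves try the direct mapping first and only go through the
iommu table when the device cannot address all of memory, which makes the
fallback removed below redundant. A minimal sketch of that shape, assuming the
helper and field names (dma_iommu_alloc_bypass, archdata.iommu_bypass) from
arch/powerpc/kernel/dma-iommu.c in this series; treat it as an approximation,
not the exact in-tree code:

/* Sketch only: the bypass decision now lives inside the iommu ops, so
 * dma_nommu_ops no longer needs an iommu fallback of its own.
 * dev->archdata.iommu_bypass and dma_iommu_alloc_bypass() are assumed
 * names from the same patch series.
 */
static inline bool dma_iommu_alloc_bypass(struct device *dev)
{
	/* Bypass only if the platform enabled it and the direct ops can
	 * satisfy the device's coherent mask. */
	return dev->archdata.iommu_bypass &&
		dma_nommu_dma_supported(dev, dev->coherent_dma_mask);
}

static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *dma_handle, gfp_t flag,
				      unsigned long attrs)
{
	if (dma_iommu_alloc_bypass(dev))
		return __dma_nommu_alloc_coherent(dev, size, dma_handle,
						  flag, attrs);
	/* No bypass: allocate through the iommu table as before. */
	return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
				    dma_handle, dev->coherent_dma_mask,
				    flag, dev_to_node(dev));
}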

diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig

@@ -119,9 +119,6 @@ config GENERIC_HWEIGHT
 	bool
 	default y
 
-config ARCH_HAS_DMA_SET_COHERENT_MASK
-	bool
-
 config PPC
 	bool
 	default y
@@ -130,7 +127,6 @@ config PPC
 	#
 	select ARCH_HAS_DEBUG_VIRTUAL
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
-	select ARCH_HAS_DMA_SET_COHERENT_MASK
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FORTIFY_SOURCE
 	select ARCH_HAS_GCOV_PROFILE_ALL
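
With the coherent-allocation fallback gone, powerpc no longer needs its own
dma_set_coherent_mask(), so the ARCH_HAS_DMA_SET_COHERENT_MASK symbol and its
select are dropped here and the generic implementation takes over (see the
sketch after the second file below).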

diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c

@@ -115,51 +115,6 @@ void __dma_nommu_free_coherent(struct device *dev, size_t size,
 }
 #endif /* !CONFIG_NOT_COHERENT_CACHE */
 
-static void *dma_nommu_alloc_coherent(struct device *dev, size_t size,
-				      dma_addr_t *dma_handle, gfp_t flag,
-				      unsigned long attrs)
-{
-	struct iommu_table *iommu;
-
-	/* The coherent mask may be smaller than the real mask, check if
-	 * we can really use the direct ops
-	 */
-	if (dma_nommu_dma_supported(dev, dev->coherent_dma_mask))
-		return __dma_nommu_alloc_coherent(dev, size, dma_handle,
-						  flag, attrs);
-
-	/* Ok we can't ... do we have an iommu ? If not, fail */
-	iommu = get_iommu_table_base(dev);
-	if (!iommu)
-		return NULL;
-
-	/* Try to use the iommu */
-	return iommu_alloc_coherent(dev, iommu, size, dma_handle,
-				    dev->coherent_dma_mask, flag,
-				    dev_to_node(dev));
-}
-
-static void dma_nommu_free_coherent(struct device *dev, size_t size,
-				    void *vaddr, dma_addr_t dma_handle,
-				    unsigned long attrs)
-{
-	struct iommu_table *iommu;
-
-	/* See comments in dma_nommu_alloc_coherent() */
-	if (dma_nommu_dma_supported(dev, dev->coherent_dma_mask))
-		return __dma_nommu_free_coherent(dev, size, vaddr, dma_handle,
-						 attrs);
-
-	/* Maybe we used an iommu ... */
-	iommu = get_iommu_table_base(dev);
-	/* If we hit that we should have never allocated in the first
-	 * place so how come we are freeing ?
-	 */
-	if (WARN_ON(!iommu))
-		return;
-
-	iommu_free_coherent(iommu, size, vaddr, dma_handle);
-}
-
 int dma_nommu_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
 			    void *cpu_addr, dma_addr_t handle, size_t size,
 			    unsigned long attrs)
@@ -262,8 +217,8 @@ static inline void dma_nommu_sync_single(struct device *dev,
 #endif
 
 const struct dma_map_ops dma_nommu_ops = {
-	.alloc				= dma_nommu_alloc_coherent,
-	.free				= dma_nommu_free_coherent,
+	.alloc				= __dma_nommu_alloc_coherent,
+	.free				= __dma_nommu_free_coherent,
 	.mmap				= dma_nommu_mmap_coherent,
 	.map_sg				= dma_nommu_map_sg,
 	.unmap_sg			= dma_nommu_unmap_sg,
@@ -280,25 +235,6 @@ const struct dma_map_ops dma_nommu_ops = {
 };
 EXPORT_SYMBOL(dma_nommu_ops);
 
-int dma_set_coherent_mask(struct device *dev, u64 mask)
-{
-	if (!dma_supported(dev, mask)) {
-		/*
-		 * We need to special case the direct DMA ops which can
-		 * support a fallback for coherent allocations. There
-		 * is no dma_op->set_coherent_mask() so we have to do
-		 * things the hard way:
-		 */
-		if (get_dma_ops(dev) != &dma_nommu_ops ||
-		    get_iommu_table_base(dev) == NULL ||
-		    !dma_iommu_dma_supported(dev, mask))
-			return -EIO;
-	}
-	dev->coherent_dma_mask = mask;
-	return 0;
-}
-EXPORT_SYMBOL(dma_set_coherent_mask);
-
 int dma_set_mask(struct device *dev, u64 dma_mask)
 {
 	if (ppc_md.dma_set_mask)
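
For reference, with the arch override removed (and ARCH_HAS_DMA_SET_COHERENT_MASK
deselected above), devices fall back to the generic dma_set_coherent_mask().
Roughly, as in include/linux/dma-mapping.h of this era (a sketch, not a verbatim
copy):

/* Generic fallback used when the architecture does not select
 * CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK: just validate the mask
 * against the device's DMA ops and store it. */
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}

A driver call such as dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) on a
device behind an iommu therefore has the mask validated by the iommu ops'
dma_supported() (dma_iommu_dma_supported()) rather than by the removed special
case.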