dma-mapping: fix handling of dma-ranges for reserved memory (again)

[ Upstream commit a445e940ea ]

Daniele reported that the issue previously fixed in c41f9ea998
("drivers: dma-coherent: Account dma_pfn_offset when used with device
tree") reappeared after 43fc509c3e ("dma-coherent: introduce
interface for default DMA pool"), where the fix was accidentally dropped.

Let's put the fix back in place and respect dma-ranges for reserved memory.
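
For reference, the translation that makes this work is done by the
dma_get_device_base() helper added by c41f9ea998. A rough sketch of it,
for illustration only (field names as in the coherent-pool code of this era):

    static inline dma_addr_t dma_get_device_base(struct device *dev,
                                                 struct dma_coherent_mem *mem)
    {
            /* Pool declared via DT reserved-memory: apply the device's
             * dma-ranges translation (dev->dma_pfn_offset). */
            if (mem->use_dev_dma_pfn_offset)
                    return (mem->pfn_base - dev->dma_pfn_offset) << PAGE_SHIFT;
            /* Pool declared with an explicit device address: use it as-is. */
            return mem->device_base;
    }

The diff below simply plumbs the struct device pointer down to the allocator
so this helper can be used when the dma_handle is computed.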

Fixes: 43fc509c3e ("dma-coherent: introduce interface for default DMA pool")

Reported-by: Daniele Alessandrelli <daniele.alessandrelli@gmail.com>
Tested-by: Daniele Alessandrelli <daniele.alessandrelli@gmail.com>
Tested-by: Alexandre Torgue <alexandre.torgue@st.com>
Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Commit fee76d84ba (parent 686dd313a2), authored by Vladimir Murzin on 2019-10-30 10:13:13 +00:00 and committed by Greg Kroah-Hartman; 3 changed files with 12 additions and 10 deletions.

@@ -35,7 +35,7 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
 				 unsigned long attrs)
 {
-	void *ret = dma_alloc_from_global_coherent(size, dma_handle);
+	void *ret = dma_alloc_from_global_coherent(dev, size, dma_handle);
 	/*
 	 * dma_alloc_from_global_coherent() may fail because:
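
(For orientation, not part of the patch: on !MMU ARM an allocation ends up in
the hunk above roughly via the chain sketched below; names are assumed from
the mainline tree of this era.)

    /*
     * dma_alloc_coherent(dev, size, &dma_handle, gfp)
     *   -> dma_alloc_attrs()                   tries the per-device pool first
     *     -> arm_nommu_dma_alloc()             no MMU, so no remapping is possible
     *       -> dma_alloc_from_global_coherent(dev, size, &dma_handle)
     *
     * Passing 'dev' down is what lets the pool code apply the device's
     * dma_pfn_offset when it computes the returned dma_handle.
     */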

@@ -162,7 +162,7 @@ int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
 int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
 			       void *cpu_addr, size_t size, int *ret);
-void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
+void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle);
 int dma_release_from_global_coherent(int order, void *vaddr);
 int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
 				  size_t size, int *ret);
@@ -172,7 +172,7 @@ int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
 #define dma_release_from_dev_coherent(dev, order, vaddr) (0)
 #define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
-static inline void *dma_alloc_from_global_coherent(ssize_t size,
+static inline void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
 						   dma_addr_t *dma_handle)
 {
 	return NULL;

@@ -123,8 +123,9 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
 	return ret;
 }
-static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
-		ssize_t size, dma_addr_t *dma_handle)
+static void *__dma_alloc_from_coherent(struct device *dev,
+				       struct dma_coherent_mem *mem,
+				       ssize_t size, dma_addr_t *dma_handle)
 {
 	int order = get_order(size);
 	unsigned long flags;
@@ -143,7 +144,7 @@ static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem,
 	/*
 	 * Memory was found in the coherent area.
 	 */
-	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+	*dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT);
 	ret = mem->virt_base + (pageno << PAGE_SHIFT);
 	spin_unlock_irqrestore(&mem->spinlock, flags);
 	memset(ret, 0, size);
@@ -175,17 +176,18 @@ int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
 	if (!mem)
 		return 0;
-	*ret = __dma_alloc_from_coherent(mem, size, dma_handle);
+	*ret = __dma_alloc_from_coherent(dev, mem, size, dma_handle);
 	return 1;
 }
-void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle)
+void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
+				     dma_addr_t *dma_handle)
 {
 	if (!dma_coherent_default_memory)
 		return NULL;
-	return __dma_alloc_from_coherent(dma_coherent_default_memory, size,
-			dma_handle);
+	return __dma_alloc_from_coherent(dev, dma_coherent_default_memory, size,
+					 dma_handle);
 }
 static int __dma_release_from_coherent(struct dma_coherent_mem *mem,
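
To make the effect of the last hunk concrete, here is a hypothetical layout
(all numbers invented for illustration): a reserved pool whose CPU physical
base is 0x78000000, reached by the device at bus address 0x38000000 because of
a dma-ranges translation of 0x40000000.

    /* Hypothetical values, for illustration only:
     *   mem->pfn_base       = 0x78000000 >> PAGE_SHIFT;   CPU physical base of the pool
     *   dev->dma_pfn_offset = 0x40000000 >> PAGE_SHIFT;   from the bus node's dma-ranges
     *
     * Before the fix:
     *   *dma_handle = mem->device_base + offset
     *               = 0x78000000 + offset;                 CPU address, wrong for the device
     *
     * After the fix:
     *   *dma_handle = dma_get_device_base(dev, mem) + offset
     *               = ((0x78000 - 0x40000) << PAGE_SHIFT) + offset
     *               = 0x38000000 + offset;                 bus address the device can actually use
     */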