arch/c6x: add option to skip sync on DMA map and unmap
This change adds support for DMA_ATTR_SKIP_CPU_SYNC, which lets us avoid invoking cache line invalidation if the driver will just handle it later via a sync_for_cpu or sync_for_device call. Link: http://lkml.kernel.org/r/20161110113442.76501.7673.stgit@ahduyck-blue-test.jf.intel.com Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com> Acked-by: Mark Salter <msalter@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
8c16a2e209
commit
64c596b59c
|
@ -42,13 +42,16 @@ static dma_addr_t c6x_dma_map_page(struct device *dev, struct page *page,
|
|||
{
|
||||
dma_addr_t handle = virt_to_phys(page_address(page) + offset);
|
||||
|
||||
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
|
||||
c6x_dma_sync(handle, size, dir);
|
||||
|
||||
return handle;
|
||||
}
|
||||
|
||||
static void c6x_dma_unmap_page(struct device *dev, dma_addr_t handle,
|
||||
size_t size, enum dma_data_direction dir, unsigned long attrs)
|
||||
{
|
||||
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
|
||||
c6x_dma_sync(handle, size, dir);
|
||||
}
|
||||
|
||||
|
@ -60,6 +63,7 @@ static int c6x_dma_map_sg(struct device *dev, struct scatterlist *sglist,
|
|||
|
||||
for_each_sg(sglist, sg, nents, i) {
|
||||
sg->dma_address = sg_phys(sg);
|
||||
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
|
||||
c6x_dma_sync(sg->dma_address, sg->length, dir);
|
||||
}
|
||||
|
||||
|
@ -72,9 +76,11 @@ static void c6x_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
|
|||
struct scatterlist *sg;
|
||||
int i;
|
||||
|
||||
if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
|
||||
return;
|
||||
|
||||
for_each_sg(sglist, sg, nents, i)
|
||||
c6x_dma_sync(sg_dma_address(sg), sg->length, dir);
|
||||
|
||||
}
|
||||
|
||||
static void c6x_dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle,
|
||||
|
|
Loading…
Reference in New Issue