mtd: fsmc_nand: add missing DMA unmap to dma_xfer()

Make dma_xfer() do the DMA unmapping itself (the DMA_COMPL_SKIP_*_UNMAP
flags tell the dmaengine core not to) and fix the failure paths so the
buffer is unmapped on every exit from the function.

Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Vipin Kumar <vipin.kumar@st.com>
Cc: Tomasz Figa <t.figa@samsung.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Dan Williams <djbw@fb.com>
Bartlomiej Zolnierkiewicz 2012-11-05 10:00:14 +00:00 committed by Vinod Koul
parent 522d974451
commit d1806a5c4d
1 changed file with 14 additions and 8 deletions


@@ -573,23 +573,22 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
         dma_dev = chan->device;
         dma_addr = dma_map_single(dma_dev->dev, buffer, len, direction);
 
+        flags |= DMA_COMPL_SKIP_SRC_UNMAP | DMA_COMPL_SKIP_DEST_UNMAP;
+
         if (direction == DMA_TO_DEVICE) {
                 dma_src = dma_addr;
                 dma_dst = host->data_pa;
-                flags |= DMA_COMPL_SRC_UNMAP_SINGLE | DMA_COMPL_SKIP_DEST_UNMAP;
         } else {
                 dma_src = host->data_pa;
                 dma_dst = dma_addr;
-                flags |= DMA_COMPL_DEST_UNMAP_SINGLE | DMA_COMPL_SKIP_SRC_UNMAP;
         }
 
         tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
                         len, flags);
-
         if (!tx) {
                 dev_err(host->dev, "device_prep_dma_memcpy error\n");
-                dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
-                return -EIO;
+                ret = -EIO;
+                goto unmap_dma;
         }
 
         tx->callback = dma_complete;
@@ -599,7 +598,7 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
         ret = dma_submit_error(cookie);
         if (ret) {
                 dev_err(host->dev, "dma_submit_error %d\n", cookie);
-                return ret;
+                goto unmap_dma;
         }
 
         dma_async_issue_pending(chan);
@@ -610,10 +609,17 @@ static int dma_xfer(struct fsmc_nand_data *host, void *buffer, int len,
         if (ret <= 0) {
                 chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
                 dev_err(host->dev, "wait_for_completion_timeout\n");
-                return ret ? ret : -ETIMEDOUT;
+                if (!ret)
+                        ret = -ETIMEDOUT;
+                goto unmap_dma;
         }
 
-        return 0;
+        ret = 0;
+
+unmap_dma:
+        dma_unmap_single(dma_dev->dev, dma_addr, len, direction);
+
+        return ret;
 }
 
 /*
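
For context, the patch converts dma_xfer() to the usual goto-based cleanup pattern: the buffer is mapped once, and every exit path (prep failure, submit failure, timeout, success) is routed through a single unmap_dma label that unmaps it. The sketch below illustrates that pattern in isolation; it is not the driver source. The helpers prepare_and_submit() and wait_for_dma() are hypothetical stand-ins for the real dmaengine prep/submit/wait calls, and the dma_mapping_error() check is an addition of the sketch, not of the patch.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical stand-ins for the dmaengine prep/submit and completion-wait
 * steps; they exist only to keep the sketch self-contained. */
static int prepare_and_submit(struct device *dev, dma_addr_t addr, size_t len,
                              enum dma_data_direction dir)
{
        return 0;
}

static int wait_for_dma(struct device *dev)
{
        return 0;
}

/* Minimal sketch of the pattern the patch adopts: map once, then funnel
 * every exit path through one label that unmaps the buffer. */
static int dma_xfer_sketch(struct device *dev, void *buf, size_t len,
                           enum dma_data_direction dir)
{
        dma_addr_t dma_addr;
        int ret;

        dma_addr = dma_map_single(dev, buf, len, dir);
        if (dma_mapping_error(dev, dma_addr))
                return -ENOMEM;

        ret = prepare_and_submit(dev, dma_addr, len, dir);
        if (ret)
                goto unmap_dma;         /* error path still unmaps */

        ret = wait_for_dma(dev);        /* success or timeout, fall through */

unmap_dma:
        dma_unmap_single(dev, dma_addr, len, dir);

        return ret;
}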