Merge branch 'fixes' of git://git.infradead.org/users/vkoul/slave-dma

Pull slave-dmaengine fixes from Vinod Koul:
 "Here is the common fixes PULL for dmaengine.

  Dan has been working on fixing build issues in a bunch of drivers.
  Here we have one fixing s3c24xx-dma, along with a fix from Russell for
  pl08x.  We also have Kuninori's rcar dma fixes.  The s3c24xx-dma driver,
  which was added in the last merge window, missed the update to use
  DMA_COMPLETE, so this converts the last remaining driver"

* 'fixes' of git://git.infradead.org/users/vkoul/slave-dma:
  dma: fix build breakage in s3c24xx-dma
  Fix pl08x warnings
  rcar-hpbdma: initialise plane information when halted
  rcar-hpbdma: fixup channel busy check for double plane
  rcar-hpbdma: add max transfer size
  dma: mmp_pdma: add missing platform_set_drvdata() in mmp_pdma_probe()
  dmaengine: s3c24xx-dma: use DMA_COMPLETE for dma completion status
Linus Torvalds 2013-12-13 11:29:51 -08:00
commit f649350591
4 changed files with 14 additions and 33 deletions

drivers/dma/amba-pl08x.c

@@ -1169,7 +1169,7 @@ static void pl08x_desc_free(struct virt_dma_desc *vd)
 	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
 
-	dma_descriptor_unmap(txd);
+	dma_descriptor_unmap(&vd->tx);
 	if (!txd->done)
 		pl08x_release_mux(plchan);
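
For background on the warning being fixed here: dma_descriptor_unmap() takes the generic struct dma_async_tx_descriptor, while the driver was passing its private struct pl08x_txd, so the call now uses the embedded vd->tx instead. Below is a minimal sketch of that pattern for a virt-dma based driver; foo_txd and foo_desc_free are invented names, only dma_descriptor_unmap(), struct virt_dma_desc and container_of() are real kernel symbols.

#include <linux/dmaengine.h>
#include <linux/slab.h>
#include "virt-dma.h"		/* drivers/dma/virt-dma.h */

struct foo_txd {			/* hypothetical driver descriptor */
	struct virt_dma_desc vd;	/* vd.tx is the generic descriptor */
};

static void foo_desc_free(struct virt_dma_desc *vd)
{
	struct foo_txd *txd = container_of(vd, struct foo_txd, vd);

	/* Hand the helper the embedded generic descriptor, not the
	 * driver-private wrapper, to avoid the pointer-type warning. */
	dma_descriptor_unmap(&vd->tx);
	kfree(txd);
}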

drivers/dma/mmp_pdma.c

@@ -1017,6 +1017,7 @@ static int mmp_pdma_probe(struct platform_device *op)
 		}
 	}
 
+	platform_set_drvdata(op, pdev);
 	dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
 	return 0;
 }
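
The mmp_pdma fix is standard probe-side bookkeeping: platform_set_drvdata() stores the driver context on the device so later callbacks can fetch it with platform_get_drvdata(). A hedged sketch of that pairing follows; the foo_* names are invented for illustration, the two platform_*_drvdata() helpers are the real API.

#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_device {
	int dma_channels;
};

static int foo_probe(struct platform_device *op)
{
	struct foo_device *fdev;

	fdev = devm_kzalloc(&op->dev, sizeof(*fdev), GFP_KERNEL);
	if (!fdev)
		return -ENOMEM;

	/* Without this, foo_remove() below would read back NULL. */
	platform_set_drvdata(op, fdev);
	return 0;
}

static int foo_remove(struct platform_device *op)
{
	struct foo_device *fdev = platform_get_drvdata(op);

	/* tear down using fdev ... */
	(void)fdev;
	return 0;
}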

drivers/dma/s3c24xx-dma.c

@@ -628,42 +628,13 @@ retry:
 	s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
 }
 
-static void s3c24xx_dma_unmap_buffers(struct s3c24xx_txd *txd)
-{
-	struct device *dev = txd->vd.tx.chan->device->dev;
-	struct s3c24xx_sg *dsg;
-
-	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
-		if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				dma_unmap_single(dev, dsg->src_addr, dsg->len,
-						DMA_TO_DEVICE);
-		else {
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				dma_unmap_page(dev, dsg->src_addr, dsg->len,
-						DMA_TO_DEVICE);
-		}
-	}
-
-	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
-		if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
-						DMA_FROM_DEVICE);
-		else
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
-						DMA_FROM_DEVICE);
-	}
-}
-
 static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd)
 {
 	struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
 	struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan);
 
-	if (!s3cchan->slave)
-		s3c24xx_dma_unmap_buffers(txd);
-
+	dma_descriptor_unmap(&vd->tx);
 	s3c24xx_dma_free_txd(txd);
 }
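
Most of this hunk deletes the driver's open-coded unmap routine, which used the DMA_COMPL_SKIP_*/ *_UNMAP_SINGLE flags removed when the dmaengine core took over unmap handling (that removal is what broke the build). Under the new scheme the core tracks unmap state in tx->unmap, so a driver only needs to call dma_descriptor_unmap() when freeing the descriptor. From memory, the helper in <linux/dmaengine.h> is essentially the following; treat it as a sketch rather than a verbatim quote.

/* Approximate form of the core helper this driver now relies on:
 * drop the core-managed unmap data attached to the descriptor. */
static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx)
{
	if (tx->unmap) {
		dmaengine_unmap_put(tx->unmap);
		tx->unmap = NULL;
	}
}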
@@ -795,7 +766,7 @@ static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,
 	spin_lock_irqsave(&s3cchan->vc.lock, flags);
 	ret = dma_cookie_status(chan, cookie, txstate);
-	if (ret == DMA_SUCCESS) {
+	if (ret == DMA_COMPLETE) {
 		spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
 		return ret;
 	}
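
This last change is the DMA_SUCCESS to DMA_COMPLETE rename mentioned in the pull message; nothing else about the tx_status flow changes. As a sketch of the usual callback shape (foo_tx_status is an invented name; dma_cookie_status(), DMA_COMPLETE and struct dma_tx_state are the real dmaengine interfaces):

#include <linux/dmaengine.h>
#include "dmaengine.h"	/* drivers/dma/dmaengine.h, for dma_cookie_status() */

static enum dma_status foo_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				     struct dma_tx_state *txstate)
{
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)	/* was DMA_SUCCESS before this cycle */
		return ret;

	/* Otherwise the driver would fill in the residue of the
	 * in-flight descriptor before returning DMA_IN_PROGRESS. */
	return ret;
}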

drivers/dma/sh/rcar-hpbdma.c

@@ -60,6 +60,7 @@
 #define HPB_DMAE_DSTPR_DMSTP	BIT(0)
 
 /* DMA status register (DSTSR) bits */
+#define HPB_DMAE_DSTSR_DQSTS	BIT(2)
 #define HPB_DMAE_DSTSR_DMSTS	BIT(0)
 
 /* DMA common registers */
@@ -286,6 +287,9 @@ static void hpb_dmae_halt(struct shdma_chan *schan)
 	ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR);
 	ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR);
+
+	chan->plane_idx = 0;
+	chan->first_desc = true;
 }
 
 static const struct hpb_dmae_slave_config *
@@ -385,7 +389,10 @@ static bool hpb_dmae_channel_busy(struct shdma_chan *schan)
 	struct hpb_dmae_chan *chan = to_chan(schan);
 	u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR);
 
-	return (dstsr & HPB_DMAE_DSTSR_DMSTS) == HPB_DMAE_DSTSR_DMSTS;
+	if (chan->xfer_mode == XFER_DOUBLE)
+		return dstsr & HPB_DMAE_DSTSR_DQSTS;
+	else
+		return dstsr & HPB_DMAE_DSTSR_DMSTS;
 }
 
 static int
@@ -510,6 +517,8 @@ static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
 	}
 
 	schan = &new_hpb_chan->shdma_chan;
+	schan->max_xfer_len = HPB_DMA_TCR_MAX;
+
 	shdma_chan_probe(sdev, schan, id);
 
 	if (pdev->id >= 0)
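
Finally, setting schan->max_xfer_len gives the shdma core an upper bound for a single hardware transfer, so long scatterlist entries can be split into chunks the transfer-count register can actually hold. The snippet below is only an illustration of that chunking idea, not the shdma-base code; split_into_chunks() is a made-up helper and HPB_DMA_TCR_MAX is the limit referenced by the patch.

#include <linux/kernel.h>
#include <linux/types.h>

/* Illustration only: split a request into pieces no larger than the
 * per-channel maximum transfer length. */
static unsigned int split_into_chunks(size_t len, size_t max_xfer_len)
{
	unsigned int chunks = 0;

	while (len) {
		size_t this_len = min(len, max_xfer_len);

		/* one hardware descriptor of this_len bytes would be
		 * programmed here */
		len -= this_len;
		chunks++;
	}

	return chunks;
}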