ALSA: hda: Remove page allocation redirection

The HD-audio core allocates and releases pages via the driver-specific
dma_alloc_pages and dma_free_pages ops defined in bus->io_ops.  This
was done because some platforms require uncached pages, so the handling
of the page flags had to be done locally in the driver code.

Since the recent change in the ALSA core memory allocator, we can
simply pass SNDRV_DMA_TYPE_DEV_UC for uncached pages, so the only
remaining difference is which buffer type gets passed to the core
allocator.  That makes it a good time to clean up this indirection.

This patch changes the allocation code in the HD-audio core to call the
core allocator directly, so we can get rid of the dma_alloc_pages and
dma_free_pages io_ops.  If a driver needs uncached pages, it has to set
bus->dma_type right after the bus initialization.
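
For illustration only (not part of this patch): a controller driver
that needs uncached buffers would now override the default right after
snd_hdac_bus_init().  The foo_* ops and the foo_needs_uncached_pages
flag below are hypothetical placeholders:

    /* sketch only: foo_* names are placeholders, not real drivers */
    err = snd_hdac_bus_init(bus, dev, &foo_bus_ops, &foo_io_ops);
    if (err < 0)
            return err;

    /* bus->dma_type defaults to SNDRV_DMA_TYPE_DEV; switch to
     * uncached pages where the platform needs them */
    if (foo_needs_uncached_pages)
            bus->dma_type = SNDRV_DMA_TYPE_DEV_UC;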

This is merely a code refactoring and shouldn't cause any change in
behavior.

Reviewed-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
Signed-off-by: Takashi Iwai <tiwai@suse.de>
commit 619a1f195f (parent 5f9e832c13)
Author: Takashi Iwai <tiwai@suse.de>
Date:   2019-08-07 20:02:31 +02:00
9 changed files with 21 additions and 94 deletions

--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -264,11 +264,6 @@ struct hdac_io_ops {
         u16 (*reg_readw)(u16 __iomem *addr);
         void (*reg_writeb)(u8 value, u8 __iomem *addr);
         u8 (*reg_readb)(u8 __iomem *addr);
-        /* Allocation ops */
-        int (*dma_alloc_pages)(struct hdac_bus *bus, int type, size_t size,
-                               struct snd_dma_buffer *buf);
-        void (*dma_free_pages)(struct hdac_bus *bus,
-                               struct snd_dma_buffer *buf);
 };
 #define HDA_UNSOL_QUEUE_SIZE 64
@@ -344,6 +339,7 @@ struct hdac_bus {
         /* CORB/RIRB and position buffers */
         struct snd_dma_buffer rb;
         struct snd_dma_buffer posbuf;
+        int dma_type;           /* SNDRV_DMA_TYPE_XXX for CORB/RIRB */
         /* hdac_stream linked list */
         struct list_head stream_list;

--- a/sound/hda/ext/hdac_ext_bus.c
+++ b/sound/hda/ext/hdac_ext_bus.c
@@ -47,17 +47,6 @@ static u8 hdac_ext_readb(u8 __iomem *addr)
         return readb(addr);
 }
-static int hdac_ext_dma_alloc_pages(struct hdac_bus *bus, int type,
-                                    size_t size, struct snd_dma_buffer *buf)
-{
-        return snd_dma_alloc_pages(type, bus->dev, size, buf);
-}
-static void hdac_ext_dma_free_pages(struct hdac_bus *bus, struct snd_dma_buffer *buf)
-{
-        snd_dma_free_pages(buf);
-}
 static const struct hdac_io_ops hdac_ext_default_io = {
         .reg_writel = hdac_ext_writel,
         .reg_readl = hdac_ext_readl,
@@ -65,8 +54,6 @@ static const struct hdac_io_ops hdac_ext_default_io = {
         .reg_readw = hdac_ext_readw,
         .reg_writeb = hdac_ext_writeb,
         .reg_readb = hdac_ext_readb,
-        .dma_alloc_pages = hdac_ext_dma_alloc_pages,
-        .dma_free_pages = hdac_ext_dma_free_pages,
 };
 /**

--- a/sound/hda/hdac_bus.c
+++ b/sound/hda/hdac_bus.c
@@ -34,6 +34,7 @@ int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev,
         else
                 bus->ops = &default_ops;
         bus->io_ops = io_ops;
+        bus->dma_type = SNDRV_DMA_TYPE_DEV;
         INIT_LIST_HEAD(&bus->stream_list);
         INIT_LIST_HEAD(&bus->codec_list);
         INIT_WORK(&bus->unsol_work, snd_hdac_bus_process_unsol_events);

--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -575,12 +575,13 @@ int snd_hdac_bus_alloc_stream_pages(struct hdac_bus *bus)
 {
         struct hdac_stream *s;
         int num_streams = 0;
+        int dma_type = bus->dma_type ? bus->dma_type : SNDRV_DMA_TYPE_DEV;
         int err;
         list_for_each_entry(s, &bus->stream_list, list) {
                 /* allocate memory for the BDL for each stream */
-                err = bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV,
-                                                   BDL_SIZE, &s->bdl);
+                err = snd_dma_alloc_pages(dma_type, bus->dev,
+                                          BDL_SIZE, &s->bdl);
                 num_streams++;
                 if (err < 0)
                         return -ENOMEM;
@@ -589,16 +590,15 @@ int snd_hdac_bus_alloc_stream_pages(struct hdac_bus *bus)
         if (WARN_ON(!num_streams))
                 return -EINVAL;
         /* allocate memory for the position buffer */
-        err = bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV,
-                                           num_streams * 8, &bus->posbuf);
+        err = snd_dma_alloc_pages(dma_type, bus->dev,
+                                  num_streams * 8, &bus->posbuf);
         if (err < 0)
                 return -ENOMEM;
         list_for_each_entry(s, &bus->stream_list, list)
                 s->posbuf = (__le32 *)(bus->posbuf.area + s->index * 8);
         /* single page (at least 4096 bytes) must suffice for both ringbuffes */
-        return bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV,
-                                            PAGE_SIZE, &bus->rb);
+        return snd_dma_alloc_pages(dma_type, bus->dev, PAGE_SIZE, &bus->rb);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_bus_alloc_stream_pages);
@@ -612,12 +612,12 @@ void snd_hdac_bus_free_stream_pages(struct hdac_bus *bus)
         list_for_each_entry(s, &bus->stream_list, list) {
                 if (s->bdl.area)
-                        bus->io_ops->dma_free_pages(bus, &s->bdl);
+                        snd_dma_free_pages(&s->bdl);
         }
         if (bus->rb.area)
-                bus->io_ops->dma_free_pages(bus, &bus->rb);
+                snd_dma_free_pages(&bus->rb);
         if (bus->posbuf.area)
-                bus->io_ops->dma_free_pages(bus, &bus->posbuf);
+                snd_dma_free_pages(&bus->posbuf);
 }
 EXPORT_SYMBOL_GPL(snd_hdac_bus_free_stream_pages);

--- a/sound/hda/hdac_stream.c
+++ b/sound/hda/hdac_stream.c
@@ -680,8 +680,8 @@ int snd_hdac_dsp_prepare(struct hdac_stream *azx_dev, unsigned int format,
         azx_dev->locked = true;
         spin_unlock_irq(&bus->reg_lock);
-        err = bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV_SG,
-                                           byte_size, bufp);
+        err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, bus->dev,
+                                  byte_size, bufp);
         if (err < 0)
                 goto err_alloc;
@@ -707,7 +707,7 @@ int snd_hdac_dsp_prepare(struct hdac_stream *azx_dev, unsigned int format,
         return azx_dev->stream_tag;
 error:
-        bus->io_ops->dma_free_pages(bus, bufp);
+        snd_dma_free_pages(bufp);
 err_alloc:
         spin_lock_irq(&bus->reg_lock);
         azx_dev->locked = false;
@@ -754,7 +754,7 @@ void snd_hdac_dsp_cleanup(struct hdac_stream *azx_dev,
         azx_dev->period_bytes = 0;
         azx_dev->format_val = 0;
-        bus->io_ops->dma_free_pages(bus, dmab);
+        snd_dma_free_pages(dmab);
         dmab->area = NULL;
         spin_lock_irq(&bus->reg_lock);

--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1694,6 +1694,10 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
                 return err;
         }
+        /* use the non-cached pages in non-snoop mode */
+        if (!azx_snoop(chip))
+                azx_bus(chip)->dma_type = SNDRV_DMA_TYPE_DEV_UC;
         /* Workaround for a communication error on CFL (bko#199007) and CNL */
         if (IS_CFL(pci) || IS_CNL(pci))
                 azx_bus(chip)->polling_mode = 1;
@@ -1979,24 +1983,6 @@ static int disable_msi_reset_irq(struct azx *chip)
         return 0;
 }
-/* DMA page allocation helpers. */
-static int dma_alloc_pages(struct hdac_bus *bus,
-                           int type,
-                           size_t size,
-                           struct snd_dma_buffer *buf)
-{
-        struct azx *chip = bus_to_azx(bus);
-        if (!azx_snoop(chip) && type == SNDRV_DMA_TYPE_DEV)
-                type = SNDRV_DMA_TYPE_DEV_UC;
-        return snd_dma_alloc_pages(type, bus->dev, size, buf);
-}
-static void dma_free_pages(struct hdac_bus *bus, struct snd_dma_buffer *buf)
-{
-        snd_dma_free_pages(buf);
-}
 static void pcm_mmap_prepare(struct snd_pcm_substream *substream,
                              struct vm_area_struct *area)
 {
@@ -2015,8 +2001,6 @@ static const struct hdac_io_ops pci_hda_io_ops = {
         .reg_readw = pci_azx_readw,
         .reg_writeb = pci_azx_writeb,
         .reg_readb = pci_azx_readb,
-        .dma_alloc_pages = dma_alloc_pages,
-        .dma_free_pages = dma_free_pages,
 };
 static const struct hda_controller_ops pci_hda_ops = {

--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -75,20 +75,6 @@ MODULE_PARM_DESC(power_save,
 #define power_save 0
 #endif
-/*
- * DMA page allocation ops.
- */
-static int dma_alloc_pages(struct hdac_bus *bus, int type, size_t size,
-                           struct snd_dma_buffer *buf)
-{
-        return snd_dma_alloc_pages(type, bus->dev, size, buf);
-}
-static void dma_free_pages(struct hdac_bus *bus, struct snd_dma_buffer *buf)
-{
-        snd_dma_free_pages(buf);
-}
 /*
  * Register access ops. Tegra HDA register access is DWORD only.
  */
@@ -153,8 +139,6 @@ static const struct hdac_io_ops hda_tegra_io_ops = {
         .reg_readw = hda_tegra_readw,
         .reg_writeb = hda_tegra_writeb,
         .reg_readb = hda_tegra_readb,
-        .dma_alloc_pages = dma_alloc_pages,
-        .dma_free_pages = dma_free_pages,
 };
 static const struct hda_controller_ops hda_tegra_ops; /* nothing special */

--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -25,23 +25,12 @@
 static int skl_alloc_dma_buf(struct device *dev,
                 struct snd_dma_buffer *dmab, size_t size)
 {
-        struct hdac_bus *bus = dev_get_drvdata(dev);
-        if (!bus)
-                return -ENODEV;
-        return bus->io_ops->dma_alloc_pages(bus, SNDRV_DMA_TYPE_DEV, size, dmab);
+        return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, size, dmab);
 }
 static int skl_free_dma_buf(struct device *dev, struct snd_dma_buffer *dmab)
 {
-        struct hdac_bus *bus = dev_get_drvdata(dev);
-        if (!bus)
-                return -ENODEV;
-        bus->io_ops->dma_free_pages(bus, dmab);
+        snd_dma_free_pages(dmab);
         return 0;
 }

--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -51,18 +51,6 @@ static u8 sof_hda_readb(u8 __iomem *addr)
         return readb(addr);
 }
-static int sof_hda_dma_alloc_pages(struct hdac_bus *bus, int type,
-                                   size_t size, struct snd_dma_buffer *buf)
-{
-        return snd_dma_alloc_pages(type, bus->dev, size, buf);
-}
-static void sof_hda_dma_free_pages(struct hdac_bus *bus,
-                                   struct snd_dma_buffer *buf)
-{
-        snd_dma_free_pages(buf);
-}
 static const struct hdac_io_ops io_ops = {
         .reg_writel = sof_hda_writel,
         .reg_readl = sof_hda_readl,
@@ -70,8 +58,6 @@ static const struct hdac_io_ops io_ops = {
         .reg_readw = sof_hda_readw,
         .reg_writeb = sof_hda_writeb,
         .reg_readb = sof_hda_readb,
-        .dma_alloc_pages = sof_hda_dma_alloc_pages,
-        .dma_free_pages = sof_hda_dma_free_pages,
 };
 /*