Merge branch 'dma' into devel

Conflicts:
	arch/arm/plat-mxc/dma-mx1-mx2.c
commit 22b61a11fd
Russell King, 2009-02-21 21:42:50 +00:00 (committed by Russell King)
13 changed files with 292 additions and 238 deletions

View File

@ -19,21 +19,17 @@
#include <asm/system.h>
#include <asm/scatterlist.h>
typedef unsigned int dmach_t;
#include <mach/isa-dma.h>
/*
* DMA modes
* The DMA modes reflect the settings for the ISA DMA controller
*/
typedef unsigned int dmamode_t;
#define DMA_MODE_MASK 3
#define DMA_MODE_READ 0
#define DMA_MODE_WRITE 1
#define DMA_MODE_CASCADE 2
#define DMA_AUTOINIT 4
#define DMA_MODE_MASK 0xcc
#define DMA_MODE_READ 0x44
#define DMA_MODE_WRITE 0x48
#define DMA_MODE_CASCADE 0xc0
#define DMA_AUTOINIT 0x10
extern spinlock_t dma_spin_lock;
@ -52,44 +48,44 @@ static inline void release_dma_lock(unsigned long flags)
/* Clear the 'DMA Pointer Flip Flop'.
* Write 0 for LSB/MSB, 1 for MSB/LSB access.
*/
#define clear_dma_ff(channel)
#define clear_dma_ff(chan)
/* Set only the page register bits of the transfer address.
*
* NOTE: This is an architecture specific function, and should
* be hidden from the drivers
*/
extern void set_dma_page(dmach_t channel, char pagenr);
extern void set_dma_page(unsigned int chan, char pagenr);
/* Request a DMA channel
*
* Some architectures may need to allocate an interrupt
*/
extern int request_dma(dmach_t channel, const char * device_id);
extern int request_dma(unsigned int chan, const char * device_id);
/* Free a DMA channel
*
* Some architectures may need to free an interrupt
*/
extern void free_dma(dmach_t channel);
extern void free_dma(unsigned int chan);
/* Enable DMA for this channel
*
* On some architectures, this may have other side effects like
* enabling an interrupt and setting the DMA registers.
*/
extern void enable_dma(dmach_t channel);
extern void enable_dma(unsigned int chan);
/* Disable DMA for this channel
*
* On some architectures, this may have other side effects like
* disabling an interrupt or whatever.
*/
extern void disable_dma(dmach_t channel);
extern void disable_dma(unsigned int chan);
/* Test whether the specified channel has an active DMA transfer
*/
extern int dma_channel_active(dmach_t channel);
extern int dma_channel_active(unsigned int chan);
/* Set the DMA scatter gather list for this channel
*
@ -97,7 +93,7 @@ extern int dma_channel_active(dmach_t channel);
* especially since some DMA architectures don't update the
* DMA address immediately, but defer it to the enable_dma().
*/
extern void set_dma_sg(dmach_t channel, struct scatterlist *sg, int nr_sg);
extern void set_dma_sg(unsigned int chan, struct scatterlist *sg, int nr_sg);
/* Set the DMA address for this channel
*
@ -105,9 +101,9 @@ extern void set_dma_sg(dmach_t channel, struct scatterlist *sg, int nr_sg);
* especially since some DMA architectures don't update the
* DMA address immediately, but defer it to the enable_dma().
*/
extern void __set_dma_addr(dmach_t channel, void *addr);
#define set_dma_addr(channel, addr) \
__set_dma_addr(channel, bus_to_virt(addr))
extern void __set_dma_addr(unsigned int chan, void *addr);
#define set_dma_addr(chan, addr) \
__set_dma_addr(chan, bus_to_virt(addr))
/* Set the DMA byte count for this channel
*
@ -115,7 +111,7 @@ extern void __set_dma_addr(dmach_t channel, void *addr);
* especially since some DMA architectures don't update the
* DMA count immediately, but defer it to the enable_dma().
*/
extern void set_dma_count(dmach_t channel, unsigned long count);
extern void set_dma_count(unsigned int chan, unsigned long count);
/* Set the transfer direction for this channel
*
@ -124,11 +120,11 @@ extern void set_dma_count(dmach_t channel, unsigned long count);
* DMA transfer direction immediately, but defer it to the
* enable_dma().
*/
extern void set_dma_mode(dmach_t channel, dmamode_t mode);
extern void set_dma_mode(unsigned int chan, unsigned int mode);
/* Set the transfer speed for this channel
*/
extern void set_dma_speed(dmach_t channel, int cycle_ns);
extern void set_dma_speed(unsigned int chan, int cycle_ns);
/* Get DMA residue count. After a DMA transfer, this
* should return zero. Reading this while a DMA transfer is
@ -136,7 +132,7 @@ extern void set_dma_speed(dmach_t channel, int cycle_ns);
* If called before the channel has been used, it may return 1.
* Otherwise, it returns the number of _bytes_ left to transfer.
*/
extern int get_dma_residue(dmach_t channel);
extern int get_dma_residue(unsigned int chan);
#ifndef NO_DMA
#define NO_DMA 255
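Taken together, the declarations above are the whole per-channel API a driver sees; this commit only changes the channel and mode types from dmach_t/dmamode_t to plain unsigned int, so callers keep the same sequence of calls. The following is an illustrative sketch, not code from this commit: the "mydrv" name and helpers are assumptions, and apart from virt_to_bus() it uses only calls declared in this header.

#include <asm/dma.h>

/* Hypothetical driver helpers -- a sketch of how the (renamed) channel API is used. */
static int mydrv_start(unsigned int chan, void *buf, unsigned long len)
{
	unsigned long flags;
	int ret;

	ret = request_dma(chan, "mydrv");	/* claim the channel */
	if (ret)
		return ret;

	flags = claim_dma_lock();		/* serialise controller access */
	set_dma_mode(chan, DMA_MODE_READ);	/* device -> memory */
	set_dma_addr(chan, virt_to_bus(buf));	/* deferred until enable_dma() */
	set_dma_count(chan, len);
	enable_dma(chan);			/* program and start the transfer */
	release_dma_lock(flags);

	return 0;
}

static void mydrv_stop(unsigned int chan)
{
	unsigned long flags;

	flags = claim_dma_lock();
	disable_dma(chan);			/* get_dma_residue(chan) == 0 when complete */
	release_dma_lock(flags);
	free_dma(chan);
}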

View File

@ -15,13 +15,13 @@ struct dma_struct;
typedef struct dma_struct dma_t;
struct dma_ops {
int (*request)(dmach_t, dma_t *); /* optional */
void (*free)(dmach_t, dma_t *); /* optional */
void (*enable)(dmach_t, dma_t *); /* mandatory */
void (*disable)(dmach_t, dma_t *); /* mandatory */
int (*residue)(dmach_t, dma_t *); /* optional */
int (*setspeed)(dmach_t, dma_t *, int); /* optional */
char *type;
int (*request)(unsigned int, dma_t *); /* optional */
void (*free)(unsigned int, dma_t *); /* optional */
void (*enable)(unsigned int, dma_t *); /* mandatory */
void (*disable)(unsigned int, dma_t *); /* mandatory */
int (*residue)(unsigned int, dma_t *); /* optional */
int (*setspeed)(unsigned int, dma_t *, int); /* optional */
const char *type;
};
struct dma_struct {
@ -34,24 +34,21 @@ struct dma_struct {
unsigned int active:1; /* Transfer active */
unsigned int invalid:1; /* Address/Count changed */
dmamode_t dma_mode; /* DMA mode */
unsigned int dma_mode; /* DMA mode */
int speed; /* DMA speed */
unsigned int lock; /* Device is allocated */
const char *device_id; /* Device name */
unsigned int dma_base; /* Controller base address */
int dma_irq; /* Controller IRQ */
struct scatterlist cur_sg; /* Current controller buffer */
unsigned int state;
struct dma_ops *d_ops;
const struct dma_ops *d_ops;
};
/* Prototype: void arch_dma_init(dma)
* Purpose : Initialise architecture specific DMA
* Params : dma - pointer to array of DMA structures
/*
* isa_dma_add - add an ISA-style DMA channel
*/
extern void arch_dma_init(dma_t *dma);
extern int isa_dma_add(unsigned int, dma_t *dma);
extern void isa_init_dma(dma_t *dma);
/*
* Add the ISA DMA controller. Always takes channels 0-7.
*/
extern void isa_init_dma(void);
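On the platform side, the arch_dma_init() hook is gone: each machine now owns its dma_t structures and registers them with isa_dma_add() from an initcall, as the footbridge, RiscPC and Shark conversions below do. A minimal sketch of that registration pattern, assuming a hypothetical platform "myplat" with two channels (none of the myplat_* names come from this commit):

#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/dma.h>
#include <asm/mach/dma.h>

static void myplat_dma_enable(unsigned int chan, dma_t *dma)
{
	/* program the controller from dma->sg, dma->count, dma->dma_mode */
}

static void myplat_dma_disable(unsigned int chan, dma_t *dma)
{
	/* stop the controller */
}

static const struct dma_ops myplat_dma_ops = {
	.type	 = "myplat",
	.enable	 = myplat_dma_enable,	/* mandatory */
	.disable = myplat_dma_disable,	/* mandatory */
	/* .request/.free/.residue/.setspeed are optional */
};

static dma_t myplat_dma[2];

static int __init myplat_dma_init(void)
{
	unsigned int chan;
	int ret;

	for (chan = 0; chan < ARRAY_SIZE(myplat_dma); chan++) {
		myplat_dma[chan].d_ops = &myplat_dma_ops;
		ret = isa_dma_add(chan, &myplat_dma[chan]);	/* -EBUSY if already taken */
		if (ret)
			printk(KERN_ERR "myplat: DMA%u not registered: %d\n",
			       chan, ret);
	}
	return 0;
}
core_initcall(myplat_dma_init);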

View File

@ -24,11 +24,6 @@
#include <asm/dma.h>
#include <asm/mach/dma.h>
#define ISA_DMA_MODE_READ 0x44
#define ISA_DMA_MODE_WRITE 0x48
#define ISA_DMA_MODE_CASCADE 0xc0
#define ISA_DMA_AUTOINIT 0x10
#define ISA_DMA_MASK 0
#define ISA_DMA_MODE 1
#define ISA_DMA_CLRFF 2
@ -49,38 +44,35 @@ static unsigned int isa_dma_port[8][7] = {
{ 0xd4, 0xd6, 0xd8, 0x48a, 0x08a, 0xcc, 0xce }
};
static int isa_get_dma_residue(dmach_t channel, dma_t *dma)
static int isa_get_dma_residue(unsigned int chan, dma_t *dma)
{
unsigned int io_port = isa_dma_port[channel][ISA_DMA_COUNT];
unsigned int io_port = isa_dma_port[chan][ISA_DMA_COUNT];
int count;
count = 1 + inb(io_port);
count |= inb(io_port) << 8;
return channel < 4 ? count : (count << 1);
return chan < 4 ? count : (count << 1);
}
static void isa_enable_dma(dmach_t channel, dma_t *dma)
static void isa_enable_dma(unsigned int chan, dma_t *dma)
{
if (dma->invalid) {
unsigned long address, length;
unsigned int mode;
enum dma_data_direction direction;
mode = channel & 3;
mode = (chan & 3) | dma->dma_mode;
switch (dma->dma_mode & DMA_MODE_MASK) {
case DMA_MODE_READ:
mode |= ISA_DMA_MODE_READ;
direction = DMA_FROM_DEVICE;
break;
case DMA_MODE_WRITE:
mode |= ISA_DMA_MODE_WRITE;
direction = DMA_TO_DEVICE;
break;
case DMA_MODE_CASCADE:
mode |= ISA_DMA_MODE_CASCADE;
direction = DMA_BIDIRECTIONAL;
break;
@ -105,34 +97,31 @@ static void isa_enable_dma(dmach_t channel, dma_t *dma)
address = dma->buf.dma_address;
length = dma->buf.length - 1;
outb(address >> 16, isa_dma_port[channel][ISA_DMA_PGLO]);
outb(address >> 24, isa_dma_port[channel][ISA_DMA_PGHI]);
outb(address >> 16, isa_dma_port[chan][ISA_DMA_PGLO]);
outb(address >> 24, isa_dma_port[chan][ISA_DMA_PGHI]);
if (channel >= 4) {
if (chan >= 4) {
address >>= 1;
length >>= 1;
}
outb(0, isa_dma_port[channel][ISA_DMA_CLRFF]);
outb(0, isa_dma_port[chan][ISA_DMA_CLRFF]);
outb(address, isa_dma_port[channel][ISA_DMA_ADDR]);
outb(address >> 8, isa_dma_port[channel][ISA_DMA_ADDR]);
outb(address, isa_dma_port[chan][ISA_DMA_ADDR]);
outb(address >> 8, isa_dma_port[chan][ISA_DMA_ADDR]);
outb(length, isa_dma_port[channel][ISA_DMA_COUNT]);
outb(length >> 8, isa_dma_port[channel][ISA_DMA_COUNT]);
outb(length, isa_dma_port[chan][ISA_DMA_COUNT]);
outb(length >> 8, isa_dma_port[chan][ISA_DMA_COUNT]);
if (dma->dma_mode & DMA_AUTOINIT)
mode |= ISA_DMA_AUTOINIT;
outb(mode, isa_dma_port[channel][ISA_DMA_MODE]);
outb(mode, isa_dma_port[chan][ISA_DMA_MODE]);
dma->invalid = 0;
}
outb(channel & 3, isa_dma_port[channel][ISA_DMA_MASK]);
outb(chan & 3, isa_dma_port[chan][ISA_DMA_MASK]);
}
static void isa_disable_dma(dmach_t channel, dma_t *dma)
static void isa_disable_dma(unsigned int chan, dma_t *dma)
{
outb(channel | 4, isa_dma_port[channel][ISA_DMA_MASK]);
outb(chan | 4, isa_dma_port[chan][ISA_DMA_MASK]);
}
static struct dma_ops isa_dma_ops = {
@ -160,7 +149,12 @@ static struct resource dma_resources[] = { {
.end = 0x048f
} };
void __init isa_init_dma(dma_t *dma)
static dma_t isa_dma[8];
/*
* ISA DMA always starts at channel 0
*/
void __init isa_init_dma(void)
{
/*
* Try to autodetect presence of an ISA DMA controller.
@ -178,11 +172,11 @@ void __init isa_init_dma(dma_t *dma)
outb(0xaa, 0x00);
if (inb(0) == 0x55 && inb(0) == 0xaa) {
int channel, i;
unsigned int chan, i;
for (channel = 0; channel < 8; channel++) {
dma[channel].d_ops = &isa_dma_ops;
isa_disable_dma(channel, NULL);
for (chan = 0; chan < 8; chan++) {
isa_dma[chan].d_ops = &isa_dma_ops;
isa_disable_dma(chan, NULL);
}
outb(0x40, 0x0b);
@ -217,5 +211,12 @@ void __init isa_init_dma(dma_t *dma)
for (i = 0; i < ARRAY_SIZE(dma_resources); i++)
request_resource(&ioport_resource, dma_resources + i);
for (chan = 0; chan < 8; chan++) {
int ret = isa_dma_add(chan, &isa_dma[chan]);
if (ret)
printk(KERN_ERR "ISADMA%u: unable to register: %d\n",
chan, ret);
}
}
}
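With DMA_MODE_READ/WRITE/CASCADE and DMA_AUTOINIT now holding the ISA controller's own register encoding (0x44/0x48/0xc0/0x10), isa_enable_dma() no longer needs the ISA_DMA_MODE_* translation it used to carry; it simply ORs the channel number into dma->dma_mode. A worked example of that arithmetic, assuming a caller asked for auto-initialised memory-to-device transfers on channel 2 (values chosen for illustration only, mirroring the code above):

unsigned int chan = 2;
unsigned int dma_mode = DMA_MODE_WRITE | DMA_AUTOINIT;	/* 0x48 | 0x10 = 0x58 */
unsigned int mode = (chan & 3) | dma_mode;		/* 0x02 | 0x58 = 0x5a */

/* 0x5a goes straight to the mode register: 0x40 = single transfer mode,
 * 0x08 = memory-to-device ("write"), 0x10 = auto-initialise, 0x02 = channel.
 */
outb(mode, isa_dma_port[chan][ISA_DMA_MODE]);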

View File

@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <asm/dma.h>
@ -23,19 +24,40 @@
DEFINE_SPINLOCK(dma_spin_lock);
EXPORT_SYMBOL(dma_spin_lock);
static dma_t dma_chan[MAX_DMA_CHANNELS];
static dma_t *dma_chan[MAX_DMA_CHANNELS];
static inline dma_t *dma_channel(unsigned int chan)
{
if (chan >= MAX_DMA_CHANNELS)
return NULL;
return dma_chan[chan];
}
int __init isa_dma_add(unsigned int chan, dma_t *dma)
{
if (!dma->d_ops)
return -EINVAL;
sg_init_table(&dma->buf, 1);
if (dma_chan[chan])
return -EBUSY;
dma_chan[chan] = dma;
return 0;
}
/*
* Request DMA channel
*
* On certain platforms, we have to allocate an interrupt as well...
*/
int request_dma(dmach_t channel, const char *device_id)
int request_dma(unsigned int chan, const char *device_id)
{
dma_t *dma = dma_chan + channel;
dma_t *dma = dma_channel(chan);
int ret;
if (channel >= MAX_DMA_CHANNELS || !dma->d_ops)
if (!dma)
goto bad_dma;
if (xchg(&dma->lock, 1) != 0)
@ -47,7 +69,7 @@ int request_dma(dmach_t channel, const char *device_id)
ret = 0;
if (dma->d_ops->request)
ret = dma->d_ops->request(channel, dma);
ret = dma->d_ops->request(chan, dma);
if (ret)
xchg(&dma->lock, 0);
@ -55,7 +77,7 @@ int request_dma(dmach_t channel, const char *device_id)
return ret;
bad_dma:
printk(KERN_ERR "dma: trying to allocate DMA%d\n", channel);
printk(KERN_ERR "dma: trying to allocate DMA%d\n", chan);
return -EINVAL;
busy:
@ -68,42 +90,42 @@ EXPORT_SYMBOL(request_dma);
*
* On certain platforms, we have to free interrupt as well...
*/
void free_dma(dmach_t channel)
void free_dma(unsigned int chan)
{
dma_t *dma = dma_chan + channel;
dma_t *dma = dma_channel(chan);
if (channel >= MAX_DMA_CHANNELS || !dma->d_ops)
if (!dma)
goto bad_dma;
if (dma->active) {
printk(KERN_ERR "dma%d: freeing active DMA\n", channel);
dma->d_ops->disable(channel, dma);
printk(KERN_ERR "dma%d: freeing active DMA\n", chan);
dma->d_ops->disable(chan, dma);
dma->active = 0;
}
if (xchg(&dma->lock, 0) != 0) {
if (dma->d_ops->free)
dma->d_ops->free(channel, dma);
dma->d_ops->free(chan, dma);
return;
}
printk(KERN_ERR "dma%d: trying to free free DMA\n", channel);
printk(KERN_ERR "dma%d: trying to free free DMA\n", chan);
return;
bad_dma:
printk(KERN_ERR "dma: trying to free DMA%d\n", channel);
printk(KERN_ERR "dma: trying to free DMA%d\n", chan);
}
EXPORT_SYMBOL(free_dma);
/* Set DMA Scatter-Gather list
*/
void set_dma_sg (dmach_t channel, struct scatterlist *sg, int nr_sg)
void set_dma_sg (unsigned int chan, struct scatterlist *sg, int nr_sg)
{
dma_t *dma = dma_chan + channel;
dma_t *dma = dma_channel(chan);
if (dma->active)
printk(KERN_ERR "dma%d: altering DMA SG while "
"DMA active\n", channel);
"DMA active\n", chan);
dma->sg = sg;
dma->sgcount = nr_sg;
@ -115,13 +137,13 @@ EXPORT_SYMBOL(set_dma_sg);
*
* Copy address to the structure, and set the invalid bit
*/
void __set_dma_addr (dmach_t channel, void *addr)
void __set_dma_addr (unsigned int chan, void *addr)
{
dma_t *dma = dma_chan + channel;
dma_t *dma = dma_channel(chan);
if (dma->active)
printk(KERN_ERR "dma%d: altering DMA address while "
"DMA active\n", channel);
"DMA active\n", chan);
dma->sg = NULL;
dma->addr = addr;
@ -133,13 +155,13 @@ EXPORT_SYMBOL(__set_dma_addr);
*
* Copy address to the structure, and set the invalid bit
*/
void set_dma_count (dmach_t channel, unsigned long count)
void set_dma_count (unsigned int chan, unsigned long count)
{
dma_t *dma = dma_chan + channel;
dma_t *dma = dma_channel(chan);
if (dma->active)
printk(KERN_ERR "dma%d: altering DMA count while "
"DMA active\n", channel);
"DMA active\n", chan);
dma->sg = NULL;
dma->count = count;
@ -149,13 +171,13 @@ EXPORT_SYMBOL(set_dma_count);
/* Set DMA direction mode
*/
void set_dma_mode (dmach_t channel, dmamode_t mode)
void set_dma_mode (unsigned int chan, unsigned int mode)
{
dma_t *dma = dma_chan + channel;
dma_t *dma = dma_channel(chan);
if (dma->active)
printk(KERN_ERR "dma%d: altering DMA mode while "
"DMA active\n", channel);
"DMA active\n", chan);
dma->dma_mode = mode;
dma->invalid = 1;
@ -164,42 +186,42 @@ EXPORT_SYMBOL(set_dma_mode);
/* Enable DMA channel
*/
void enable_dma (dmach_t channel)
void enable_dma (unsigned int chan)
{
dma_t *dma = dma_chan + channel;
dma_t *dma = dma_channel(chan);
if (!dma->lock)
goto free_dma;
if (dma->active == 0) {
dma->active = 1;
dma->d_ops->enable(channel, dma);
dma->d_ops->enable(chan, dma);
}
return;
free_dma:
printk(KERN_ERR "dma%d: trying to enable free DMA\n", channel);
printk(KERN_ERR "dma%d: trying to enable free DMA\n", chan);
BUG();
}
EXPORT_SYMBOL(enable_dma);
/* Disable DMA channel
*/
void disable_dma (dmach_t channel)
void disable_dma (unsigned int chan)
{
dma_t *dma = dma_chan + channel;
dma_t *dma = dma_channel(chan);
if (!dma->lock)
goto free_dma;
if (dma->active == 1) {
dma->active = 0;
dma->d_ops->disable(channel, dma);
dma->d_ops->disable(chan, dma);
}
return;
free_dma:
printk(KERN_ERR "dma%d: trying to disable free DMA\n", channel);
printk(KERN_ERR "dma%d: trying to disable free DMA\n", chan);
BUG();
}
EXPORT_SYMBOL(disable_dma);
@ -207,45 +229,38 @@ EXPORT_SYMBOL(disable_dma);
/*
* Is the specified DMA channel active?
*/
int dma_channel_active(dmach_t channel)
int dma_channel_active(unsigned int chan)
{
return dma_chan[channel].active;
dma_t *dma = dma_channel(chan);
return dma->active;
}
EXPORT_SYMBOL(dma_channel_active);
void set_dma_page(dmach_t channel, char pagenr)
void set_dma_page(unsigned int chan, char pagenr)
{
printk(KERN_ERR "dma%d: trying to set_dma_page\n", channel);
printk(KERN_ERR "dma%d: trying to set_dma_page\n", chan);
}
EXPORT_SYMBOL(set_dma_page);
void set_dma_speed(dmach_t channel, int cycle_ns)
void set_dma_speed(unsigned int chan, int cycle_ns)
{
dma_t *dma = dma_chan + channel;
dma_t *dma = dma_channel(chan);
int ret = 0;
if (dma->d_ops->setspeed)
ret = dma->d_ops->setspeed(channel, dma, cycle_ns);
ret = dma->d_ops->setspeed(chan, dma, cycle_ns);
dma->speed = ret;
}
EXPORT_SYMBOL(set_dma_speed);
int get_dma_residue(dmach_t channel)
int get_dma_residue(unsigned int chan)
{
dma_t *dma = dma_chan + channel;
dma_t *dma = dma_channel(chan);
int ret = 0;
if (dma->d_ops->residue)
ret = dma->d_ops->residue(channel, dma);
ret = dma->d_ops->residue(chan, dma);
return ret;
}
EXPORT_SYMBOL(get_dma_residue);
static int __init init_dma(void)
{
arch_dma_init(dma_chan);
return 0;
}
core_initcall(init_dma);

View File

@ -21,16 +21,16 @@
#include <asm/hardware/dec21285.h>
#if 0
static int fb_dma_request(dmach_t channel, dma_t *dma)
static int fb_dma_request(unsigned int chan, dma_t *dma)
{
return -EINVAL;
}
static void fb_dma_enable(dmach_t channel, dma_t *dma)
static void fb_dma_enable(unsigned int chan, dma_t *dma)
{
}
static void fb_dma_disable(dmach_t channel, dma_t *dma)
static void fb_dma_disable(unsigned int chan, dma_t *dma)
{
}
@ -42,7 +42,7 @@ static struct dma_ops fb_dma_ops = {
};
#endif
void __init arch_dma_init(dma_t *dma)
static int __init fb_dma_init(void)
{
#if 0
dma[_DC21285_DMA(0)].d_ops = &fb_dma_ops;
@ -50,6 +50,8 @@ void __init arch_dma_init(dma_t *dma)
#endif
#ifdef CONFIG_ISA_DMA
if (footbridge_cfn_mode())
isa_init_dma(dma + _ISA_DMA(0));
isa_init_dma();
#endif
return 0;
}
core_initcall(fb_dma_init);

View File

@ -26,6 +26,16 @@
#include <asm/mach/dma.h>
#include <asm/hardware/iomd.h>
struct iomd_dma {
struct dma_struct dma;
unsigned int state;
unsigned long base; /* Controller base address */
int irq; /* Controller IRQ */
struct scatterlist cur_sg; /* Current controller buffer */
dma_addr_t dma_addr;
unsigned int dma_len;
};
#if 0
typedef enum {
dma_size_8 = 1,
@ -44,15 +54,15 @@ typedef enum {
#define CR (IOMD_IO0CR - IOMD_IO0CURA)
#define ST (IOMD_IO0ST - IOMD_IO0CURA)
static void iomd_get_next_sg(struct scatterlist *sg, dma_t *dma)
static void iomd_get_next_sg(struct scatterlist *sg, struct iomd_dma *idma)
{
unsigned long end, offset, flags = 0;
if (dma->sg) {
sg->dma_address = dma->sg->dma_address;
if (idma->dma.sg) {
sg->dma_address = idma->dma_addr;
offset = sg->dma_address & ~PAGE_MASK;
end = offset + dma->sg->length;
end = offset + idma->dma_len;
if (end > PAGE_SIZE)
end = PAGE_SIZE;
@ -62,15 +72,17 @@ static void iomd_get_next_sg(struct scatterlist *sg, dma_t *dma)
sg->length = end - TRANSFER_SIZE;
dma->sg->length -= end - offset;
dma->sg->dma_address += end - offset;
idma->dma_len -= end - offset;
idma->dma_addr += end - offset;
if (dma->sg->length == 0) {
if (dma->sgcount > 1) {
dma->sg++;
dma->sgcount--;
if (idma->dma_len == 0) {
if (idma->dma.sgcount > 1) {
idma->dma.sg = sg_next(idma->dma.sg);
idma->dma_addr = idma->dma.sg->dma_address;
idma->dma_len = idma->dma.sg->length;
idma->dma.sgcount--;
} else {
dma->sg = NULL;
idma->dma.sg = NULL;
flags |= DMA_END_S;
}
}
@ -85,8 +97,8 @@ static void iomd_get_next_sg(struct scatterlist *sg, dma_t *dma)
static irqreturn_t iomd_dma_handle(int irq, void *dev_id)
{
dma_t *dma = (dma_t *)dev_id;
unsigned long base = dma->dma_base;
struct iomd_dma *idma = dev_id;
unsigned long base = idma->base;
do {
unsigned int status;
@ -95,93 +107,99 @@ static irqreturn_t iomd_dma_handle(int irq, void *dev_id)
if (!(status & DMA_ST_INT))
return IRQ_HANDLED;
if ((dma->state ^ status) & DMA_ST_AB)
iomd_get_next_sg(&dma->cur_sg, dma);
if ((idma->state ^ status) & DMA_ST_AB)
iomd_get_next_sg(&idma->cur_sg, idma);
switch (status & (DMA_ST_OFL | DMA_ST_AB)) {
case DMA_ST_OFL: /* OIA */
case DMA_ST_AB: /* .IB */
iomd_writel(dma->cur_sg.dma_address, base + CURA);
iomd_writel(dma->cur_sg.length, base + ENDA);
dma->state = DMA_ST_AB;
iomd_writel(idma->cur_sg.dma_address, base + CURA);
iomd_writel(idma->cur_sg.length, base + ENDA);
idma->state = DMA_ST_AB;
break;
case DMA_ST_OFL | DMA_ST_AB: /* OIB */
case 0: /* .IA */
iomd_writel(dma->cur_sg.dma_address, base + CURB);
iomd_writel(dma->cur_sg.length, base + ENDB);
dma->state = 0;
iomd_writel(idma->cur_sg.dma_address, base + CURB);
iomd_writel(idma->cur_sg.length, base + ENDB);
idma->state = 0;
break;
}
if (status & DMA_ST_OFL &&
dma->cur_sg.length == (DMA_END_S|DMA_END_L))
idma->cur_sg.length == (DMA_END_S|DMA_END_L))
break;
} while (1);
dma->state = ~DMA_ST_AB;
idma->state = ~DMA_ST_AB;
disable_irq(irq);
return IRQ_HANDLED;
}
static int iomd_request_dma(dmach_t channel, dma_t *dma)
static int iomd_request_dma(unsigned int chan, dma_t *dma)
{
return request_irq(dma->dma_irq, iomd_dma_handle,
IRQF_DISABLED, dma->device_id, dma);
struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
return request_irq(idma->irq, iomd_dma_handle,
IRQF_DISABLED, idma->dma.device_id, idma);
}
static void iomd_free_dma(dmach_t channel, dma_t *dma)
static void iomd_free_dma(unsigned int chan, dma_t *dma)
{
free_irq(dma->dma_irq, dma);
struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
free_irq(idma->irq, idma);
}
static void iomd_enable_dma(dmach_t channel, dma_t *dma)
static void iomd_enable_dma(unsigned int chan, dma_t *dma)
{
unsigned long dma_base = dma->dma_base;
struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
unsigned long dma_base = idma->base;
unsigned int ctrl = TRANSFER_SIZE | DMA_CR_E;
if (dma->invalid) {
dma->invalid = 0;
if (idma->dma.invalid) {
idma->dma.invalid = 0;
/*
* Cope with ISA-style drivers which expect cache
* coherence.
*/
if (!dma->sg) {
dma->sg = &dma->buf;
dma->sgcount = 1;
dma->buf.length = dma->count;
dma->buf.dma_address = dma_map_single(NULL,
dma->addr, dma->count,
dma->dma_mode == DMA_MODE_READ ?
if (!idma->dma.sg) {
idma->dma.sg = &idma->dma.buf;
idma->dma.sgcount = 1;
idma->dma.buf.length = idma->dma.count;
idma->dma.buf.dma_address = dma_map_single(NULL,
idma->dma.addr, idma->dma.count,
idma->dma.dma_mode == DMA_MODE_READ ?
DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
iomd_writeb(DMA_CR_C, dma_base + CR);
dma->state = DMA_ST_AB;
idma->state = DMA_ST_AB;
}
if (dma->dma_mode == DMA_MODE_READ)
if (idma->dma.dma_mode == DMA_MODE_READ)
ctrl |= DMA_CR_D;
iomd_writeb(ctrl, dma_base + CR);
enable_irq(dma->dma_irq);
enable_irq(idma->irq);
}
static void iomd_disable_dma(dmach_t channel, dma_t *dma)
static void iomd_disable_dma(unsigned int chan, dma_t *dma)
{
unsigned long dma_base = dma->dma_base;
struct iomd_dma *idma = container_of(dma, struct iomd_dma, dma);
unsigned long dma_base = idma->base;
unsigned long flags;
local_irq_save(flags);
if (dma->state != ~DMA_ST_AB)
disable_irq(dma->dma_irq);
if (idma->state != ~DMA_ST_AB)
disable_irq(idma->irq);
iomd_writeb(0, dma_base + CR);
local_irq_restore(flags);
}
static int iomd_set_dma_speed(dmach_t channel, dma_t *dma, int cycle)
static int iomd_set_dma_speed(unsigned int chan, dma_t *dma, int cycle)
{
int tcr, speed;
@ -197,7 +215,7 @@ static int iomd_set_dma_speed(dmach_t channel, dma_t *dma, int cycle)
tcr = iomd_readb(IOMD_DMATCR);
speed &= 3;
switch (channel) {
switch (chan) {
case DMA_0:
tcr = (tcr & ~0x03) | speed;
break;
@ -236,16 +254,22 @@ static struct fiq_handler fh = {
.name = "floppydma"
};
static void floppy_enable_dma(dmach_t channel, dma_t *dma)
struct floppy_dma {
struct dma_struct dma;
unsigned int fiq;
};
static void floppy_enable_dma(unsigned int chan, dma_t *dma)
{
struct floppy_dma *fdma = container_of(dma, struct floppy_dma, dma);
void *fiqhandler_start;
unsigned int fiqhandler_length;
struct pt_regs regs;
if (dma->sg)
if (fdma->dma.sg)
BUG();
if (dma->dma_mode == DMA_MODE_READ) {
if (fdma->dma.dma_mode == DMA_MODE_READ) {
extern unsigned char floppy_fiqin_start, floppy_fiqin_end;
fiqhandler_start = &floppy_fiqin_start;
fiqhandler_length = &floppy_fiqin_end - &floppy_fiqin_start;
@ -255,8 +279,8 @@ static void floppy_enable_dma(dmach_t channel, dma_t *dma)
fiqhandler_length = &floppy_fiqout_end - &floppy_fiqout_start;
}
regs.ARM_r9 = dma->count;
regs.ARM_r10 = (unsigned long)dma->addr;
regs.ARM_r9 = fdma->dma.count;
regs.ARM_r10 = (unsigned long)fdma->dma.addr;
regs.ARM_fp = (unsigned long)FLOPPYDMA_BASE;
if (claim_fiq(&fh)) {
@ -266,16 +290,17 @@ static void floppy_enable_dma(dmach_t channel, dma_t *dma)
set_fiq_handler(fiqhandler_start, fiqhandler_length);
set_fiq_regs(&regs);
enable_fiq(dma->dma_irq);
enable_fiq(fdma->fiq);
}
static void floppy_disable_dma(dmach_t channel, dma_t *dma)
static void floppy_disable_dma(unsigned int chan, dma_t *dma)
{
disable_fiq(dma->dma_irq);
struct floppy_dma *fdma = container_of(dma, struct floppy_dma, dma);
disable_fiq(fdma->fiq);
release_fiq(&fh);
}
static int floppy_get_residue(dmach_t channel, dma_t *dma)
static int floppy_get_residue(unsigned int chan, dma_t *dma)
{
struct pt_regs regs;
get_fiq_regs(&regs);
@ -292,7 +317,7 @@ static struct dma_ops floppy_dma_ops = {
/*
* This is virtual DMA - we don't need anything here.
*/
static void sound_enable_disable_dma(dmach_t channel, dma_t *dma)
static void sound_enable_disable_dma(unsigned int chan, dma_t *dma)
{
}
@ -302,8 +327,24 @@ static struct dma_ops sound_dma_ops = {
.disable = sound_enable_disable_dma,
};
void __init arch_dma_init(dma_t *dma)
static struct iomd_dma iomd_dma[6];
static struct floppy_dma floppy_dma = {
.dma = {
.d_ops = &floppy_dma_ops,
},
.fiq = FIQ_FLOPPYDATA,
};
static dma_t sound_dma = {
.d_ops = &sound_dma_ops,
};
static int __init rpc_dma_init(void)
{
unsigned int i;
int ret;
iomd_writeb(0, IOMD_IO0CR);
iomd_writeb(0, IOMD_IO1CR);
iomd_writeb(0, IOMD_IO2CR);
@ -311,31 +352,39 @@ void __init arch_dma_init(dma_t *dma)
iomd_writeb(0xa0, IOMD_DMATCR);
dma[DMA_0].dma_base = IOMD_IO0CURA;
dma[DMA_0].dma_irq = IRQ_DMA0;
dma[DMA_0].d_ops = &iomd_dma_ops;
dma[DMA_1].dma_base = IOMD_IO1CURA;
dma[DMA_1].dma_irq = IRQ_DMA1;
dma[DMA_1].d_ops = &iomd_dma_ops;
dma[DMA_2].dma_base = IOMD_IO2CURA;
dma[DMA_2].dma_irq = IRQ_DMA2;
dma[DMA_2].d_ops = &iomd_dma_ops;
dma[DMA_3].dma_base = IOMD_IO3CURA;
dma[DMA_3].dma_irq = IRQ_DMA3;
dma[DMA_3].d_ops = &iomd_dma_ops;
dma[DMA_S0].dma_base = IOMD_SD0CURA;
dma[DMA_S0].dma_irq = IRQ_DMAS0;
dma[DMA_S0].d_ops = &iomd_dma_ops;
dma[DMA_S1].dma_base = IOMD_SD1CURA;
dma[DMA_S1].dma_irq = IRQ_DMAS1;
dma[DMA_S1].d_ops = &iomd_dma_ops;
dma[DMA_VIRTUAL_FLOPPY].dma_irq = FIQ_FLOPPYDATA;
dma[DMA_VIRTUAL_FLOPPY].d_ops = &floppy_dma_ops;
dma[DMA_VIRTUAL_SOUND].d_ops = &sound_dma_ops;
/*
* Setup DMA channels 2,3 to be for podules
* and channels 0,1 for internal devices
*/
iomd_writeb(DMA_EXT_IO3|DMA_EXT_IO2, IOMD_DMAEXT);
iomd_dma[DMA_0].base = IOMD_IO0CURA;
iomd_dma[DMA_0].irq = IRQ_DMA0;
iomd_dma[DMA_1].base = IOMD_IO1CURA;
iomd_dma[DMA_1].irq = IRQ_DMA1;
iomd_dma[DMA_2].base = IOMD_IO2CURA;
iomd_dma[DMA_2].irq = IRQ_DMA2;
iomd_dma[DMA_3].base = IOMD_IO3CURA;
iomd_dma[DMA_3].irq = IRQ_DMA3;
iomd_dma[DMA_S0].base = IOMD_SD0CURA;
iomd_dma[DMA_S0].irq = IRQ_DMAS0;
iomd_dma[DMA_S1].base = IOMD_SD1CURA;
iomd_dma[DMA_S1].irq = IRQ_DMAS1;
for (i = DMA_0; i <= DMA_S1; i++) {
iomd_dma[i].dma.d_ops = &iomd_dma_ops;
ret = isa_dma_add(i, &iomd_dma[i].dma);
if (ret)
printk("IOMDDMA%u: unable to register: %d\n", i, ret);
}
ret = isa_dma_add(DMA_VIRTUAL_FLOPPY, &floppy_dma.dma);
if (ret)
printk("IOMDFLOPPY: unable to register: %d\n", ret);
ret = isa_dma_add(DMA_VIRTUAL_SOUND, &sound_dma);
if (ret)
printk("IOMDSOUND: unable to register: %d\n", ret);
return 0;
}
core_initcall(rpc_dma_init);
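The RiscPC code no longer keeps controller state (base address, IRQ, current buffer) in the shared dma_t; struct iomd_dma and struct floppy_dma wrap the generic structure instead, and each operation recovers its wrapper with container_of(). A stripped-down sketch of that idiom under an assumed wrapper named my_dma (only the embedding pattern is taken from the code above):

#include <linux/kernel.h>	/* container_of() */
#include <linux/interrupt.h>	/* enable_irq() */
#include <asm/mach/dma.h>	/* dma_t, struct dma_struct */

struct my_dma {
	struct dma_struct dma;	/* generic part, registered via isa_dma_add() */
	unsigned long base;	/* controller-private state lives beside it */
	int irq;
};

static void my_dma_enable(unsigned int chan, dma_t *dma)
{
	/* The core hands back only the embedded dma_t; step back out
	 * to the wrapper to reach the private fields.
	 */
	struct my_dma *mdma = container_of(dma, struct my_dma, dma);

	enable_irq(mdma->irq);
}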

View File

@ -23,5 +23,7 @@
#define DMA_FLOPPY DMA_VIRTUAL_FLOPPY
#define IOMD_DMA_BOUNDARY (PAGE_SIZE - 1)
#endif /* _ASM_ARCH_DMA_H */

View File

@ -13,9 +13,11 @@
#include <asm/dma.h>
#include <asm/mach/dma.h>
void __init arch_dma_init(dma_t *dma)
static int __init shark_dma_init(void)
{
#ifdef CONFIG_ISA_DMA
isa_init_dma(dma);
isa_init_dma();
#endif
return 0;
}
core_initcall(shark_dma_init);

View File

@ -113,7 +113,7 @@ struct imx_dma_channel {
void (*err_handler) (int, void *, int errcode);
void (*prog_handler) (int, void *, struct scatterlist *);
void *data;
unsigned int dma_mode;
unsigned int dma_mode;
struct scatterlist *sg;
unsigned int resbytes;
int dma_num;

View File

@ -45,8 +45,6 @@ static const struct portinfo pata_icside_portinfo_v6_2 = {
.stepping = 6,
};
#define PATA_ICSIDE_MAX_SG 128
struct pata_icside_state {
void __iomem *irq_port;
void __iomem *ioc_base;
@ -57,7 +55,6 @@ struct pata_icside_state {
u8 disabled;
unsigned int speed[ATA_MAX_DEVICES];
} port[2];
struct scatterlist sg[PATA_ICSIDE_MAX_SG];
};
struct pata_icside_info {
@ -222,9 +219,7 @@ static void pata_icside_bmdma_setup(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pata_icside_state *state = ap->host->private_data;
struct scatterlist *sg, *rsg = state->sg;
unsigned int write = qc->tf.flags & ATA_TFLAG_WRITE;
unsigned int si;
/*
* We are simplex; BUG if we try to fiddle with DMA
@ -232,21 +227,13 @@ static void pata_icside_bmdma_setup(struct ata_queued_cmd *qc)
*/
BUG_ON(dma_channel_active(state->dma));
/*
* Copy ATAs scattered sg list into a contiguous array of sg
*/
for_each_sg(qc->sg, sg, qc->n_elem, si) {
memcpy(rsg, sg, sizeof(*sg));
rsg++;
}
/*
* Route the DMA signals to the correct interface
*/
writeb(state->port[ap->port_no].port_sel, state->ioc_base);
set_dma_speed(state->dma, state->port[ap->port_no].speed[qc->dev->devno]);
set_dma_sg(state->dma, state->sg, rsg - state->sg);
set_dma_sg(state->dma, qc->sg, qc->n_elem);
set_dma_mode(state->dma, write ? DMA_MODE_WRITE : DMA_MODE_READ);
/* issue r/w command */
@ -306,8 +293,8 @@ static int icside_dma_init(struct pata_icside_info *info)
static struct scsi_host_template pata_icside_sht = {
ATA_BASE_SHT(DRV_NAME),
.sg_tablesize = PATA_ICSIDE_MAX_SG,
.dma_boundary = ~0, /* no dma boundaries */
.sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
.dma_boundary = IOMD_DMA_BOUNDARY,
};
static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)

View File

@ -390,7 +390,8 @@ static struct scsi_host_template cumanascsi2_template = {
.eh_abort_handler = fas216_eh_abort,
.can_queue = 1,
.this_id = 7,
.sg_tablesize = SG_ALL,
.sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
.dma_boundary = IOMD_DMA_BOUNDARY,
.cmd_per_lun = 1,
.use_clustering = DISABLE_CLUSTERING,
.proc_name = "cumanascsi2",

View File

@ -508,7 +508,8 @@ static struct scsi_host_template eesox_template = {
.eh_abort_handler = fas216_eh_abort,
.can_queue = 1,
.this_id = 7,
.sg_tablesize = SG_ALL,
.sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
.dma_boundary = IOMD_DMA_BOUNDARY,
.cmd_per_lun = 1,
.use_clustering = DISABLE_CLUSTERING,
.proc_name = "eesox",

View File

@ -302,7 +302,8 @@ static struct scsi_host_template powertecscsi_template = {
.can_queue = 8,
.this_id = 7,
.sg_tablesize = SG_ALL,
.sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
.dma_boundary = IOMD_DMA_BOUNDARY,
.cmd_per_lun = 2,
.use_clustering = ENABLE_CLUSTERING,
.proc_name = "powertec",