Merge remote-tracking branch 'spi/topic/s3c64xx' into spi-next

Mark Brown 2013-06-26 16:21:04 +01:00
commit f4e975814e
1 changed file with 143 additions and 59 deletions


@@ -39,6 +39,7 @@
#endif
#define MAX_SPI_PORTS 3
#define S3C64XX_SPI_QUIRK_POLL (1 << 0)
/* Registers and bit-fields */
@@ -130,6 +131,7 @@
#define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
#define is_polling(x) (x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)
#define RXBUSY (1<<2)
#define TXBUSY (1<<3)
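The poll-mode support hinges on the two new definitions above: a quirk bit tested by is_polling() and busy-wait budgets derived from msecs_to_loops(). Below is a minimal standalone sketch of how they compose; the loops_per_jiffy and HZ values are made-up placeholders (on a real kernel they come from boot-time calibration and the configured tick rate):

#include <stdio.h>

/* Placeholder values; in the kernel these come from boot-time
 * calibration (loops_per_jiffy) and the configured tick rate (HZ). */
#define HZ 100
static unsigned long loops_per_jiffy = 500000;

#define S3C64XX_SPI_QUIRK_POLL (1 << 0)
#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * (t))

struct port_conf { int quirks; };

static int is_polling(const struct port_conf *pc)
{
	return pc->quirks & S3C64XX_SPI_QUIRK_POLL;
}

int main(void)
{
	struct port_conf poll_port = { .quirks = S3C64XX_SPI_QUIRK_POLL };

	/* Busy-wait budget for a 10 ms timeout with the values above:
	 * 500000 / 1000 * 100 * 10 = 500000 loop iterations. */
	printf("10 ms -> %lu loops\n", msecs_to_loops(10));
	printf("polling? %d\n", is_polling(&poll_port));
	return 0;
}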
@@ -158,6 +160,7 @@ struct s3c64xx_spi_port_config {
int fifo_lvl_mask[MAX_SPI_PORTS];
int rx_lvl_offset;
int tx_st_done;
int quirks;
bool high_speed;
bool clk_from_cmu;
};
@@ -205,6 +208,7 @@ struct s3c64xx_spi_driver_data {
struct s3c64xx_spi_port_config *port_conf;
unsigned int port_id;
unsigned long gpios[4];
bool cs_gpio;
};
static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
@@ -344,8 +348,12 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
{
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
/* Acquire DMA channels */
while (!acquire_dma(sdd))
/*
* If the DMA resource was not available at probe time, there is
* no need to issue DMA requests; otherwise acquire the DMA channels.
*/
while (!is_polling(sdd) && !acquire_dma(sdd))
usleep_range(10000, 11000);
pm_runtime_get_sync(&sdd->pdev->dev);
@@ -358,9 +366,12 @@ static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
/* Free DMA channels */
sdd->ops->release((enum dma_ch)sdd->rx_dma.ch, &s3c64xx_spi_dma_client);
sdd->ops->release((enum dma_ch)sdd->tx_dma.ch, &s3c64xx_spi_dma_client);
if (!is_polling(sdd)) {
sdd->ops->release((enum dma_ch)sdd->rx_dma.ch,
&s3c64xx_spi_dma_client);
sdd->ops->release((enum dma_ch)sdd->tx_dma.ch,
&s3c64xx_spi_dma_client);
}
pm_runtime_put(&sdd->pdev->dev);
return 0;
@@ -464,8 +475,10 @@ static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
/* Free DMA channels */
dma_release_channel(sdd->rx_dma.ch);
dma_release_channel(sdd->tx_dma.ch);
if (!is_polling(sdd)) {
dma_release_channel(sdd->rx_dma.ch);
dma_release_channel(sdd->tx_dma.ch);
}
pm_runtime_put(&sdd->pdev->dev);
return 0;
@@ -558,14 +571,40 @@ static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
/* Deselect the last toggled device */
cs = sdd->tgl_spi->controller_data;
gpio_set_value(cs->line,
spi->mode & SPI_CS_HIGH ? 0 : 1);
if (sdd->cs_gpio)
gpio_set_value(cs->line,
spi->mode & SPI_CS_HIGH ? 0 : 1);
}
sdd->tgl_spi = NULL;
}
cs = spi->controller_data;
gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);
if (sdd->cs_gpio)
gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 1 : 0);
/* Start the signals */
writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
}
static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
int timeout_ms)
{
void __iomem *regs = sdd->regs;
unsigned long val = 1;
u32 status;
/* max fifo depth available */
u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;
if (timeout_ms)
val = msecs_to_loops(timeout_ms);
do {
status = readl(regs + S3C64XX_SPI_STATUS);
} while (RX_FIFO_LVL(status, sdd) < max_fifo && --val);
/* return the actual received data length */
return RX_FIFO_LVL(status, sdd);
}
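A rough userspace model of what the new s3c64xx_spi_wait_for_timeout() helper does: spin on the status word until the RX FIFO level reaches the FIFO depth or the loop budget runs out, then report how many bytes actually arrived. The fake_rx_level() source and the 256-byte depth are invented for illustration:

#include <stdio.h>

#define FIFO_DEPTH 256	/* (FIFO_LVL_MASK >> 1) + 1 for a 0x1ff mask */

/* Stand-in for readl(regs + S3C64XX_SPI_STATUS): pretend 16 bytes
 * trickle into the RX FIFO on every poll. */
static unsigned int fake_rx_level(void)
{
	static unsigned int level;

	if (level < FIFO_DEPTH)
		level += 16;
	return level;
}

/* Mirror of the helper's loop: returns the RX level actually seen,
 * which may fall short of FIFO_DEPTH if the budget expires first. */
static unsigned int wait_for_rx(unsigned long loops)
{
	unsigned int level;

	do {
		level = fake_rx_level();
	} while (level < FIFO_DEPTH && --loops);

	return level;
}

int main(void)
{
	printf("received %u bytes\n", wait_for_rx(1000));
	return 0;
}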
static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
@@ -590,20 +629,19 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
} while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
}
if (!val)
return -EIO;
if (dma_mode) {
u32 status;
/*
* If the previous xfer completed within the timeout, proceed;
* otherwise return -EIO.
* DmaTx returns after simply writing the data into the FIFO,
* without waiting for the real transmission on the bus to finish.
* DmaRx returns only after the DMA has read the data from the FIFO,
* which needs the bus transmission to finish, so no extra wait is
* needed when the xfer involves Rx (with or without Tx).
*/
if (xfer->rx_buf == NULL) {
if (val && !xfer->rx_buf) {
val = msecs_to_loops(10);
status = readl(regs + S3C64XX_SPI_STATUS);
while ((TX_FIFO_LVL(status, sdd)
@@ -613,30 +651,54 @@ static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
status = readl(regs + S3C64XX_SPI_STATUS);
}
if (!val)
return -EIO;
}
/* If we timed out while checking the rx/tx status, return an error */
if (!val)
return -EIO;
} else {
int loops;
u32 cpy_len;
u8 *buf;
/* If it was only Tx */
if (xfer->rx_buf == NULL) {
if (!xfer->rx_buf) {
sdd->state &= ~TXBUSY;
return 0;
}
switch (sdd->cur_bpw) {
case 32:
ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
xfer->rx_buf, xfer->len / 4);
break;
case 16:
ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
xfer->rx_buf, xfer->len / 2);
break;
default:
ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
xfer->rx_buf, xfer->len);
break;
}
/*
* If the receive length is bigger than the controller FIFO
* size, calculate the number of loops and read the FIFO that
* many times:
* loops = length / max FIFO size (derived from the FIFO mask).
* For any length smaller than the FIFO size, the code below is
* executed at least once.
*/
loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
buf = xfer->rx_buf;
do {
/* wait for data to be received in the fifo */
cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
(loops ? ms : 0));
switch (sdd->cur_bpw) {
case 32:
ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
buf, cpy_len / 4);
break;
case 16:
ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
buf, cpy_len / 2);
break;
default:
ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
buf, cpy_len);
break;
}
buf = buf + cpy_len;
} while (loops--);
sdd->state &= ~RXBUSY;
}
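To make the loops arithmetic above concrete, here is a tiny standalone example assuming a 0x1ff fifo_lvl_mask (256-byte FIFO) and a hypothetical 600-byte receive; the do/while mirrors the driver's structure, so even a transfer smaller than the FIFO is drained exactly once:

#include <stdio.h>

#define FIFO_LVL_MASK 0x1ff
#define FIFO_DEPTH ((FIFO_LVL_MASK >> 1) + 1)	/* 256 bytes */

int main(void)
{
	unsigned int len = 600;		/* hypothetical xfer->len */
	int loops = len / FIFO_DEPTH;	/* 600 / 256 = 2 */
	unsigned int copied = 0;

	/* The loop body runs loops + 1 times: two full 256-byte chunks
	 * followed by the remaining 88 bytes. */
	do {
		unsigned int chunk = len - copied;

		if (chunk > FIFO_DEPTH)
			chunk = FIFO_DEPTH;	/* what the FIFO can hold */
		printf("read chunk of %u bytes\n", chunk);
		copied += chunk;
	} while (loops--);

	printf("total %u of %u bytes\n", copied, len);
	return 0;
}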
@@ -651,7 +713,11 @@ static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
if (sdd->tgl_spi == spi)
sdd->tgl_spi = NULL;
gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1);
if (sdd->cs_gpio)
gpio_set_value(cs->line, spi->mode & SPI_CS_HIGH ? 0 : 1);
/* Quiesce the signals */
writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
}
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
@@ -733,7 +799,7 @@ static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
struct device *dev = &sdd->pdev->dev;
struct spi_transfer *xfer;
if (msg->is_dma_mapped)
if (is_polling(sdd) || msg->is_dma_mapped)
return 0;
/* First mark all xfer unmapped */
@@ -782,7 +848,7 @@ static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
struct device *dev = &sdd->pdev->dev;
struct spi_transfer *xfer;
if (msg->is_dma_mapped)
if (is_polling(sdd) || msg->is_dma_mapped)
return;
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
@@ -861,8 +927,9 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
/* Polling method for xfers not bigger than FIFO capacity */
use_dma = 0;
if (sdd->rx_dma.ch && sdd->tx_dma.ch &&
(xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1)))
if (!is_polling(sdd) &&
(sdd->rx_dma.ch && sdd->tx_dma.ch &&
(xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1))))
use_dma = 1;
spin_lock_irqsave(&sdd->lock, flags);
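The reworked use_dma test above reads as a standalone predicate: DMA is used only when the port is not poll-only, both channels were acquired, and the transfer is larger than the FIFO. A hedged restatement with an assumed 0x7f mask (64-byte FIFO):

#include <stdbool.h>
#include <stdio.h>

#define S3C64XX_SPI_QUIRK_POLL (1 << 0)

/* Trimmed-down stand-in for the driver state this decision reads. */
struct fake_sdd {
	int quirks;
	bool rx_dma_ch;
	bool tx_dma_ch;
	int fifo_lvl_mask;
};

static bool use_dma(const struct fake_sdd *sdd, unsigned int len)
{
	unsigned int fifo_depth = (sdd->fifo_lvl_mask >> 1) + 1;

	return !(sdd->quirks & S3C64XX_SPI_QUIRK_POLL) &&
	       sdd->rx_dma_ch && sdd->tx_dma_ch &&
	       len > fifo_depth;
}

int main(void)
{
	struct fake_sdd sdd = {
		.quirks = 0, .rx_dma_ch = true, .tx_dma_ch = true,
		.fifo_lvl_mask = 0x7f,	/* 64-byte FIFO */
	};

	printf("64 bytes  -> dma? %d\n", use_dma(&sdd, 64));	/* 0: fits the FIFO */
	printf("128 bytes -> dma? %d\n", use_dma(&sdd, 128));	/* 1 */
	return 0;
}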
@@ -876,17 +943,10 @@ static int s3c64xx_spi_transfer_one_message(struct spi_master *master,
/* Slave Select */
enable_cs(sdd, spi);
/* Start the signals */
writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
spin_unlock_irqrestore(&sdd->lock, flags);
status = wait_for_xfer(sdd, xfer, use_dma);
/* Quiesce the signals */
writel(S3C64XX_SPI_SLAVE_SIG_INACT,
sdd->regs + S3C64XX_SPI_SLAVE_SEL);
if (status) {
dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
@@ -942,8 +1002,10 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
{
struct s3c64xx_spi_csinfo *cs;
struct device_node *slave_np, *data_np = NULL;
struct s3c64xx_spi_driver_data *sdd;
u32 fb_delay = 0;
sdd = spi_master_get_devdata(spi->master);
slave_np = spi->dev.of_node;
if (!slave_np) {
dev_err(&spi->dev, "device node not found\n");
@@ -963,7 +1025,10 @@ static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
return ERR_PTR(-ENOMEM);
}
cs->line = of_get_named_gpio(data_np, "cs-gpio", 0);
/* The CS line is asserted/deasserted by the gpio pin */
if (sdd->cs_gpio)
cs->line = of_get_named_gpio(data_np, "cs-gpio", 0);
if (!gpio_is_valid(cs->line)) {
dev_err(&spi->dev, "chip select gpio is not specified or invalid\n");
kfree(cs);
@@ -1003,7 +1068,8 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
return -ENODEV;
}
if (!spi_get_ctldata(spi)) {
/* Request the gpio only if the CS line is driven by a gpio pin */
if (sdd->cs_gpio) {
err = gpio_request_one(cs->line, GPIOF_OUT_INIT_HIGH,
dev_name(&spi->dev));
if (err) {
@@ -1012,9 +1078,11 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
cs->line, err);
goto err_gpio_req;
}
spi_set_ctldata(spi, cs);
}
if (!spi_get_ctldata(spi))
spi_set_ctldata(spi, cs);
sci = sdd->cntrlr_info;
spin_lock_irqsave(&sdd->lock, flags);
@@ -1092,8 +1160,10 @@ err_gpio_req:
static void s3c64xx_spi_cleanup(struct spi_device *spi)
{
struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);
struct s3c64xx_spi_driver_data *sdd;
if (cs) {
sdd = spi_master_get_devdata(spi->master);
if (cs && sdd->cs_gpio) {
gpio_free(cs->line);
if (spi->dev.of_node)
kfree(cs);
@@ -1270,7 +1340,11 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
sdd->cntrlr_info = sci;
sdd->pdev = pdev;
sdd->sfr_start = mem_res->start;
sdd->cs_gpio = true;
if (pdev->dev.of_node) {
if (!of_find_property(pdev->dev.of_node, "cs-gpio", NULL))
sdd->cs_gpio = false;
ret = of_alias_get_id(pdev->dev.of_node, "spi");
if (ret < 0) {
dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
@@ -1287,19 +1361,19 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
if (!sdd->pdev->dev.of_node) {
res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
if (!res) {
dev_err(&pdev->dev, "Unable to get SPI tx dma "
"resource\n");
return -ENXIO;
}
sdd->tx_dma.dmach = res->start;
dev_warn(&pdev->dev, "Unable to get SPI tx dma "
"resource. Switching to poll mode\n");
sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
} else
sdd->tx_dma.dmach = res->start;
res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
if (!res) {
dev_err(&pdev->dev, "Unable to get SPI rx dma "
"resource\n");
return -ENXIO;
}
sdd->rx_dma.dmach = res->start;
dev_warn(&pdev->dev, "Unable to get SPI rx dma "
"resource. Switching to poll mode\n");
sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
} else
sdd->rx_dma.dmach = res->start;
}
sdd->tx_dma.direction = DMA_MEM_TO_DEV;
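A small standalone model of the probe-time fallback introduced here: if either platform DMA resource is missing, the driver no longer bails out with -ENXIO but flags the port as poll-only so the is_polling() checks above take the PIO paths. The fake_get_dma_resource() lookup and the channel numbers are invented for illustration:

#include <stdbool.h>
#include <stdio.h>

#define S3C64XX_SPI_QUIRK_POLL (1 << 0)

struct fake_port_conf { int quirks; };

/* Pretend the second DMA resource (rx) is missing on this board. */
static bool fake_get_dma_resource(int index, int *dmach)
{
	if (index == 1)
		return false;
	*dmach = 10 + index;	/* made-up channel number */
	return true;
}

int main(void)
{
	struct fake_port_conf conf = { .quirks = 0 };
	int tx_dmach = -1, rx_dmach = -1;

	if (!fake_get_dma_resource(0, &tx_dmach)) {
		printf("no tx dma resource, switching to poll mode\n");
		conf.quirks = S3C64XX_SPI_QUIRK_POLL;
	}
	if (!fake_get_dma_resource(1, &rx_dmach)) {
		printf("no rx dma resource, switching to poll mode\n");
		conf.quirks = S3C64XX_SPI_QUIRK_POLL;
	}

	printf("poll-only? %d (tx %d, rx %d)\n",
	       !!(conf.quirks & S3C64XX_SPI_QUIRK_POLL), tx_dmach, rx_dmach);
	return 0;
}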
@@ -1534,6 +1608,15 @@ static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
.clk_from_cmu = true,
};
static struct s3c64xx_spi_port_config exynos5440_spi_port_config = {
.fifo_lvl_mask = { 0x1ff },
.rx_lvl_offset = 15,
.tx_st_done = 25,
.high_speed = true,
.clk_from_cmu = true,
.quirks = S3C64XX_SPI_QUIRK_POLL,
};
static struct platform_device_id s3c64xx_spi_driver_ids[] = {
{
.name = "s3c2443-spi",
@@ -1557,15 +1640,16 @@ static struct platform_device_id s3c64xx_spi_driver_ids[] = {
{ },
};
#ifdef CONFIG_OF
static const struct of_device_id s3c64xx_spi_dt_match[] = {
{ .compatible = "samsung,exynos4210-spi",
.data = (void *)&exynos4_spi_port_config,
},
{ .compatible = "samsung,exynos5440-spi",
.data = (void *)&exynos5440_spi_port_config,
},
{ },
};
MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
#endif /* CONFIG_OF */
static struct platform_driver s3c64xx_spi_driver = {
.driver = {