Merge branch 'spi-4.20' into spi-next

Mark Brown 2018-10-21 17:00:14 +01:00
commit 4b51c747e4
53 changed files with 3941 additions and 396 deletions

@ -53,20 +53,8 @@ Required properties:
- clocks: Serial engine core clock needed by the device.
Qualcomm Technologies Inc. GENI Serial Engine based SPI Controller
Required properties:
- compatible: Must contain "qcom,geni-spi".
- reg: Must contain SPI register location and length.
- interrupts: Must contain SPI controller interrupts.
- clock-names: Must contain "se".
- clocks: Serial engine core clock needed by the device.
- spi-max-frequency: Specifies maximum SPI clock frequency, units - Hz.
- #address-cells: Must be <1> to define a chip select address on
the SPI bus.
- #size-cells: Must be <0>.
SPI slave nodes must be children of the SPI master node and conform to SPI bus
binding as described in Documentation/devicetree/bindings/spi/spi-bus.txt.
node binding is described in
Documentation/devicetree/bindings/spi/qcom,spi-geni-qcom.txt.
Example:
geniqup@8c0000 {
@ -103,17 +91,4 @@ Example:
pinctrl-1 = <&qup_1_uart_3_sleep>;
};
spi0: spi@a84000 {
compatible = "qcom,geni-spi";
reg = <0xa84000 0x4000>;
interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "se";
clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>;
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qup_1_spi_2_active>;
pinctrl-1 = <&qup_1_spi_2_sleep>;
spi-max-frequency = <19200000>;
#address-cells = <1>;
#size-cells = <0>;
};
}

@ -0,0 +1,39 @@
GENI based Qualcomm Universal Peripheral (QUP) Serial Peripheral Interface (SPI)
The QUP v3 core is a GENI based AHB slave that provides a common data path
(an output FIFO and an input FIFO) for serial peripheral interface (SPI)
mini-core.
SPI in master mode supports up to 50MHz, up to four chip selects, programmable
data path from 4 bits to 32 bits and numerous protocol variants.
Required properties:
- compatible: Must contain "qcom,geni-spi".
- reg: Must contain SPI register location and length.
- interrupts: Must contain SPI controller interrupts.
- clock-names: Must contain "se".
- clocks: Serial engine core clock needed by the device.
- #address-cells: Must be <1> to define a chip select address on
the SPI bus.
- #size-cells: Must be <0>.
SPI controller nodes must be children of the GENI-based Qualcomm Universal
Peripheral (QUP). Please refer to the GENI-based QUP wrapper controller node
bindings described in Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt.
SPI slave nodes must be children of the SPI master node and conform to SPI bus
binding as described in Documentation/devicetree/bindings/spi/spi-bus.txt.
Example:
spi0: spi@a84000 {
compatible = "qcom,geni-spi";
reg = <0xa84000 0x4000>;
interrupts = <GIC_SPI 354 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "se";
clocks = <&clock_gcc GCC_QUPV3_WRAP0_S0_CLK>;
pinctrl-names = "default", "sleep";
pinctrl-0 = <&qup_1_spi_2_active>;
pinctrl-1 = <&qup_1_spi_2_sleep>;
#address-cells = <1>;
#size-cells = <0>;
};

@ -0,0 +1,36 @@
Qualcomm Quad Serial Peripheral Interface (QSPI)
The QSPI controller allows SPI protocol communication in single, dual, or quad
wire transmission modes for read/write access to slaves such as NOR flash.
Required properties:
- compatible: An SoC specific identifier followed by "qcom,qspi-v1", such as
"qcom,sdm845-qspi", "qcom,qspi-v1"
- reg: Should contain the base register location and length.
- interrupts: Interrupt number used by the controller.
- clocks: Should contain the core and AHB clock.
- clock-names: Should be "core" for core clock and "iface" for AHB clock.
SPI slave nodes must be children of the SPI master node and can contain
properties described in Documentation/devicetree/bindings/spi/spi-bus.txt
Example:
qspi: spi@88df000 {
compatible = "qcom,sdm845-qspi", "qcom,qspi-v1";
reg = <0x88df000 0x600>;
#address-cells = <1>;
#size-cells = <0>;
interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "iface", "core";
clocks = <&gcc GCC_QSPI_CNOC_PERIPH_AHB_CLK>,
<&gcc GCC_QSPI_CORE_CLK>;
flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
spi-max-frequency = <25000000>;
spi-tx-bus-width = <2>;
spi-rx-bus-width = <2>;
};
};

@ -2,7 +2,9 @@ Renesas MSIOF spi controller
Required properties:
- compatible : "renesas,msiof-r8a7743" (RZ/G1M)
"renesas,msiof-r8a7744" (RZ/G1N)
"renesas,msiof-r8a7745" (RZ/G1E)
"renesas,msiof-r8a774a1" (RZ/G2M)
"renesas,msiof-r8a7790" (R-Car H2)
"renesas,msiof-r8a7791" (R-Car M2-W)
"renesas,msiof-r8a7792" (R-Car V2H)
@ -11,10 +13,14 @@ Required properties:
"renesas,msiof-r8a7795" (R-Car H3)
"renesas,msiof-r8a7796" (R-Car M3-W)
"renesas,msiof-r8a77965" (R-Car M3-N)
"renesas,msiof-r8a77970" (R-Car V3M)
"renesas,msiof-r8a77980" (R-Car V3H)
"renesas,msiof-r8a77990" (R-Car E3)
"renesas,msiof-r8a77995" (R-Car D3)
"renesas,msiof-sh73a0" (SH-Mobile AG5)
"renesas,sh-mobile-msiof" (generic SH-Mobile compatibile device)
"renesas,rcar-gen2-msiof" (generic R-Car Gen2 and RZ/G1 compatible device)
"renesas,rcar-gen3-msiof" (generic R-Car Gen3 compatible device)
"renesas,rcar-gen3-msiof" (generic R-Car Gen3 and RZ/G2 compatible device)
"renesas,sh-msiof" (deprecated)
When compatible with the generic version, nodes

@ -2,7 +2,7 @@ Synopsys DesignWare AMBA 2.0 Synchronous Serial Interface.
Required properties:
- compatible : "snps,dw-apb-ssi" or "mscc,<soc>-spi", where soc is "ocelot" or
"jaguar2"
"jaguar2", or "amazon,alpine-dw-apb-ssi"
- reg : The register base for the controller. For "mscc,<soc>-spi", a second
register set is required (named ICPU_CFG:SPI_MST)
- interrupts : One interrupt, used by the controller.

@ -3,6 +3,7 @@
Required properties:
- compatible :
- "fsl,imx7ulp-spi" for LPSPI compatible with the one integrated on i.MX7ULP soc
- "fsl,imx8qxp-spi" for LPSPI compatible with the one integrated on i.MX8QXP soc
- reg : address and length of the lpspi master registers
- interrupts : lpspi interrupt
- clocks : lpspi clock specifier

@ -0,0 +1,24 @@
PXA2xx SSP SPI Controller
Required properties:
- compatible: Must be "marvell,mmp2-ssp".
- reg: Offset and length of the device's register set.
- interrupts: Should be the interrupt number.
- clocks: Should contain a single entry describing the clock input.
- #address-cells: Number of cells required to define a chip select address.
- #size-cells: Should be zero.
Optional properties:
- cs-gpios: list of GPIO chip selects. See the SPI bus bindings,
Documentation/devicetree/bindings/spi/spi-bus.txt
Child nodes represent devices on the SPI bus.
See ../spi/spi-bus.txt
Example:
ssp1: spi@d4035000 {
compatible = "marvell,mmp2-ssp";
reg = <0xd4035000 0x1000>;
clocks = <&soc_clocks MMP2_CLK_SSP0>;
interrupts = <0>;
};

@ -3,7 +3,7 @@ Device tree configuration for Renesas RSPI/QSPI driver
Required properties:
- compatible : For Renesas Serial Peripheral Interface on legacy SH:
"renesas,rspi-<soctype>", "renesas,rspi" as fallback.
For Renesas Serial Peripheral Interface on RZ/A1H:
For Renesas Serial Peripheral Interface on RZ/A:
"renesas,rspi-<soctype>", "renesas,rspi-rz" as fallback.
For Quad Serial Peripheral Interface on R-Car Gen2 and
RZ/G1 devices:
@ -11,7 +11,9 @@ Required properties:
Examples with soctypes are:
- "renesas,rspi-sh7757" (SH)
- "renesas,rspi-r7s72100" (RZ/A1H)
- "renesas,rspi-r7s9210" (RZ/A2)
- "renesas,qspi-r8a7743" (RZ/G1M)
- "renesas,qspi-r8a7744" (RZ/G1N)
- "renesas,qspi-r8a7745" (RZ/G1E)
- "renesas,qspi-r8a7790" (R-Car H2)
- "renesas,qspi-r8a7791" (R-Car M2-W)

@ -0,0 +1,32 @@
Binding for MTK SPI Slave controller
Required properties:
- compatible: should be one of the following.
- mediatek,mt2712-spi-slave: for mt2712 platforms
- reg: Address and length of the register set for the device.
- interrupts: Should contain spi interrupt.
- clocks: phandles to input clocks.
It's a clock gate, and should be <&infracfg CLK_INFRA_AO_SPI1>.
- clock-names: should be "spi" for the clock gate.
Optional properties:
- assigned-clocks: it's a mux clock, should be <&topckgen CLK_TOP_SPISLV_SEL>.
- assigned-clock-parents: parent of the mux clock.
It's a PLL, and should be one of the following.
- <&topckgen CLK_TOP_UNIVPLL1_D2>: specify parent clock 312 MHz.
It's the default one.
- <&topckgen CLK_TOP_UNIVPLL1_D4>: specify parent clock 156 MHz.
- <&topckgen CLK_TOP_UNIVPLL2_D4>: specify parent clock 104 MHz.
- <&topckgen CLK_TOP_UNIVPLL1_D8>: specify parent clock 78 MHz.
Example:
- SoC Specific Portion:
spis1: spi@10013000 {
compatible = "mediatek,mt2712-spi-slave";
reg = <0 0x10013000 0 0x100>;
interrupts = <GIC_SPI 283 IRQ_TYPE_LEVEL_LOW>;
clocks = <&infracfg CLK_INFRA_AO_SPI1>;
clock-names = "spi";
assigned-clocks = <&topckgen CLK_TOP_SPISLV_SEL>;
assigned-clock-parents = <&topckgen CLK_TOP_UNIVPLL1_D2>;
};

@ -0,0 +1,26 @@
Spreadtrum SPI Controller
Required properties:
- compatible: Should be "sprd,sc9860-spi".
- reg: Offset and length of SPI controller register space.
- interrupts: Should contain SPI interrupt.
- clock-names: Should contain following entries:
"spi" for SPI clock,
"source" for SPI source (parent) clock,
"enable" for SPI module enable clock.
- clocks: List of clock phandles, sorted in the same order
as the clock-names property.
- #address-cells: The number of cells required to define a chip select
address on the SPI bus. Should be set to 1.
- #size-cells: Should be set to 0.
Example:
spi0: spi@70a00000{
compatible = "sprd,sc9860-spi";
reg = <0 0x70a00000 0 0x1000>;
interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "spi", "source","enable";
clocks = <&clk_spi0>, <&ext_26m>, <&clk_ap_apb_gates 5>;
#address-cells = <1>;
#size-cells = <0>;
};

@ -0,0 +1,44 @@
* STMicroelectronics Quad Serial Peripheral Interface (QSPI)
Required properties:
- compatible: should be "st,stm32f469-qspi"
- reg: the first contains the register location and length.
the second contains the memory mapping address and length
- reg-names: should contain the reg names "qspi" "qspi_mm"
- interrupts: should contain the interrupt for the device
- clocks: the phandle of the clock needed by the QSPI controller
- A pinctrl must be defined to set pins in mode of operation for QSPI transfer
Optional properties:
- resets: must contain the phandle to the reset controller.
An SPI flash (NOR/NAND) must be a child of the SPI node and can have some
properties. Also see jedec,spi-nor.txt.
Required properties:
- reg: chip-select number (the QSPI controller may connect 2 flashes)
- spi-max-frequency: max frequency of spi bus
Optional property:
- spi-rx-bus-width: see ./spi-bus.txt for the description
Example:
qspi: spi@a0001000 {
compatible = "st,stm32f469-qspi";
reg = <0xa0001000 0x1000>, <0x90000000 0x10000000>;
reg-names = "qspi", "qspi_mm";
interrupts = <91>;
resets = <&rcc STM32F4_AHB3_RESET(QSPI)>;
clocks = <&rcc 0 STM32F4_AHB3_CLOCK(QSPI)>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_qspi0>;
flash@0 {
compatible = "jedec,spi-nor";
reg = <0>;
spi-rx-bus-width = <4>;
spi-max-frequency = <108000000>;
...
};
};

@ -513,7 +513,7 @@ EXPORT_SYMBOL(geni_se_resources_on);
*/
int geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl)
{
unsigned long freq = 0;
long freq = 0;
int i;
if (se->clk_perf_tbl) {
@ -529,7 +529,7 @@ int geni_se_clk_tbl_get(struct geni_se *se, unsigned long **tbl)
for (i = 0; i < MAX_CLK_PERF_LEVEL; i++) {
freq = clk_round_rate(se->clk, freq + 1);
if (!freq || freq == se->clk_perf_tbl[i - 1])
if (freq <= 0 || freq == se->clk_perf_tbl[i - 1])
break;
se->clk_perf_tbl[i] = freq;
}
@ -544,16 +544,17 @@ EXPORT_SYMBOL(geni_se_clk_tbl_get);
* @se: Pointer to the concerned serial engine.
* @req_freq: Requested clock frequency.
* @index: Index of the resultant frequency in the table.
* @res_freq: Resultant frequency which matches or is closer to the
* requested frequency.
* @res_freq: Resultant frequency of the source clock.
* @exact: Flag to indicate exact multiple requirement of the requested
* frequency.
*
* This function is called by the protocol drivers to determine the matching
* or exact multiple of the requested frequency, as provided by the serial
* engine clock in order to meet the performance requirements. If there is
* no matching or exact multiple of the requested frequency found, then it
* selects the closest floor frequency, if exact flag is not set.
* This function is called by the protocol drivers to determine the best match
* of the requested frequency as provided by the serial engine clock in order
* to meet the performance requirements.
*
* If we return success:
* - if @exact is true then @res_freq / <an_integer> == @req_freq
* - if @exact is false then @res_freq / <an_integer> <= @req_freq
*
* Return: 0 on success, standard Linux error codes on failure.
*/
@ -564,6 +565,9 @@ int geni_se_clk_freq_match(struct geni_se *se, unsigned long req_freq,
unsigned long *tbl;
int num_clk_levels;
int i;
unsigned long best_delta;
unsigned long new_delta;
unsigned int divider;
num_clk_levels = geni_se_clk_tbl_get(se, &tbl);
if (num_clk_levels < 0)
@ -572,18 +576,21 @@ int geni_se_clk_freq_match(struct geni_se *se, unsigned long req_freq,
if (num_clk_levels == 0)
return -EINVAL;
*res_freq = 0;
best_delta = ULONG_MAX;
for (i = 0; i < num_clk_levels; i++) {
if (!(tbl[i] % req_freq)) {
divider = DIV_ROUND_UP(tbl[i], req_freq);
new_delta = req_freq - tbl[i] / divider;
if (new_delta < best_delta) {
/* We have a new best! */
*index = i;
*res_freq = tbl[i];
return 0;
}
if (!(*res_freq) || ((tbl[i] > *res_freq) &&
(tbl[i] < req_freq))) {
*index = i;
*res_freq = tbl[i];
/* If the new best is exact then we're done */
if (new_delta == 0)
return 0;
/* Record how close we got */
best_delta = new_delta;
}
}
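The reworked loop above no longer insists on an exact integer multiple of the requested frequency: for every table entry it rounds the divider up so the divided rate never exceeds the request, keeps the entry with the smallest shortfall, and returns early when the shortfall is zero. A minimal standalone sketch of that selection for the non-exact path (the helper name and standalone form are illustrative, not the driver API):

/*
 * Closest-floor source clock selection, sketched outside the kernel.
 * tbl[] holds candidate source clock rates; pick the one whose integer
 * divider gets closest to req_freq without exceeding it.
 */
#include <limits.h>
#include <stddef.h>

static int pick_src_clk(const unsigned long *tbl, size_t n,
                        unsigned long req_freq,
                        size_t *index, unsigned long *res_freq)
{
        unsigned long best_delta = ULONG_MAX;
        size_t i;

        for (i = 0; i < n; i++) {
                unsigned long divider, new_delta;

                if (!tbl[i])
                        continue;
                /* ceil(tbl[i] / req_freq), so tbl[i] / divider <= req_freq */
                divider = (tbl[i] + req_freq - 1) / req_freq;
                new_delta = req_freq - tbl[i] / divider;

                if (new_delta < best_delta) {
                        *index = i;
                        *res_freq = tbl[i];
                        if (new_delta == 0)     /* exact multiple: done */
                                return 0;
                        best_delta = new_delta; /* record how close we got */
                }
        }
        return best_delta == ULONG_MAX ? -1 : 0;
}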

@ -129,7 +129,7 @@ config SPI_BCM63XX
config SPI_BCM63XX_HSSPI
tristate "Broadcom BCM63XX HS SPI controller driver"
depends on BCM63XX || COMPILE_TEST
depends on BCM63XX || ARCH_BCM_63XX || COMPILE_TEST
help
This enables support for the High Speed SPI controller present on
newer Broadcom BCM63XX SoCs.
@ -520,6 +520,12 @@ config SPI_RSPI
help
SPI driver for Renesas RSPI and QSPI blocks.
config SPI_QCOM_QSPI
tristate "QTI QSPI controller"
depends on ARCH_QCOM
help
QSPI (Quad SPI) driver for the Qualcomm QSPI controller.
config SPI_QUP
tristate "Qualcomm SPI controller with QUP interface"
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
@ -533,6 +539,18 @@ config SPI_QUP
This driver can also be built as a module. If so, the module
will be called spi_qup.
config SPI_QCOM_GENI
tristate "Qualcomm GENI based SPI controller"
depends on QCOM_GENI_SE
help
This driver supports the GENI serial engine based SPI controller in
master mode on Qualcomm Technologies Inc. SoCs. If you say yes to
this option, support will be included for the built-in SPI
interface on these SoCs.
This driver can also be built as a module. If so, the module
will be called spi-geni-qcom.
config SPI_S3C24XX
tristate "Samsung S3C24XX series SPI"
depends on ARCH_S3C24XX
@ -596,6 +614,22 @@ config SPI_SIRF
help
SPI driver for CSR SiRFprimaII SoCs
config SPI_SLAVE_MT27XX
tristate "MediaTek SPI slave device"
depends on ARCH_MEDIATEK || COMPILE_TEST
depends on SPI_SLAVE
help
This selects the MediaTek(R) SPI slave device driver.
If you want to use the MediaTek(R) SPI slave interface,
say Y or M here. If you are not sure, say N.
SPI slave drivers for MediaTek MT27XX series ARM SoCs.
config SPI_SPRD
tristate "Spreadtrum SPI controller"
depends on ARCH_SPRD || COMPILE_TEST
help
SPI driver for Spreadtrum SoCs.
config SPI_SPRD_ADI
tristate "Spreadtrum ADI controller"
depends on ARCH_SPRD || COMPILE_TEST
@ -613,6 +647,15 @@ config SPI_STM32
is not available, the driver automatically falls back to
PIO mode.
config SPI_STM32_QSPI
tristate "STMicroelectronics STM32 QUAD SPI controller"
depends on ARCH_STM32 || COMPILE_TEST
depends on OF
help
This enables support for the Quad SPI controller in master mode.
This driver does not support generic SPI. The implementation only
supports the spi-mem interface.
config SPI_ST_SSC4
tristate "STMicroelectronics SPI SSC-based driver"
depends on ARCH_STI || COMPILE_TEST

@ -74,6 +74,8 @@ obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o
spi-pxa2xx-platform-objs := spi-pxa2xx.o spi-pxa2xx-dma.o
obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
obj-$(CONFIG_SPI_QCOM_GENI) += spi-geni-qcom.o
obj-$(CONFIG_SPI_QCOM_QSPI) += spi-qcom-qspi.o
obj-$(CONFIG_SPI_QUP) += spi-qup.o
obj-$(CONFIG_SPI_ROCKCHIP) += spi-rockchip.o
obj-$(CONFIG_SPI_RB4XX) += spi-rb4xx.o
@ -88,8 +90,11 @@ obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
obj-$(CONFIG_SPI_SIRF) += spi-sirf.o
obj-$(CONFIG_SPI_SLAVE_MT27XX) += spi-slave-mt27xx.o
obj-$(CONFIG_SPI_SPRD) += spi-sprd.o
obj-$(CONFIG_SPI_SPRD_ADI) += spi-sprd-adi.o
obj-$(CONFIG_SPI_STM32) += spi-stm32.o
obj-$(CONFIG_SPI_STM32_QSPI) += spi-stm32-qspi.o
obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o
obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o

@ -1767,10 +1767,8 @@ static int atmel_spi_suspend(struct device *dev)
/* Stop the queue running */
ret = spi_master_suspend(master);
if (ret) {
dev_warn(dev, "cannot suspend master\n");
if (ret)
return ret;
}
if (!pm_runtime_suspended(dev))
atmel_spi_runtime_suspend(dev);
@ -1799,11 +1797,7 @@ static int atmel_spi_resume(struct device *dev)
}
/* Start the queue running */
ret = spi_master_resume(master);
if (ret)
dev_err(dev, "problem starting queue (%d)\n", ret);
return ret;
return spi_master_resume(master);
}
#endif

@ -101,6 +101,7 @@ struct bcm63xx_hsspi {
struct platform_device *pdev;
struct clk *clk;
struct clk *pll_clk;
void __iomem *regs;
u8 __iomem *fifo;
@ -332,7 +333,7 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
struct resource *res_mem;
void __iomem *regs;
struct device *dev = &pdev->dev;
struct clk *clk;
struct clk *clk, *pll_clk = NULL;
int irq, ret;
u32 reg, rate, num_cs = HSSPI_SPI_MAX_CS;
@ -358,7 +359,7 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
rate = clk_get_rate(clk);
if (!rate) {
struct clk *pll_clk = devm_clk_get(dev, "pll");
pll_clk = devm_clk_get(dev, "pll");
if (IS_ERR(pll_clk)) {
ret = PTR_ERR(pll_clk);
@ -373,19 +374,20 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
clk_disable_unprepare(pll_clk);
if (!rate) {
ret = -EINVAL;
goto out_disable_clk;
goto out_disable_pll_clk;
}
}
master = spi_alloc_master(&pdev->dev, sizeof(*bs));
if (!master) {
ret = -ENOMEM;
goto out_disable_clk;
goto out_disable_pll_clk;
}
bs = spi_master_get_devdata(master);
bs->pdev = pdev;
bs->clk = clk;
bs->pll_clk = pll_clk;
bs->regs = regs;
bs->speed_hz = rate;
bs->fifo = (u8 __iomem *)(bs->regs + HSSPI_FIFO_REG(0));
@ -440,6 +442,8 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
out_put_master:
spi_master_put(master);
out_disable_pll_clk:
clk_disable_unprepare(pll_clk);
out_disable_clk:
clk_disable_unprepare(clk);
return ret;
@ -453,6 +457,7 @@ static int bcm63xx_hsspi_remove(struct platform_device *pdev)
/* reset the hardware and block queue progress */
__raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
clk_disable_unprepare(bs->pll_clk);
clk_disable_unprepare(bs->clk);
return 0;
@ -465,6 +470,7 @@ static int bcm63xx_hsspi_suspend(struct device *dev)
struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
spi_master_suspend(master);
clk_disable_unprepare(bs->pll_clk);
clk_disable_unprepare(bs->clk);
return 0;
@ -480,6 +486,12 @@ static int bcm63xx_hsspi_resume(struct device *dev)
if (ret)
return ret;
if (bs->pll_clk) {
ret = clk_prepare_enable(bs->pll_clk);
if (ret)
return ret;
}
spi_master_resume(master);
return 0;

@ -208,13 +208,11 @@ static inline void clear_io_bits(void __iomem *addr, u32 bits)
static void davinci_spi_chipselect(struct spi_device *spi, int value)
{
struct davinci_spi *dspi;
struct davinci_spi_platform_data *pdata;
struct davinci_spi_config *spicfg = spi->controller_data;
u8 chip_sel = spi->chip_select;
u16 spidat1 = CS_DEFAULT;
dspi = spi_master_get_devdata(spi->master);
pdata = &dspi->pdata;
/* program delay transfers if tx_delay is non zero */
if (spicfg && spicfg->wdelay)
@ -232,7 +230,8 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
!(spi->mode & SPI_CS_HIGH));
} else {
if (value == BITBANG_CS_ACTIVE) {
spidat1 |= SPIDAT1_CSHOLD_MASK;
if (!(spi->mode & SPI_CS_WORD))
spidat1 |= SPIDAT1_CSHOLD_MASK;
spidat1 &= ~(0x1 << chip_sel);
}
}
@ -421,26 +420,17 @@ static int davinci_spi_setup(struct spi_device *spi)
{
int retval = 0;
struct davinci_spi *dspi;
struct davinci_spi_platform_data *pdata;
struct spi_master *master = spi->master;
struct device_node *np = spi->dev.of_node;
bool internal_cs = true;
dspi = spi_master_get_devdata(spi->master);
pdata = &dspi->pdata;
if (!(spi->mode & SPI_NO_CS)) {
if (np && (master->cs_gpios != NULL) && (spi->cs_gpio >= 0)) {
retval = gpio_direction_output(
spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
internal_cs = false;
} else if (pdata->chip_sel &&
spi->chip_select < pdata->num_chipselect &&
pdata->chip_sel[spi->chip_select] != SPI_INTERN_CS) {
spi->cs_gpio = pdata->chip_sel[spi->chip_select];
retval = gpio_direction_output(
spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
internal_cs = false;
}
if (retval) {
@ -449,8 +439,9 @@ static int davinci_spi_setup(struct spi_device *spi)
return retval;
}
if (internal_cs)
if (internal_cs) {
set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select);
}
}
if (spi->mode & SPI_READY)
@ -985,7 +976,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
dspi->prescaler_limit = pdata->prescaler_limit;
dspi->version = pdata->version;
dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_WORD;
if (dspi->version == SPI_VERSION_2)
dspi->bitbang.flags |= SPI_READY;
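With SPI_CS_WORD now advertised in bitbang.flags, a peripheral driver can ask the DaVinci controller to drop the chip select between every word instead of holding it for the whole transfer. A hedged sketch of the client side (the probe function and device are hypothetical; spi_setup() and SPI_CS_WORD are the standard SPI core interfaces):

#include <linux/spi/spi.h>

/* Hypothetical client probe: request per-word chip select toggling. */
static int my_sensor_probe(struct spi_device *spi)
{
        spi->mode |= SPI_CS_WORD;       /* toggle CS between words */
        return spi_setup(spi);          /* fails if the controller lacks the mode */
}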

@ -34,8 +34,9 @@ struct dw_spi_mmio {
};
#define MSCC_CPU_SYSTEM_CTRL_GENERAL_CTRL 0x24
#define OCELOT_IF_SI_OWNER_MASK GENMASK(5, 4)
#define OCELOT_IF_SI_OWNER_OFFSET 4
#define JAGUAR2_IF_SI_OWNER_OFFSET 6
#define MSCC_IF_SI_OWNER_MASK GENMASK(1, 0)
#define MSCC_IF_SI_OWNER_SISL 0
#define MSCC_IF_SI_OWNER_SIBM 1
#define MSCC_IF_SI_OWNER_SIMC 2
@ -76,7 +77,8 @@ static void dw_spi_mscc_set_cs(struct spi_device *spi, bool enable)
}
static int dw_spi_mscc_init(struct platform_device *pdev,
struct dw_spi_mmio *dwsmmio)
struct dw_spi_mmio *dwsmmio,
const char *cpu_syscon, u32 if_si_owner_offset)
{
struct dw_spi_mscc *dwsmscc;
struct resource *res;
@ -92,7 +94,7 @@ static int dw_spi_mscc_init(struct platform_device *pdev,
return PTR_ERR(dwsmscc->spi_mst);
}
dwsmscc->syscon = syscon_regmap_lookup_by_compatible("mscc,ocelot-cpu-syscon");
dwsmscc->syscon = syscon_regmap_lookup_by_compatible(cpu_syscon);
if (IS_ERR(dwsmscc->syscon))
return PTR_ERR(dwsmscc->syscon);
@ -101,8 +103,8 @@ static int dw_spi_mscc_init(struct platform_device *pdev,
/* Select the owner of the SI interface */
regmap_update_bits(dwsmscc->syscon, MSCC_CPU_SYSTEM_CTRL_GENERAL_CTRL,
OCELOT_IF_SI_OWNER_MASK,
MSCC_IF_SI_OWNER_SIMC << OCELOT_IF_SI_OWNER_OFFSET);
MSCC_IF_SI_OWNER_MASK << if_si_owner_offset,
MSCC_IF_SI_OWNER_SIMC << if_si_owner_offset);
dwsmmio->dws.set_cs = dw_spi_mscc_set_cs;
dwsmmio->priv = dwsmscc;
@ -110,6 +112,28 @@ static int dw_spi_mscc_init(struct platform_device *pdev,
return 0;
}
static int dw_spi_mscc_ocelot_init(struct platform_device *pdev,
struct dw_spi_mmio *dwsmmio)
{
return dw_spi_mscc_init(pdev, dwsmmio, "mscc,ocelot-cpu-syscon",
OCELOT_IF_SI_OWNER_OFFSET);
}
static int dw_spi_mscc_jaguar2_init(struct platform_device *pdev,
struct dw_spi_mmio *dwsmmio)
{
return dw_spi_mscc_init(pdev, dwsmmio, "mscc,jaguar2-cpu-syscon",
JAGUAR2_IF_SI_OWNER_OFFSET);
}
static int dw_spi_alpine_init(struct platform_device *pdev,
struct dw_spi_mmio *dwsmmio)
{
dwsmmio->dws.cs_override = 1;
return 0;
}
static int dw_spi_mmio_probe(struct platform_device *pdev)
{
int (*init_func)(struct platform_device *pdev,
@ -212,7 +236,9 @@ static int dw_spi_mmio_remove(struct platform_device *pdev)
static const struct of_device_id dw_spi_mmio_of_match[] = {
{ .compatible = "snps,dw-apb-ssi", },
{ .compatible = "mscc,ocelot-spi", .data = dw_spi_mscc_init},
{ .compatible = "mscc,ocelot-spi", .data = dw_spi_mscc_ocelot_init},
{ .compatible = "mscc,jaguar2-spi", .data = dw_spi_mscc_jaguar2_init},
{ .compatible = "amazon,alpine-dw-apb-ssi", .data = dw_spi_alpine_init},
{ /* end of table */}
};
MODULE_DEVICE_TABLE(of, dw_spi_mmio_of_match);

@ -144,6 +144,8 @@ void dw_spi_set_cs(struct spi_device *spi, bool enable)
if (!enable)
dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
else if (dws->cs_override)
dw_writel(dws, DW_SPI_SER, 0);
}
EXPORT_SYMBOL_GPL(dw_spi_set_cs);
@ -308,15 +310,10 @@ static int dw_spi_transfer_one(struct spi_controller *master,
dws->current_freq = transfer->speed_hz;
spi_set_clk(dws, chip->clk_div);
}
if (transfer->bits_per_word == 8) {
dws->n_bytes = 1;
dws->dma_width = 1;
} else if (transfer->bits_per_word == 16) {
dws->n_bytes = 2;
dws->dma_width = 2;
} else {
return -EINVAL;
}
dws->n_bytes = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);
dws->dma_width = DIV_ROUND_UP(transfer->bits_per_word, BITS_PER_BYTE);
/* Default SPI mode is SCPOL = 0, SCPH = 0 */
cr0 = (transfer->bits_per_word - 1)
| (chip->type << SPI_FRF_OFFSET)
@ -468,6 +465,10 @@ static void spi_hw_init(struct device *dev, struct dw_spi *dws)
dws->fifo_len = (fifo == 1) ? 0 : fifo;
dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
}
/* enable HW fixup for explicit CS deselect for Amazon's alpine chip */
if (dws->cs_override)
dw_writel(dws, DW_SPI_CS_OVERRIDE, 0xF);
}
int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
@ -496,7 +497,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
}
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
master->bus_num = dws->bus_num;
master->num_chipselect = dws->num_cs;
master->setup = dw_spi_setup;
@ -572,13 +573,8 @@ EXPORT_SYMBOL_GPL(dw_spi_suspend_host);
int dw_spi_resume_host(struct dw_spi *dws)
{
int ret;
spi_hw_init(&dws->master->dev, dws);
ret = spi_controller_resume(dws->master);
if (ret)
dev_err(&dws->master->dev, "fail to start queue (%d)\n", ret);
return ret;
return spi_controller_resume(dws->master);
}
EXPORT_SYMBOL_GPL(dw_spi_resume_host);

@ -32,6 +32,7 @@
#define DW_SPI_IDR 0x58
#define DW_SPI_VERSION 0x5c
#define DW_SPI_DR 0x60
#define DW_SPI_CS_OVERRIDE 0xf4
/* Bit fields in CTRLR0 */
#define SPI_DFS_OFFSET 0
@ -109,6 +110,7 @@ struct dw_spi {
u32 fifo_len; /* depth of the FIFO buffer */
u32 max_freq; /* max bus freq supported */
int cs_override;
u32 reg_io_width; /* DR I/O width in bytes */
u16 bus_num;
u16 num_cs; /* supported slave numbers */

@ -246,6 +246,19 @@ static int ep93xx_spi_read_write(struct spi_master *master)
return -EINPROGRESS;
}
static enum dma_transfer_direction
ep93xx_dma_data_to_trans_dir(enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
return DMA_MEM_TO_DEV;
case DMA_FROM_DEVICE:
return DMA_DEV_TO_MEM;
default:
return DMA_TRANS_NONE;
}
}
/**
* ep93xx_spi_dma_prepare() - prepares a DMA transfer
* @master: SPI master
@ -257,7 +270,7 @@ static int ep93xx_spi_read_write(struct spi_master *master)
*/
static struct dma_async_tx_descriptor *
ep93xx_spi_dma_prepare(struct spi_master *master,
enum dma_transfer_direction dir)
enum dma_data_direction dir)
{
struct ep93xx_spi *espi = spi_master_get_devdata(master);
struct spi_transfer *xfer = master->cur_msg->state;
@ -277,9 +290,9 @@ ep93xx_spi_dma_prepare(struct spi_master *master,
buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
memset(&conf, 0, sizeof(conf));
conf.direction = dir;
conf.direction = ep93xx_dma_data_to_trans_dir(dir);
if (dir == DMA_DEV_TO_MEM) {
if (dir == DMA_FROM_DEVICE) {
chan = espi->dma_rx;
buf = xfer->rx_buf;
sgt = &espi->rx_sgt;
@ -343,7 +356,8 @@ ep93xx_spi_dma_prepare(struct spi_master *master,
if (!nents)
return ERR_PTR(-ENOMEM);
txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, conf.direction,
DMA_CTRL_ACK);
if (!txd) {
dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
return ERR_PTR(-ENOMEM);
@ -360,13 +374,13 @@ ep93xx_spi_dma_prepare(struct spi_master *master,
* unmapped.
*/
static void ep93xx_spi_dma_finish(struct spi_master *master,
enum dma_transfer_direction dir)
enum dma_data_direction dir)
{
struct ep93xx_spi *espi = spi_master_get_devdata(master);
struct dma_chan *chan;
struct sg_table *sgt;
if (dir == DMA_DEV_TO_MEM) {
if (dir == DMA_FROM_DEVICE) {
chan = espi->dma_rx;
sgt = &espi->rx_sgt;
} else {
@ -381,8 +395,8 @@ static void ep93xx_spi_dma_callback(void *callback_param)
{
struct spi_master *master = callback_param;
ep93xx_spi_dma_finish(master, DMA_MEM_TO_DEV);
ep93xx_spi_dma_finish(master, DMA_DEV_TO_MEM);
ep93xx_spi_dma_finish(master, DMA_TO_DEVICE);
ep93xx_spi_dma_finish(master, DMA_FROM_DEVICE);
spi_finalize_current_transfer(master);
}
@ -392,15 +406,15 @@ static int ep93xx_spi_dma_transfer(struct spi_master *master)
struct ep93xx_spi *espi = spi_master_get_devdata(master);
struct dma_async_tx_descriptor *rxd, *txd;
rxd = ep93xx_spi_dma_prepare(master, DMA_DEV_TO_MEM);
rxd = ep93xx_spi_dma_prepare(master, DMA_FROM_DEVICE);
if (IS_ERR(rxd)) {
dev_err(&master->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
return PTR_ERR(rxd);
}
txd = ep93xx_spi_dma_prepare(master, DMA_MEM_TO_DEV);
txd = ep93xx_spi_dma_prepare(master, DMA_TO_DEVICE);
if (IS_ERR(txd)) {
ep93xx_spi_dma_finish(master, DMA_DEV_TO_MEM);
ep93xx_spi_dma_finish(master, DMA_FROM_DEVICE);
dev_err(&master->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
return PTR_ERR(txd);
}

@ -798,10 +798,8 @@ static int of_fsl_espi_suspend(struct device *dev)
int ret;
ret = spi_master_suspend(master);
if (ret) {
dev_warn(dev, "cannot suspend master\n");
if (ret)
return ret;
}
return pm_runtime_force_suspend(dev);
}

@ -276,7 +276,7 @@ static int fsl_lpspi_config(struct fsl_lpspi_data *fsl_lpspi)
fsl_lpspi_set_watermark(fsl_lpspi);
temp = CFGR1_PCSCFG | CFGR1_MASTER | CFGR1_NOSTALL;
temp = CFGR1_PCSCFG | CFGR1_MASTER;
if (fsl_lpspi->config.mode & SPI_CS_HIGH)
temp |= CFGR1_PCSPOL;
writel(temp, fsl_lpspi->base + IMX7ULP_CFGR1);

drivers/spi/spi-geni-qcom.c (new file)
@ -0,0 +1,703 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/qcom-geni-se.h>
#include <linux/spi/spi.h>
#include <linux/spinlock.h>
/* SPI SE specific registers and respective register fields */
#define SE_SPI_CPHA 0x224
#define CPHA BIT(0)
#define SE_SPI_LOOPBACK 0x22c
#define LOOPBACK_ENABLE 0x1
#define NORMAL_MODE 0x0
#define LOOPBACK_MSK GENMASK(1, 0)
#define SE_SPI_CPOL 0x230
#define CPOL BIT(2)
#define SE_SPI_DEMUX_OUTPUT_INV 0x24c
#define CS_DEMUX_OUTPUT_INV_MSK GENMASK(3, 0)
#define SE_SPI_DEMUX_SEL 0x250
#define CS_DEMUX_OUTPUT_SEL GENMASK(3, 0)
#define SE_SPI_TRANS_CFG 0x25c
#define CS_TOGGLE BIT(0)
#define SE_SPI_WORD_LEN 0x268
#define WORD_LEN_MSK GENMASK(9, 0)
#define MIN_WORD_LEN 4
#define SE_SPI_TX_TRANS_LEN 0x26c
#define SE_SPI_RX_TRANS_LEN 0x270
#define TRANS_LEN_MSK GENMASK(23, 0)
#define SE_SPI_PRE_POST_CMD_DLY 0x274
#define SE_SPI_DELAY_COUNTERS 0x278
#define SPI_INTER_WORDS_DELAY_MSK GENMASK(9, 0)
#define SPI_CS_CLK_DELAY_MSK GENMASK(19, 10)
#define SPI_CS_CLK_DELAY_SHFT 10
/* M_CMD OP codes for SPI */
#define SPI_TX_ONLY 1
#define SPI_RX_ONLY 2
#define SPI_FULL_DUPLEX 3
#define SPI_TX_RX 7
#define SPI_CS_ASSERT 8
#define SPI_CS_DEASSERT 9
#define SPI_SCK_ONLY 10
/* M_CMD params for SPI */
#define SPI_PRE_CMD_DELAY BIT(0)
#define TIMESTAMP_BEFORE BIT(1)
#define FRAGMENTATION BIT(2)
#define TIMESTAMP_AFTER BIT(3)
#define POST_CMD_DELAY BIT(4)
/* SPI M_COMMAND OPCODE */
enum spi_mcmd_code {
CMD_NONE,
CMD_XFER,
CMD_CS,
CMD_CANCEL,
};
struct spi_geni_master {
struct geni_se se;
struct device *dev;
u32 tx_fifo_depth;
u32 fifo_width_bits;
u32 tx_wm;
unsigned long cur_speed_hz;
unsigned int cur_bits_per_word;
unsigned int tx_rem_bytes;
unsigned int rx_rem_bytes;
const struct spi_transfer *cur_xfer;
struct completion xfer_done;
unsigned int oversampling;
spinlock_t lock;
unsigned int cur_mcmd;
int irq;
};
static void handle_fifo_timeout(struct spi_master *spi,
struct spi_message *msg);
static int get_spi_clk_cfg(unsigned int speed_hz,
struct spi_geni_master *mas,
unsigned int *clk_idx,
unsigned int *clk_div)
{
unsigned long sclk_freq;
unsigned int actual_hz;
struct geni_se *se = &mas->se;
int ret;
ret = geni_se_clk_freq_match(&mas->se,
speed_hz * mas->oversampling,
clk_idx, &sclk_freq, false);
if (ret) {
dev_err(mas->dev, "Failed(%d) to find src clk for %dHz\n",
ret, speed_hz);
return ret;
}
*clk_div = DIV_ROUND_UP(sclk_freq, mas->oversampling * speed_hz);
actual_hz = sclk_freq / (mas->oversampling * *clk_div);
dev_dbg(mas->dev, "req %u=>%u sclk %lu, idx %d, div %d\n", speed_hz,
actual_hz, sclk_freq, *clk_idx, *clk_div);
ret = clk_set_rate(se->clk, sclk_freq);
if (ret)
dev_err(mas->dev, "clk_set_rate failed %d\n", ret);
return ret;
}
static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
{
struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
struct spi_master *spi = dev_get_drvdata(mas->dev);
struct geni_se *se = &mas->se;
unsigned long timeout;
reinit_completion(&mas->xfer_done);
pm_runtime_get_sync(mas->dev);
if (!(slv->mode & SPI_CS_HIGH))
set_flag = !set_flag;
mas->cur_mcmd = CMD_CS;
if (set_flag)
geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
else
geni_se_setup_m_cmd(se, SPI_CS_DEASSERT, 0);
timeout = wait_for_completion_timeout(&mas->xfer_done, HZ);
if (!timeout)
handle_fifo_timeout(spi, NULL);
pm_runtime_put(mas->dev);
}
static void spi_setup_word_len(struct spi_geni_master *mas, u16 mode,
unsigned int bits_per_word)
{
unsigned int pack_words;
bool msb_first = (mode & SPI_LSB_FIRST) ? false : true;
struct geni_se *se = &mas->se;
u32 word_len;
word_len = readl(se->base + SE_SPI_WORD_LEN);
/*
* If bits_per_word isn't a byte aligned value, set the packing to be
* 1 SPI word per FIFO word.
*/
if (!(mas->fifo_width_bits % bits_per_word))
pack_words = mas->fifo_width_bits / bits_per_word;
else
pack_words = 1;
word_len &= ~WORD_LEN_MSK;
word_len |= ((bits_per_word - MIN_WORD_LEN) & WORD_LEN_MSK);
geni_se_config_packing(&mas->se, bits_per_word, pack_words, msb_first,
true, true);
writel(word_len, se->base + SE_SPI_WORD_LEN);
}
static int setup_fifo_params(struct spi_device *spi_slv,
struct spi_master *spi)
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
struct geni_se *se = &mas->se;
u32 loopback_cfg, cpol, cpha, demux_output_inv;
u32 demux_sel, clk_sel, m_clk_cfg, idx, div;
int ret;
loopback_cfg = readl(se->base + SE_SPI_LOOPBACK);
cpol = readl(se->base + SE_SPI_CPOL);
cpha = readl(se->base + SE_SPI_CPHA);
demux_output_inv = 0;
loopback_cfg &= ~LOOPBACK_MSK;
cpol &= ~CPOL;
cpha &= ~CPHA;
if (spi_slv->mode & SPI_LOOP)
loopback_cfg |= LOOPBACK_ENABLE;
if (spi_slv->mode & SPI_CPOL)
cpol |= CPOL;
if (spi_slv->mode & SPI_CPHA)
cpha |= CPHA;
if (spi_slv->mode & SPI_CS_HIGH)
demux_output_inv = BIT(spi_slv->chip_select);
demux_sel = spi_slv->chip_select;
mas->cur_speed_hz = spi_slv->max_speed_hz;
mas->cur_bits_per_word = spi_slv->bits_per_word;
ret = get_spi_clk_cfg(mas->cur_speed_hz, mas, &idx, &div);
if (ret) {
dev_err(mas->dev, "Err setting clks ret(%d) for %ld\n",
ret, mas->cur_speed_hz);
return ret;
}
clk_sel = idx & CLK_SEL_MSK;
m_clk_cfg = (div << CLK_DIV_SHFT) | SER_CLK_EN;
spi_setup_word_len(mas, spi_slv->mode, spi_slv->bits_per_word);
writel(loopback_cfg, se->base + SE_SPI_LOOPBACK);
writel(demux_sel, se->base + SE_SPI_DEMUX_SEL);
writel(cpha, se->base + SE_SPI_CPHA);
writel(cpol, se->base + SE_SPI_CPOL);
writel(demux_output_inv, se->base + SE_SPI_DEMUX_OUTPUT_INV);
writel(clk_sel, se->base + SE_GENI_CLK_SEL);
writel(m_clk_cfg, se->base + GENI_SER_M_CLK_CFG);
return 0;
}
static int spi_geni_prepare_message(struct spi_master *spi,
struct spi_message *spi_msg)
{
int ret;
struct spi_geni_master *mas = spi_master_get_devdata(spi);
struct geni_se *se = &mas->se;
geni_se_select_mode(se, GENI_SE_FIFO);
reinit_completion(&mas->xfer_done);
ret = setup_fifo_params(spi_msg->spi, spi);
if (ret)
dev_err(mas->dev, "Couldn't select mode %d\n", ret);
return ret;
}
static int spi_geni_init(struct spi_geni_master *mas)
{
struct geni_se *se = &mas->se;
unsigned int proto, major, minor, ver;
pm_runtime_get_sync(mas->dev);
proto = geni_se_read_proto(se);
if (proto != GENI_SE_SPI) {
dev_err(mas->dev, "Invalid proto %d\n", proto);
pm_runtime_put(mas->dev);
return -ENXIO;
}
mas->tx_fifo_depth = geni_se_get_tx_fifo_depth(se);
/* Width of Tx and Rx FIFO is same */
mas->fifo_width_bits = geni_se_get_tx_fifo_width(se);
/*
* Hardware programming guide suggests to configure
* RX FIFO RFR level to fifo_depth-2.
*/
geni_se_init(se, 0x0, mas->tx_fifo_depth - 2);
/* Transmit an entire FIFO worth of data per IRQ */
mas->tx_wm = 1;
ver = geni_se_get_qup_hw_version(se);
major = GENI_SE_VERSION_MAJOR(ver);
minor = GENI_SE_VERSION_MINOR(ver);
if (major == 1 && minor == 0)
mas->oversampling = 2;
else
mas->oversampling = 1;
pm_runtime_put(mas->dev);
return 0;
}
static void setup_fifo_xfer(struct spi_transfer *xfer,
struct spi_geni_master *mas,
u16 mode, struct spi_master *spi)
{
u32 m_cmd = 0;
u32 spi_tx_cfg, len;
struct geni_se *se = &mas->se;
spi_tx_cfg = readl(se->base + SE_SPI_TRANS_CFG);
if (xfer->bits_per_word != mas->cur_bits_per_word) {
spi_setup_word_len(mas, mode, xfer->bits_per_word);
mas->cur_bits_per_word = xfer->bits_per_word;
}
/* Speed and bits per word can be overridden per transfer */
if (xfer->speed_hz != mas->cur_speed_hz) {
int ret;
u32 clk_sel, m_clk_cfg;
unsigned int idx, div;
ret = get_spi_clk_cfg(xfer->speed_hz, mas, &idx, &div);
if (ret) {
dev_err(mas->dev, "Err setting clks:%d\n", ret);
return;
}
/*
* SPI core clock gets configured with the requested frequency
* or the frequency closer to the requested frequency.
* For that reason requested frequency is stored in the
* cur_speed_hz and referred in the consecutive transfer instead
* of calling clk_get_rate() API.
*/
mas->cur_speed_hz = xfer->speed_hz;
clk_sel = idx & CLK_SEL_MSK;
m_clk_cfg = (div << CLK_DIV_SHFT) | SER_CLK_EN;
writel(clk_sel, se->base + SE_GENI_CLK_SEL);
writel(m_clk_cfg, se->base + GENI_SER_M_CLK_CFG);
}
mas->tx_rem_bytes = 0;
mas->rx_rem_bytes = 0;
if (xfer->tx_buf && xfer->rx_buf)
m_cmd = SPI_FULL_DUPLEX;
else if (xfer->tx_buf)
m_cmd = SPI_TX_ONLY;
else if (xfer->rx_buf)
m_cmd = SPI_RX_ONLY;
spi_tx_cfg &= ~CS_TOGGLE;
if (!(mas->cur_bits_per_word % MIN_WORD_LEN))
len = xfer->len * BITS_PER_BYTE / mas->cur_bits_per_word;
else
len = xfer->len / (mas->cur_bits_per_word / BITS_PER_BYTE + 1);
len &= TRANS_LEN_MSK;
mas->cur_xfer = xfer;
if (m_cmd & SPI_TX_ONLY) {
mas->tx_rem_bytes = xfer->len;
writel(len, se->base + SE_SPI_TX_TRANS_LEN);
}
if (m_cmd & SPI_RX_ONLY) {
writel(len, se->base + SE_SPI_RX_TRANS_LEN);
mas->rx_rem_bytes = xfer->len;
}
writel(spi_tx_cfg, se->base + SE_SPI_TRANS_CFG);
mas->cur_mcmd = CMD_XFER;
geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
/*
* TX_WATERMARK_REG should be set after SPI configuration and
* setting up GENI SE engine, as driver starts data transfer
* for the watermark interrupt.
*/
if (m_cmd & SPI_TX_ONLY)
writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
}
static void handle_fifo_timeout(struct spi_master *spi,
struct spi_message *msg)
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
unsigned long time_left, flags;
struct geni_se *se = &mas->se;
spin_lock_irqsave(&mas->lock, flags);
reinit_completion(&mas->xfer_done);
mas->cur_mcmd = CMD_CANCEL;
geni_se_cancel_m_cmd(se);
writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
spin_unlock_irqrestore(&mas->lock, flags);
time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
if (time_left)
return;
spin_lock_irqsave(&mas->lock, flags);
reinit_completion(&mas->xfer_done);
geni_se_abort_m_cmd(se);
spin_unlock_irqrestore(&mas->lock, flags);
time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
if (!time_left)
dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
}
static int spi_geni_transfer_one(struct spi_master *spi,
struct spi_device *slv,
struct spi_transfer *xfer)
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
/* Terminate and return success for 0 byte length transfer */
if (!xfer->len)
return 0;
setup_fifo_xfer(xfer, mas, slv->mode, spi);
return 1;
}
static unsigned int geni_byte_per_fifo_word(struct spi_geni_master *mas)
{
/*
* Calculate how many bytes we'll put in each FIFO word. If the
* transfer words don't pack cleanly into a FIFO word we'll just put
* one transfer word in each FIFO word. If they do pack we'll pack 'em.
*/
if (mas->fifo_width_bits % mas->cur_bits_per_word)
return roundup_pow_of_two(DIV_ROUND_UP(mas->cur_bits_per_word,
BITS_PER_BYTE));
return mas->fifo_width_bits / BITS_PER_BYTE;
}
static void geni_spi_handle_tx(struct spi_geni_master *mas)
{
struct geni_se *se = &mas->se;
unsigned int max_bytes;
const u8 *tx_buf;
unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
unsigned int i = 0;
max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
if (mas->tx_rem_bytes < max_bytes)
max_bytes = mas->tx_rem_bytes;
tx_buf = mas->cur_xfer->tx_buf + mas->cur_xfer->len - mas->tx_rem_bytes;
while (i < max_bytes) {
unsigned int j;
unsigned int bytes_to_write;
u32 fifo_word = 0;
u8 *fifo_byte = (u8 *)&fifo_word;
bytes_to_write = min(bytes_per_fifo_word, max_bytes - i);
for (j = 0; j < bytes_to_write; j++)
fifo_byte[j] = tx_buf[i++];
iowrite32_rep(se->base + SE_GENI_TX_FIFOn, &fifo_word, 1);
}
mas->tx_rem_bytes -= max_bytes;
if (!mas->tx_rem_bytes)
writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
}
static void geni_spi_handle_rx(struct spi_geni_master *mas)
{
struct geni_se *se = &mas->se;
u32 rx_fifo_status;
unsigned int rx_bytes;
unsigned int rx_last_byte_valid;
u8 *rx_buf;
unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
unsigned int i = 0;
rx_fifo_status = readl(se->base + SE_GENI_RX_FIFO_STATUS);
rx_bytes = (rx_fifo_status & RX_FIFO_WC_MSK) * bytes_per_fifo_word;
if (rx_fifo_status & RX_LAST) {
rx_last_byte_valid = rx_fifo_status & RX_LAST_BYTE_VALID_MSK;
rx_last_byte_valid >>= RX_LAST_BYTE_VALID_SHFT;
if (rx_last_byte_valid && rx_last_byte_valid < 4)
rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
}
if (mas->rx_rem_bytes < rx_bytes)
rx_bytes = mas->rx_rem_bytes;
rx_buf = mas->cur_xfer->rx_buf + mas->cur_xfer->len - mas->rx_rem_bytes;
while (i < rx_bytes) {
u32 fifo_word = 0;
u8 *fifo_byte = (u8 *)&fifo_word;
unsigned int bytes_to_read;
unsigned int j;
bytes_to_read = min(bytes_per_fifo_word, rx_bytes - i);
ioread32_rep(se->base + SE_GENI_RX_FIFOn, &fifo_word, 1);
for (j = 0; j < bytes_to_read; j++)
rx_buf[i++] = fifo_byte[j];
}
mas->rx_rem_bytes -= rx_bytes;
}
static irqreturn_t geni_spi_isr(int irq, void *data)
{
struct spi_master *spi = data;
struct spi_geni_master *mas = spi_master_get_devdata(spi);
struct geni_se *se = &mas->se;
u32 m_irq;
unsigned long flags;
irqreturn_t ret = IRQ_HANDLED;
if (mas->cur_mcmd == CMD_NONE)
return IRQ_NONE;
spin_lock_irqsave(&mas->lock, flags);
m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
geni_spi_handle_rx(mas);
if (m_irq & M_TX_FIFO_WATERMARK_EN)
geni_spi_handle_tx(mas);
if (m_irq & M_CMD_DONE_EN) {
if (mas->cur_mcmd == CMD_XFER)
spi_finalize_current_transfer(spi);
else if (mas->cur_mcmd == CMD_CS)
complete(&mas->xfer_done);
mas->cur_mcmd = CMD_NONE;
/*
* If this happens, then a CMD_DONE came before all the Tx
* buffer bytes were sent out. This is unusual, log this
* condition and disable the WM interrupt to prevent the
* system from stalling due to an interrupt storm.
* If this happens when all Rx bytes haven't been received, log
* the condition.
* The only known time this can happen is if bits_per_word != 8
* and some registers that expect xfer lengths in num spi_words
* weren't written correctly.
*/
if (mas->tx_rem_bytes) {
writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
dev_err(mas->dev, "Premature done. tx_rem = %d bpw%d\n",
mas->tx_rem_bytes, mas->cur_bits_per_word);
}
if (mas->rx_rem_bytes)
dev_err(mas->dev, "Premature done. rx_rem = %d bpw%d\n",
mas->rx_rem_bytes, mas->cur_bits_per_word);
}
if ((m_irq & M_CMD_CANCEL_EN) || (m_irq & M_CMD_ABORT_EN)) {
mas->cur_mcmd = CMD_NONE;
complete(&mas->xfer_done);
}
writel(m_irq, se->base + SE_GENI_M_IRQ_CLEAR);
spin_unlock_irqrestore(&mas->lock, flags);
return ret;
}
static int spi_geni_probe(struct platform_device *pdev)
{
int ret;
struct spi_master *spi;
struct spi_geni_master *mas;
struct resource *res;
struct geni_se *se;
spi = spi_alloc_master(&pdev->dev, sizeof(*mas));
if (!spi)
return -ENOMEM;
platform_set_drvdata(pdev, spi);
mas = spi_master_get_devdata(spi);
mas->dev = &pdev->dev;
mas->se.dev = &pdev->dev;
mas->se.wrapper = dev_get_drvdata(pdev->dev.parent);
se = &mas->se;
spi->bus_num = -1;
spi->dev.of_node = pdev->dev.of_node;
mas->se.clk = devm_clk_get(&pdev->dev, "se");
if (IS_ERR(mas->se.clk)) {
ret = PTR_ERR(mas->se.clk);
dev_err(&pdev->dev, "Err getting SE Core clk %d\n", ret);
goto spi_geni_probe_err;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
se->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(se->base)) {
ret = PTR_ERR(se->base);
goto spi_geni_probe_err;
}
spi->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP | SPI_CS_HIGH;
spi->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
spi->num_chipselect = 4;
spi->max_speed_hz = 50000000;
spi->prepare_message = spi_geni_prepare_message;
spi->transfer_one = spi_geni_transfer_one;
spi->auto_runtime_pm = true;
spi->handle_err = handle_fifo_timeout;
spi->set_cs = spi_geni_set_cs;
init_completion(&mas->xfer_done);
spin_lock_init(&mas->lock);
pm_runtime_enable(&pdev->dev);
ret = spi_geni_init(mas);
if (ret)
goto spi_geni_probe_runtime_disable;
mas->irq = platform_get_irq(pdev, 0);
if (mas->irq < 0) {
ret = mas->irq;
dev_err(&pdev->dev, "Err getting IRQ %d\n", ret);
goto spi_geni_probe_runtime_disable;
}
ret = request_irq(mas->irq, geni_spi_isr,
IRQF_TRIGGER_HIGH, "spi_geni", spi);
if (ret)
goto spi_geni_probe_runtime_disable;
ret = spi_register_master(spi);
if (ret)
goto spi_geni_probe_free_irq;
return 0;
spi_geni_probe_free_irq:
free_irq(mas->irq, spi);
spi_geni_probe_runtime_disable:
pm_runtime_disable(&pdev->dev);
spi_geni_probe_err:
spi_master_put(spi);
return ret;
}
static int spi_geni_remove(struct platform_device *pdev)
{
struct spi_master *spi = platform_get_drvdata(pdev);
struct spi_geni_master *mas = spi_master_get_devdata(spi);
/* Unregister _before_ disabling pm_runtime() so we stop transfers */
spi_unregister_master(spi);
free_irq(mas->irq, spi);
pm_runtime_disable(&pdev->dev);
return 0;
}
static int __maybe_unused spi_geni_runtime_suspend(struct device *dev)
{
struct spi_master *spi = dev_get_drvdata(dev);
struct spi_geni_master *mas = spi_master_get_devdata(spi);
return geni_se_resources_off(&mas->se);
}
static int __maybe_unused spi_geni_runtime_resume(struct device *dev)
{
struct spi_master *spi = dev_get_drvdata(dev);
struct spi_geni_master *mas = spi_master_get_devdata(spi);
return geni_se_resources_on(&mas->se);
}
static int __maybe_unused spi_geni_suspend(struct device *dev)
{
struct spi_master *spi = dev_get_drvdata(dev);
int ret;
ret = spi_master_suspend(spi);
if (ret)
return ret;
ret = pm_runtime_force_suspend(dev);
if (ret)
spi_master_resume(spi);
return ret;
}
static int __maybe_unused spi_geni_resume(struct device *dev)
{
struct spi_master *spi = dev_get_drvdata(dev);
int ret;
ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
ret = spi_master_resume(spi);
if (ret)
pm_runtime_force_suspend(dev);
return ret;
}
static const struct dev_pm_ops spi_geni_pm_ops = {
SET_RUNTIME_PM_OPS(spi_geni_runtime_suspend,
spi_geni_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(spi_geni_suspend, spi_geni_resume)
};
static const struct of_device_id spi_geni_dt_match[] = {
{ .compatible = "qcom,geni-spi" },
{}
};
MODULE_DEVICE_TABLE(of, spi_geni_dt_match);
static struct platform_driver spi_geni_driver = {
.probe = spi_geni_probe,
.remove = spi_geni_remove,
.driver = {
.name = "geni_spi",
.pm = &spi_geni_pm_ops,
.of_match_table = spi_geni_dt_match,
},
};
module_platform_driver(spi_geni_driver);
MODULE_DESCRIPTION("SPI driver for GENI based QUP cores");
MODULE_LICENSE("GPL v2");

@ -295,9 +295,11 @@ static int spi_gpio_request(struct device *dev,
spi_gpio->miso = devm_gpiod_get_optional(dev, "miso", GPIOD_IN);
if (IS_ERR(spi_gpio->miso))
return PTR_ERR(spi_gpio->miso);
if (!spi_gpio->miso)
/* HW configuration without MISO pin */
*mflags |= SPI_MASTER_NO_RX;
/*
* No setting SPI_MASTER_NO_RX here - if there is only a MOSI
* pin connected the host can still do RX by changing the
* direction of the line.
*/
spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW);
if (IS_ERR(spi_gpio->sck))
@ -423,7 +425,7 @@ static int spi_gpio_probe(struct platform_device *pdev)
spi_gpio->bitbang.chipselect = spi_gpio_chipselect;
spi_gpio->bitbang.set_line_direction = spi_gpio_set_direction;
if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) {
if ((master_flags & SPI_MASTER_NO_TX) == 0) {
spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0;
spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1;
spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2;
@ -447,10 +449,8 @@ static int spi_gpio_probe(struct platform_device *pdev)
static int spi_gpio_remove(struct platform_device *pdev)
{
struct spi_gpio *spi_gpio;
struct spi_gpio_platform_data *pdata;
spi_gpio = platform_get_drvdata(pdev);
pdata = dev_get_platdata(&pdev->dev);
/* stop() unregisters child devices too */
spi_bitbang_stop(&spi_gpio->bitbang);

@ -63,6 +63,7 @@ struct spi_imx_devtype_data {
void (*trigger)(struct spi_imx_data *);
int (*rx_available)(struct spi_imx_data *);
void (*reset)(struct spi_imx_data *);
void (*setup_wml)(struct spi_imx_data *);
void (*disable)(struct spi_imx_data *);
bool has_dmamode;
bool has_slavemode;
@ -216,7 +217,6 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
struct spi_transfer *transfer)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
unsigned int bytes_per_word, i;
if (!master->dma_rx)
return false;
@ -224,14 +224,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
if (spi_imx->slave_mode)
return false;
bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
if (transfer->len < spi_imx->devtype_data->fifo_size)
return false;
for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) {
if (!(transfer->len % (i * bytes_per_word)))
break;
}
spi_imx->wml = i;
spi_imx->dynamic_burst = 0;
return true;
@ -583,18 +578,21 @@ static int mx51_ecspi_config(struct spi_device *spi)
else /* SCLK is _very_ slow */
usleep_range(delay, delay + 10);
return 0;
}
static void mx51_setup_wml(struct spi_imx_data *spi_imx)
{
/*
* Configure the DMA register: setup the watermark
* and enable DMA request.
*/
writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml) |
writel(MX51_ECSPI_DMA_RX_WML(spi_imx->wml - 1) |
MX51_ECSPI_DMA_TX_WML(spi_imx->wml) |
MX51_ECSPI_DMA_RXT_WML(spi_imx->wml) |
MX51_ECSPI_DMA_TEDEN | MX51_ECSPI_DMA_RXDEN |
MX51_ECSPI_DMA_RXTDEN, spi_imx->base + MX51_ECSPI_DMA);
return 0;
}
static int mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
@ -931,6 +929,7 @@ static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
.trigger = mx51_ecspi_trigger,
.rx_available = mx51_ecspi_rx_available,
.reset = mx51_ecspi_reset,
.setup_wml = mx51_setup_wml,
.fifo_size = 64,
.has_dmamode = true,
.dynamic_burst = true,
@ -1138,7 +1137,6 @@ static int spi_imx_setupxfer(struct spi_device *spi,
struct spi_transfer *t)
{
struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
int ret;
if (!t)
return 0;
@ -1179,12 +1177,6 @@ static int spi_imx_setupxfer(struct spi_device *spi,
else
spi_imx->usedma = 0;
if (spi_imx->usedma) {
ret = spi_imx_dma_configure(spi->master);
if (ret)
return ret;
}
if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) {
spi_imx->rx = mx53_ecspi_rx_slave;
spi_imx->tx = mx53_ecspi_tx_slave;
@ -1289,6 +1281,31 @@ static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
unsigned long timeout;
struct spi_master *master = spi_imx->bitbang.master;
struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
struct scatterlist *last_sg = sg_last(rx->sgl, rx->nents);
unsigned int bytes_per_word, i;
int ret;
/* Get the right burst length from the last sg to ensure no tail data */
bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word);
for (i = spi_imx->devtype_data->fifo_size / 2; i > 0; i--) {
if (!(sg_dma_len(last_sg) % (i * bytes_per_word)))
break;
}
/* Use 1 as wml in case no available burst length got */
if (i == 0)
i = 1;
spi_imx->wml = i;
ret = spi_imx_dma_configure(master);
if (ret)
return ret;
if (!spi_imx->devtype_data->setup_wml) {
dev_err(spi_imx->dev, "No setup_wml()?\n");
return -EINVAL;
}
spi_imx->devtype_data->setup_wml(spi_imx);
/*
* The TX DMA setup starts the transfer, so make sure RX is configured

@ -12,6 +12,8 @@
#include "internals.h"
#define SPI_MEM_MAX_BUSWIDTH 4
/**
* spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
* memory operation
@ -149,6 +151,44 @@ static bool spi_mem_default_supports_op(struct spi_mem *mem,
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
static bool spi_mem_buswidth_is_valid(u8 buswidth)
{
if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
return false;
return true;
}
static int spi_mem_check_op(const struct spi_mem_op *op)
{
if (!op->cmd.buswidth)
return -EINVAL;
if ((op->addr.nbytes && !op->addr.buswidth) ||
(op->dummy.nbytes && !op->dummy.buswidth) ||
(op->data.nbytes && !op->data.buswidth))
return -EINVAL;
if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) ||
!spi_mem_buswidth_is_valid(op->addr.buswidth) ||
!spi_mem_buswidth_is_valid(op->dummy.buswidth) ||
!spi_mem_buswidth_is_valid(op->data.buswidth))
return -EINVAL;
return 0;
}
static bool spi_mem_internal_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct spi_controller *ctlr = mem->spi->controller;
if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
return ctlr->mem_ops->supports_op(mem, op);
return spi_mem_default_supports_op(mem, op);
}
/**
* spi_mem_supports_op() - Check if a memory device and the controller it is
* connected to support a specific memory operation
@ -166,12 +206,10 @@ EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
*/
bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct spi_controller *ctlr = mem->spi->controller;
if (spi_mem_check_op(op))
return false;
if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
return ctlr->mem_ops->supports_op(mem, op);
return spi_mem_default_supports_op(mem, op);
return spi_mem_internal_supports_op(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
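For reference, an operation that passes the new validation looks like the sketch below: every phase that carries bytes has a buswidth, and each buswidth has a single bit set and is no larger than SPI_MEM_MAX_BUSWIDTH. The helper name and the 1-1-4 read layout are illustrative only; the SPI_MEM_OP_* macros and spi_mem_supports_op() come from <linux/spi/spi-mem.h>.

#include <linux/spi/spi-mem.h>

/* Illustrative 1-1-4 read: 1-wire cmd/addr, one dummy byte, 4-wire data in. */
static bool my_supports_quad_read(struct spi_mem *mem, void *buf, size_t len)
{
        struct spi_mem_op op =
                SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
                           SPI_MEM_OP_ADDR(3, 0, 1),
                           SPI_MEM_OP_DUMMY(1, 1),
                           SPI_MEM_OP_DATA_IN(len, buf, 4));

        return spi_mem_supports_op(mem, &op);
}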
@ -196,7 +234,11 @@ int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
u8 *tmpbuf;
int ret;
if (!spi_mem_supports_op(mem, op))
ret = spi_mem_check_op(op);
if (ret)
return ret;
if (!spi_mem_internal_supports_op(mem, op))
return -ENOTSUPP;
if (ctlr->mem_ops) {
@ -346,10 +388,25 @@ EXPORT_SYMBOL_GPL(spi_mem_get_name);
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
struct spi_controller *ctlr = mem->spi->controller;
size_t len;
len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
return ctlr->mem_ops->adjust_op_size(mem, op);
if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
if (len > spi_max_transfer_size(mem->spi))
return -EINVAL;
op->data.nbytes = min3((size_t)op->data.nbytes,
spi_max_transfer_size(mem->spi),
spi_max_message_size(mem->spi) -
len);
if (!op->data.nbytes)
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
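The clamping added above lets controllers without dedicated mem_ops split large accesses to fit their transfer and message limits. A hedged sketch of the usual caller loop (the wrapper name, opcode, and 3-byte addressing are assumptions for illustration; spi_mem_adjust_op_size() and spi_mem_exec_op() are the spi-mem APIs):

#include <linux/spi/spi-mem.h>

/* Hypothetical read helper: adjust, execute what fits, advance, repeat. */
static int my_read_all(struct spi_mem *mem, u32 addr, u8 *buf, size_t len)
{
        while (len) {
                struct spi_mem_op op =
                        SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
                                   SPI_MEM_OP_ADDR(3, addr, 1),
                                   SPI_MEM_OP_NO_DUMMY,
                                   SPI_MEM_OP_DATA_IN(len, buf, 1));
                int ret;

                /* May shrink op.data.nbytes to what the controller can do */
                ret = spi_mem_adjust_op_size(mem, &op);
                if (ret)
                        return ret;

                ret = spi_mem_exec_op(mem, &op);
                if (ret)
                        return ret;

                addr += op.data.nbytes;
                buf += op.data.nbytes;
                len -= op.data.nbytes;
        }
        return 0;
}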

@ -98,6 +98,7 @@ struct mtk_spi {
struct clk *parent_clk, *sel_clk, *spi_clk;
struct spi_transfer *cur_transfer;
u32 xfer_len;
u32 num_xfered;
struct scatterlist *tx_sgl, *rx_sgl;
u32 tx_sgl_len, rx_sgl_len;
const struct mtk_spi_compatible *dev_comp;
@ -385,6 +386,7 @@ static int mtk_spi_fifo_transfer(struct spi_master *master,
mdata->cur_transfer = xfer;
mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, xfer->len);
mdata->num_xfered = 0;
mtk_spi_prepare_transfer(master, xfer);
mtk_spi_setup_packet(master);
@ -415,6 +417,7 @@ static int mtk_spi_dma_transfer(struct spi_master *master,
mdata->tx_sgl_len = 0;
mdata->rx_sgl_len = 0;
mdata->cur_transfer = xfer;
mdata->num_xfered = 0;
mtk_spi_prepare_transfer(master, xfer);
@ -482,7 +485,7 @@ static int mtk_spi_setup(struct spi_device *spi)
static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
{
u32 cmd, reg_val, cnt, remainder;
u32 cmd, reg_val, cnt, remainder, len;
struct spi_master *master = dev_id;
struct mtk_spi *mdata = spi_master_get_devdata(master);
struct spi_transfer *trans = mdata->cur_transfer;
@ -497,36 +500,38 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id)
if (trans->rx_buf) {
cnt = mdata->xfer_len / 4;
ioread32_rep(mdata->base + SPI_RX_DATA_REG,
trans->rx_buf, cnt);
trans->rx_buf + mdata->num_xfered, cnt);
remainder = mdata->xfer_len % 4;
if (remainder > 0) {
reg_val = readl(mdata->base + SPI_RX_DATA_REG);
memcpy(trans->rx_buf + (cnt * 4),
&reg_val, remainder);
memcpy(trans->rx_buf +
mdata->num_xfered +
(cnt * 4),
&reg_val,
remainder);
}
}
trans->len -= mdata->xfer_len;
if (!trans->len) {
mdata->num_xfered += mdata->xfer_len;
if (mdata->num_xfered == trans->len) {
spi_finalize_current_transfer(master);
return IRQ_HANDLED;
}
if (trans->tx_buf)
trans->tx_buf += mdata->xfer_len;
if (trans->rx_buf)
trans->rx_buf += mdata->xfer_len;
mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, trans->len);
len = trans->len - mdata->num_xfered;
mdata->xfer_len = min(MTK_SPI_MAX_FIFO_SIZE, len);
mtk_spi_setup_packet(master);
cnt = trans->len / 4;
iowrite32_rep(mdata->base + SPI_TX_DATA_REG, trans->tx_buf, cnt);
cnt = len / 4;
iowrite32_rep(mdata->base + SPI_TX_DATA_REG,
trans->tx_buf + mdata->num_xfered, cnt);
remainder = trans->len % 4;
remainder = len % 4;
if (remainder > 0) {
reg_val = 0;
memcpy(&reg_val, trans->tx_buf + (cnt * 4), remainder);
memcpy(&reg_val,
trans->tx_buf + (cnt * 4) + mdata->num_xfered,
remainder);
writel(reg_val, mdata->base + SPI_TX_DATA_REG);
}
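The interrupt rework above stops mutating trans->len and the tx/rx buffer pointers and instead tracks progress in mdata->num_xfered, refilling the FIFO with min(MTK_SPI_MAX_FIFO_SIZE, remaining) bytes each time. A rough user-space sketch of that bookkeeping (hypothetical names, a fixed 32-byte "FIFO", prints instead of register writes):

#include <stdio.h>
#include <string.h>

#define FIFO_SIZE	32	/* stand-in for MTK_SPI_MAX_FIFO_SIZE */

/*
 * One "interrupt" per loop iteration: the buffer pointer and total length
 * are never modified, only the num_xfered offset advances, which is the
 * bookkeeping the hunk above switches to.
 */
static void fifo_transfer(const unsigned char *tx_buf, size_t len)
{
	size_t num_xfered = 0;

	while (num_xfered < len) {
		size_t chunk = len - num_xfered;

		if (chunk > FIFO_SIZE)
			chunk = FIFO_SIZE;
		/* A real driver would refill the TX FIFO here. */
		printf("write %zu bytes at offset %zu (first byte 0x%02x)\n",
		       chunk, num_xfered, tx_buf[num_xfered]);
		num_xfered += chunk;
	}
}

int main(void)
{
	unsigned char buf[70];

	memset(buf, 0xa5, sizeof(buf));
	fifo_transfer(buf, sizeof(buf));
	return 0;
}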


@ -33,6 +33,7 @@
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/gcd.h>
#include <linux/iopoll.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
@ -126,6 +127,7 @@ struct omap2_mcspi_regs {
};
struct omap2_mcspi {
struct completion txdone;
struct spi_master *master;
/* Virtual base address of the controller */
void __iomem *base;
@ -135,6 +137,7 @@ struct omap2_mcspi {
struct device *dev;
struct omap2_mcspi_regs ctx;
int fifo_depth;
bool slave_aborted;
unsigned int pin_dir:1;
};
@ -274,19 +277,23 @@ static void omap2_mcspi_set_cs(struct spi_device *spi, bool enable)
}
}
static void omap2_mcspi_set_master_mode(struct spi_master *master)
static void omap2_mcspi_set_mode(struct spi_master *master)
{
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
struct omap2_mcspi_regs *ctx = &mcspi->ctx;
u32 l;
/*
* Setup when switching from (reset default) slave mode
* to single-channel master mode
* Choose master or slave mode
*/
l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
l &= ~(OMAP2_MCSPI_MODULCTRL_STEST | OMAP2_MCSPI_MODULCTRL_MS);
l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
l &= ~(OMAP2_MCSPI_MODULCTRL_STEST);
if (spi_controller_is_slave(master)) {
l |= (OMAP2_MCSPI_MODULCTRL_MS);
} else {
l &= ~(OMAP2_MCSPI_MODULCTRL_MS);
l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
}
mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
ctx->modulctrl = l;
@ -299,7 +306,7 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
struct omap2_mcspi_cs *cs = spi->controller_state;
struct omap2_mcspi *mcspi;
unsigned int wcnt;
int max_fifo_depth, fifo_depth, bytes_per_word;
int max_fifo_depth, bytes_per_word;
u32 chconf, xferlevel;
mcspi = spi_master_get_devdata(master);
@ -315,10 +322,6 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
else
max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;
fifo_depth = gcd(t->len, max_fifo_depth);
if (fifo_depth < 2 || fifo_depth % bytes_per_word != 0)
goto disable_fifo;
wcnt = t->len / bytes_per_word;
if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT)
goto disable_fifo;
@ -326,16 +329,17 @@ static void omap2_mcspi_set_fifo(const struct spi_device *spi,
xferlevel = wcnt << 16;
if (t->rx_buf != NULL) {
chconf |= OMAP2_MCSPI_CHCONF_FFER;
xferlevel |= (fifo_depth - 1) << 8;
xferlevel |= (bytes_per_word - 1) << 8;
}
if (t->tx_buf != NULL) {
chconf |= OMAP2_MCSPI_CHCONF_FFET;
xferlevel |= fifo_depth - 1;
xferlevel |= bytes_per_word - 1;
}
mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel);
mcspi_write_chconf0(spi, chconf);
mcspi->fifo_depth = fifo_depth;
mcspi->fifo_depth = max_fifo_depth;
return;
}
@ -353,18 +357,22 @@ disable_fifo:
static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
{
unsigned long timeout;
u32 val;
timeout = jiffies + msecs_to_jiffies(1000);
while (!(readl_relaxed(reg) & bit)) {
if (time_after(jiffies, timeout)) {
if (!(readl_relaxed(reg) & bit))
return -ETIMEDOUT;
else
return 0;
}
cpu_relax();
return readl_poll_timeout(reg, val, val & bit, 1, MSEC_PER_SEC);
}
static int mcspi_wait_for_completion(struct omap2_mcspi *mcspi,
struct completion *x)
{
if (spi_controller_is_slave(mcspi->master)) {
if (wait_for_completion_interruptible(x) ||
mcspi->slave_aborted)
return -EINTR;
} else {
wait_for_completion(x);
}
return 0;
}
@ -517,7 +525,12 @@ omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
dma_async_issue_pending(mcspi_dma->dma_rx);
omap2_mcspi_set_dma_req(spi, 1, 1);
wait_for_completion(&mcspi_dma->dma_rx_completion);
ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_rx_completion);
if (ret || mcspi->slave_aborted) {
dmaengine_terminate_sync(mcspi_dma->dma_rx);
omap2_mcspi_set_dma_req(spi, 1, 0);
return 0;
}
for (x = 0; x < nb_sizes; x++)
kfree(sg_out[x]);
@ -585,7 +598,6 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
struct dma_slave_config cfg;
enum dma_slave_buswidth width;
unsigned es;
u32 burst;
void __iomem *chstat_reg;
void __iomem *irqstat_reg;
int wait_res;
@ -605,34 +617,49 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
}
count = xfer->len;
burst = 1;
if (mcspi->fifo_depth > 0) {
if (count > mcspi->fifo_depth)
burst = mcspi->fifo_depth / es;
else
burst = count / es;
}
memset(&cfg, 0, sizeof(cfg));
cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
cfg.src_addr_width = width;
cfg.dst_addr_width = width;
cfg.src_maxburst = burst;
cfg.dst_maxburst = burst;
cfg.src_maxburst = es;
cfg.dst_maxburst = es;
rx = xfer->rx_buf;
tx = xfer->tx_buf;
if (tx != NULL)
mcspi->slave_aborted = false;
reinit_completion(&mcspi_dma->dma_tx_completion);
reinit_completion(&mcspi_dma->dma_rx_completion);
reinit_completion(&mcspi->txdone);
if (tx) {
/* Enable EOW IRQ to know end of tx in slave mode */
if (spi_controller_is_slave(spi->master))
mcspi_write_reg(spi->master,
OMAP2_MCSPI_IRQENABLE,
OMAP2_MCSPI_IRQSTATUS_EOW);
omap2_mcspi_tx_dma(spi, xfer, cfg);
}
if (rx != NULL)
count = omap2_mcspi_rx_dma(spi, xfer, cfg, es);
if (tx != NULL) {
wait_for_completion(&mcspi_dma->dma_tx_completion);
int ret;
ret = mcspi_wait_for_completion(mcspi, &mcspi_dma->dma_tx_completion);
if (ret || mcspi->slave_aborted) {
dmaengine_terminate_sync(mcspi_dma->dma_tx);
omap2_mcspi_set_dma_req(spi, 0, 0);
return 0;
}
if (spi_controller_is_slave(mcspi->master)) {
ret = mcspi_wait_for_completion(mcspi, &mcspi->txdone);
if (ret || mcspi->slave_aborted)
return 0;
}
if (mcspi->fifo_depth > 0) {
irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;
@ -1089,6 +1116,36 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
gpio_free(spi->cs_gpio);
}
static irqreturn_t omap2_mcspi_irq_handler(int irq, void *data)
{
struct omap2_mcspi *mcspi = data;
u32 irqstat;
irqstat = mcspi_read_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS);
if (!irqstat)
return IRQ_NONE;
/* Disable IRQ and wakeup slave xfer task */
mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQENABLE, 0);
if (irqstat & OMAP2_MCSPI_IRQSTATUS_EOW)
complete(&mcspi->txdone);
return IRQ_HANDLED;
}
static int omap2_mcspi_slave_abort(struct spi_master *master)
{
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
struct omap2_mcspi_dma *mcspi_dma = mcspi->dma_channels;
mcspi->slave_aborted = true;
complete(&mcspi_dma->dma_rx_completion);
complete(&mcspi_dma->dma_tx_completion);
complete(&mcspi->txdone);
return 0;
}
static int omap2_mcspi_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *t)
@ -1255,10 +1312,20 @@ static bool omap2_mcspi_can_dma(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
struct omap2_mcspi_dma *mcspi_dma =
&mcspi->dma_channels[spi->chip_select];
if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx)
return false;
if (spi_controller_is_slave(master))
return true;
return (xfer->len >= DMA_MIN_BYTES);
}
static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
static int omap2_mcspi_controller_setup(struct omap2_mcspi *mcspi)
{
struct spi_master *master = mcspi->master;
struct omap2_mcspi_regs *ctx = &mcspi->ctx;
@ -1275,7 +1342,7 @@ static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
OMAP2_MCSPI_WAKEUPENABLE_WKEN);
ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
omap2_mcspi_set_master_mode(master);
omap2_mcspi_set_mode(master);
pm_runtime_mark_last_busy(mcspi->dev);
pm_runtime_put_autosuspend(mcspi->dev);
return 0;
@ -1350,11 +1417,12 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
struct device_node *node = pdev->dev.of_node;
const struct of_device_id *match;
master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
if (master == NULL) {
dev_dbg(&pdev->dev, "master allocation failed\n");
if (of_property_read_bool(node, "spi-slave"))
master = spi_alloc_slave(&pdev->dev, sizeof(*mcspi));
else
master = spi_alloc_master(&pdev->dev, sizeof(*mcspi));
if (!master)
return -ENOMEM;
}
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
@ -1366,6 +1434,7 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
master->transfer_one = omap2_mcspi_transfer_one;
master->set_cs = omap2_mcspi_set_cs;
master->cleanup = omap2_mcspi_cleanup;
master->slave_abort = omap2_mcspi_slave_abort;
master->dev.of_node = node;
master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
@ -1417,15 +1486,31 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i);
}
status = platform_get_irq(pdev, 0);
if (status == -EPROBE_DEFER)
goto free_master;
if (status < 0) {
dev_err(&pdev->dev, "no irq resource found\n");
goto free_master;
}
init_completion(&mcspi->txdone);
status = devm_request_irq(&pdev->dev, status,
omap2_mcspi_irq_handler, 0, pdev->name,
mcspi);
if (status) {
dev_err(&pdev->dev, "Cannot request IRQ");
goto free_master;
}
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
pm_runtime_enable(&pdev->dev);
status = omap2_mcspi_master_setup(mcspi);
status = omap2_mcspi_controller_setup(mcspi);
if (status < 0)
goto disable_pm;
status = devm_spi_register_master(&pdev->dev, master);
status = devm_spi_register_controller(&pdev->dev, master);
if (status < 0)
goto disable_pm;


@ -431,6 +431,7 @@ orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
int word_len;
struct orion_spi *orion_spi;
int cs = spi->chip_select;
void __iomem *vaddr;
word_len = spi->bits_per_word;
count = xfer->len;
@ -441,8 +442,9 @@ orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
* Use SPI direct write mode if base address is available. Otherwise
* fall back to PIO mode for this transfer.
*/
if ((orion_spi->child[cs].direct_access.vaddr) && (xfer->tx_buf) &&
(word_len == 8)) {
vaddr = orion_spi->child[cs].direct_access.vaddr;
if (vaddr && xfer->tx_buf && word_len == 8) {
unsigned int cnt = count / 4;
unsigned int rem = count % 4;
@ -450,13 +452,11 @@ orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
* Send the TX-data to the SPI device via the direct
* mapped address window
*/
iowrite32_rep(orion_spi->child[cs].direct_access.vaddr,
xfer->tx_buf, cnt);
iowrite32_rep(vaddr, xfer->tx_buf, cnt);
if (rem) {
u32 *buf = (u32 *)xfer->tx_buf;
iowrite8_rep(orion_spi->child[cs].direct_access.vaddr,
&buf[cnt], rem);
iowrite8_rep(vaddr, &buf[cnt], rem);
}
return count;
@ -683,6 +683,7 @@ static int orion_spi_probe(struct platform_device *pdev)
}
for_each_available_child_of_node(pdev->dev.of_node, np) {
struct orion_direct_acc *dir_acc;
u32 cs;
int cs_gpio;
@ -750,14 +751,13 @@ static int orion_spi_probe(struct platform_device *pdev)
* This needs to get extended for the direct SPI-NOR / SPI-NAND
* support, once this gets implemented.
*/
spi->child[cs].direct_access.vaddr = devm_ioremap(&pdev->dev,
r->start,
PAGE_SIZE);
if (!spi->child[cs].direct_access.vaddr) {
dir_acc = &spi->child[cs].direct_access;
dir_acc->vaddr = devm_ioremap(&pdev->dev, r->start, PAGE_SIZE);
if (!dir_acc->vaddr) {
status = -ENOMEM;
goto out_rel_axi_clk;
}
spi->child[cs].direct_access.size = PAGE_SIZE;
dir_acc->size = PAGE_SIZE;
dev_info(&pdev->dev, "CS%d configured for direct access\n", cs);
}
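The direct-access TX path above writes whole 32-bit words with iowrite32_rep() and then the 0-3 leftover bytes with iowrite8_rep(). A minimal sketch of that word/remainder split, with prints standing in for the register writes (not the driver code itself):

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the iowrite32_rep()/iowrite8_rep() register writes. */
static void write_words(const uint32_t *buf, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		printf("word write: 0x%08x\n", (unsigned)buf[i]);
}

static void write_bytes(const uint8_t *buf, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		printf("byte write: 0x%02x\n", (unsigned)buf[i]);
}

/*
 * Push a buffer out as whole 32-bit words, then the 0-3 leftover bytes,
 * mirroring the direct-access TX path in the hunk above.
 */
static void direct_write(const void *tx_buf, size_t count)
{
	size_t cnt = count / 4;
	size_t rem = count % 4;

	write_words(tx_buf, cnt);
	if (rem)
		write_bytes((const uint8_t *)tx_buf + cnt * 4, rem);
}

int main(void)
{
	_Alignas(uint32_t) uint8_t buf[10] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };

	direct_write(buf, sizeof(buf));
	return 0;
}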


@ -468,7 +468,7 @@ static int ring_desc_ring_alloc(struct pic32_sqi *sqi)
/* allocate coherent DMAable memory for hardware buffer descriptors. */
sqi->bd = dma_zalloc_coherent(&sqi->master->dev,
sizeof(*bd) * PESQI_BD_COUNT,
&sqi->bd_dma, GFP_DMA32);
&sqi->bd_dma, GFP_KERNEL);
if (!sqi->bd) {
dev_err(&sqi->master->dev, "failed allocating dma buffer\n");
return -ENOMEM;
@ -656,7 +656,7 @@ static int pic32_sqi_probe(struct platform_device *pdev)
master->max_speed_hz = clk_get_rate(sqi->base_clk);
master->dma_alignment = 32;
master->max_dma_len = PESQI_BD_BUF_LEN_MAX;
master->dev.of_node = of_node_get(pdev->dev.of_node);
master->dev.of_node = pdev->dev.of_node;
master->mode_bits = SPI_MODE_3 | SPI_MODE_0 | SPI_TX_DUAL |
SPI_RX_DUAL | SPI_TX_QUAD | SPI_RX_QUAD;
master->flags = SPI_MASTER_HALF_DUPLEX;


@ -320,7 +320,7 @@ static int pic32_spi_dma_transfer(struct pic32_spi *pic32s,
desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
xfer->rx_sg.sgl,
xfer->rx_sg.nents,
DMA_FROM_DEVICE,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx) {
ret = -EINVAL;
@ -330,7 +330,7 @@ static int pic32_spi_dma_transfer(struct pic32_spi *pic32s,
desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
xfer->tx_sg.sgl,
xfer->tx_sg.nents,
DMA_TO_DEVICE,
DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
ret = -EINVAL;
@ -774,7 +774,7 @@ static int pic32_spi_probe(struct platform_device *pdev)
if (ret)
goto err_master;
master->dev.of_node = of_node_get(pdev->dev.of_node);
master->dev.of_node = pdev->dev.of_node;
master->mode_bits = SPI_MODE_3 | SPI_MODE_0 | SPI_CS_HIGH;
master->num_chipselect = 1; /* single chip-select */
master->max_speed_hz = clk_get_rate(pic32s->clk);


@ -1490,10 +1490,8 @@ static void do_polling_transfer(struct pl022 *pl022)
struct spi_message *message = NULL;
struct spi_transfer *transfer = NULL;
struct spi_transfer *previous = NULL;
struct chip_data *chip;
unsigned long time, timeout;
chip = pl022->cur_chip;
message = pl022->cur_msg;
while (message->state != STATE_DONE) {
@ -2325,10 +2323,8 @@ static int pl022_suspend(struct device *dev)
int ret;
ret = spi_master_suspend(pl022->master);
if (ret) {
dev_warn(dev, "cannot suspend master\n");
if (ret)
return ret;
}
ret = pm_runtime_force_suspend(dev);
if (ret) {
@ -2353,9 +2349,7 @@ static int pl022_resume(struct device *dev)
/* Start the queue running */
ret = spi_master_resume(pl022->master);
if (ret)
dev_err(dev, "problem starting queue (%d)\n", ret);
else
if (!ret)
dev_dbg(dev, "resumed\n");
return ret;


@ -33,6 +33,7 @@
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/acpi.h>
#include <linux/of_device.h>
#include "spi-pxa2xx.h"
@ -665,9 +666,11 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
bytes_left = drv_data->rx_end - drv_data->rx;
switch (drv_data->n_bytes) {
case 4:
bytes_left >>= 1;
bytes_left >>= 2;
break;
case 2:
bytes_left >>= 1;
break;
}
rx_thre = pxa2xx_spi_get_rx_default_thre(drv_data);
@ -1333,9 +1336,6 @@ static void cleanup(struct spi_device *spi)
kfree(chip);
}
#ifdef CONFIG_PCI
#ifdef CONFIG_ACPI
static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
{ "INT33C0", LPSS_LPT_SSP },
{ "INT33C1", LPSS_LPT_SSP },
@ -1347,23 +1347,6 @@ static const struct acpi_device_id pxa2xx_spi_acpi_match[] = {
};
MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
{
unsigned int devid;
int port_id = -1;
if (adev && adev->pnp.unique_id &&
!kstrtouint(adev->pnp.unique_id, 0, &devid))
port_id = devid;
return port_id;
}
#else /* !CONFIG_ACPI */
static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
{
return -1;
}
#endif
/*
* PCI IDs of compound devices that integrate both host controller and private
* integrated DMA engine. Please note these are not used in module
@ -1410,6 +1393,37 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = {
{ },
};
static const struct of_device_id pxa2xx_spi_of_match[] = {
{ .compatible = "marvell,mmp2-ssp", .data = (void *)MMP2_SSP },
{},
};
MODULE_DEVICE_TABLE(of, pxa2xx_spi_of_match);
#ifdef CONFIG_ACPI
static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
{
unsigned int devid;
int port_id = -1;
if (adev && adev->pnp.unique_id &&
!kstrtouint(adev->pnp.unique_id, 0, &devid))
port_id = devid;
return port_id;
}
#else /* !CONFIG_ACPI */
static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
{
return -1;
}
#endif /* CONFIG_ACPI */
#ifdef CONFIG_PCI
static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
{
struct device *dev = param;
@ -1420,6 +1434,8 @@ static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
return true;
}
#endif /* CONFIG_PCI */
static struct pxa2xx_spi_master *
pxa2xx_spi_init_pdata(struct platform_device *pdev)
{
@ -1429,11 +1445,15 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
struct resource *res;
const struct acpi_device_id *adev_id = NULL;
const struct pci_device_id *pcidev_id = NULL;
int type;
const struct of_device_id *of_id = NULL;
enum pxa_ssp_type type;
adev = ACPI_COMPANION(&pdev->dev);
if (dev_is_pci(pdev->dev.parent))
if (pdev->dev.of_node)
of_id = of_match_device(pdev->dev.driver->of_match_table,
&pdev->dev);
else if (dev_is_pci(pdev->dev.parent))
pcidev_id = pci_match_id(pxa2xx_spi_pci_compound_match,
to_pci_dev(pdev->dev.parent));
else if (adev)
@ -1443,9 +1463,11 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
return NULL;
if (adev_id)
type = (int)adev_id->driver_data;
type = (enum pxa_ssp_type)adev_id->driver_data;
else if (pcidev_id)
type = (int)pcidev_id->driver_data;
type = (enum pxa_ssp_type)pcidev_id->driver_data;
else if (of_id)
type = (enum pxa_ssp_type)of_id->data;
else
return NULL;
@ -1464,11 +1486,13 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
if (IS_ERR(ssp->mmio_base))
return NULL;
#ifdef CONFIG_PCI
if (pcidev_id) {
pdata->tx_param = pdev->dev.parent;
pdata->rx_param = pdev->dev.parent;
pdata->dma_filter = pxa2xx_spi_idma_filter;
}
#endif
ssp->clk = devm_clk_get(&pdev->dev, NULL);
ssp->irq = platform_get_irq(pdev, 0);
@ -1482,14 +1506,6 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
return pdata;
}
#else /* !CONFIG_PCI */
static inline struct pxa2xx_spi_master *
pxa2xx_spi_init_pdata(struct platform_device *pdev)
{
return NULL;
}
#endif
static int pxa2xx_spi_fw_translate_cs(struct spi_controller *master,
unsigned int cs)
{
@ -1764,14 +1780,6 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
return 0;
}
static void pxa2xx_spi_shutdown(struct platform_device *pdev)
{
int status = 0;
if ((status = pxa2xx_spi_remove(pdev)) != 0)
dev_err(&pdev->dev, "shutdown failed with %d\n", status);
}
#ifdef CONFIG_PM_SLEEP
static int pxa2xx_spi_suspend(struct device *dev)
{
@ -1808,13 +1816,7 @@ static int pxa2xx_spi_resume(struct device *dev)
lpss_ssp_setup(drv_data);
/* Start the queue running */
status = spi_controller_resume(drv_data->master);
if (status != 0) {
dev_err(dev, "problem starting queue (%d)\n", status);
return status;
}
return 0;
return spi_controller_resume(drv_data->master);
}
#endif
@ -1848,10 +1850,10 @@ static struct platform_driver driver = {
.name = "pxa2xx-spi",
.pm = &pxa2xx_spi_pm_ops,
.acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match),
.of_match_table = of_match_ptr(pxa2xx_spi_of_match),
},
.probe = pxa2xx_spi_probe,
.remove = pxa2xx_spi_remove,
.shutdown = pxa2xx_spi_shutdown,
};
static int __init pxa2xx_spi_init(void)

drivers/spi/spi-qcom-qspi.c (new file, 581 lines)

@ -0,0 +1,581 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2017-2018, The Linux foundation. All rights reserved.
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#define QSPI_NUM_CS 2
#define QSPI_BYTES_PER_WORD 4
#define MSTR_CONFIG 0x0000
#define FULL_CYCLE_MODE BIT(3)
#define FB_CLK_EN BIT(4)
#define PIN_HOLDN BIT(6)
#define PIN_WPN BIT(7)
#define DMA_ENABLE BIT(8)
#define BIG_ENDIAN_MODE BIT(9)
#define SPI_MODE_MSK 0xc00
#define SPI_MODE_SHFT 10
#define CHIP_SELECT_NUM BIT(12)
#define SBL_EN BIT(13)
#define LPA_BASE_MSK 0x3c000
#define LPA_BASE_SHFT 14
#define TX_DATA_DELAY_MSK 0xc0000
#define TX_DATA_DELAY_SHFT 18
#define TX_CLK_DELAY_MSK 0x300000
#define TX_CLK_DELAY_SHFT 20
#define TX_CS_N_DELAY_MSK 0xc00000
#define TX_CS_N_DELAY_SHFT 22
#define TX_DATA_OE_DELAY_MSK 0x3000000
#define TX_DATA_OE_DELAY_SHFT 24
#define AHB_MASTER_CFG 0x0004
#define HMEM_TYPE_START_MID_TRANS_MSK 0x7
#define HMEM_TYPE_START_MID_TRANS_SHFT 0
#define HMEM_TYPE_LAST_TRANS_MSK 0x38
#define HMEM_TYPE_LAST_TRANS_SHFT 3
#define USE_HMEMTYPE_LAST_ON_DESC_OR_CHAIN_MSK 0xc0
#define USE_HMEMTYPE_LAST_ON_DESC_OR_CHAIN_SHFT 6
#define HMEMTYPE_READ_TRANS_MSK 0x700
#define HMEMTYPE_READ_TRANS_SHFT 8
#define HSHARED BIT(11)
#define HINNERSHARED BIT(12)
#define MSTR_INT_EN 0x000C
#define MSTR_INT_STATUS 0x0010
#define RESP_FIFO_UNDERRUN BIT(0)
#define RESP_FIFO_NOT_EMPTY BIT(1)
#define RESP_FIFO_RDY BIT(2)
#define HRESP_FROM_NOC_ERR BIT(3)
#define WR_FIFO_EMPTY BIT(9)
#define WR_FIFO_FULL BIT(10)
#define WR_FIFO_OVERRUN BIT(11)
#define TRANSACTION_DONE BIT(16)
#define QSPI_ERR_IRQS (RESP_FIFO_UNDERRUN | HRESP_FROM_NOC_ERR | \
WR_FIFO_OVERRUN)
#define QSPI_ALL_IRQS (QSPI_ERR_IRQS | RESP_FIFO_RDY | \
WR_FIFO_EMPTY | WR_FIFO_FULL | \
TRANSACTION_DONE)
#define PIO_XFER_CTRL 0x0014
#define REQUEST_COUNT_MSK 0xffff
#define PIO_XFER_CFG 0x0018
#define TRANSFER_DIRECTION BIT(0)
#define MULTI_IO_MODE_MSK 0xe
#define MULTI_IO_MODE_SHFT 1
#define TRANSFER_FRAGMENT BIT(8)
#define SDR_1BIT 1
#define SDR_2BIT 2
#define SDR_4BIT 3
#define DDR_1BIT 5
#define DDR_2BIT 6
#define DDR_4BIT 7
#define DMA_DESC_SINGLE_SPI 1
#define DMA_DESC_DUAL_SPI 2
#define DMA_DESC_QUAD_SPI 3
#define PIO_XFER_STATUS 0x001c
#define WR_FIFO_BYTES_MSK 0xffff0000
#define WR_FIFO_BYTES_SHFT 16
#define PIO_DATAOUT_1B 0x0020
#define PIO_DATAOUT_4B 0x0024
#define RD_FIFO_STATUS 0x002c
#define FIFO_EMPTY BIT(11)
#define WR_CNTS_MSK 0x7f0
#define WR_CNTS_SHFT 4
#define RDY_64BYTE BIT(3)
#define RDY_32BYTE BIT(2)
#define RDY_16BYTE BIT(1)
#define FIFO_RDY BIT(0)
#define RD_FIFO_CFG 0x0028
#define CONTINUOUS_MODE BIT(0)
#define RD_FIFO_RESET 0x0030
#define RESET_FIFO BIT(0)
#define CUR_MEM_ADDR 0x0048
#define HW_VERSION 0x004c
#define RD_FIFO 0x0050
#define SAMPLING_CLK_CFG 0x0090
#define SAMPLING_CLK_STATUS 0x0094
enum qspi_dir {
QSPI_READ,
QSPI_WRITE,
};
struct qspi_xfer {
union {
const void *tx_buf;
void *rx_buf;
};
unsigned int rem_bytes;
unsigned int buswidth;
enum qspi_dir dir;
bool is_last;
};
enum qspi_clocks {
QSPI_CLK_CORE,
QSPI_CLK_IFACE,
QSPI_NUM_CLKS
};
struct qcom_qspi {
void __iomem *base;
struct device *dev;
struct clk_bulk_data clks[QSPI_NUM_CLKS];
struct qspi_xfer xfer;
/* Lock to protect data accessed by IRQs */
spinlock_t lock;
};
static u32 qspi_buswidth_to_iomode(struct qcom_qspi *ctrl,
unsigned int buswidth)
{
switch (buswidth) {
case 1:
return SDR_1BIT << MULTI_IO_MODE_SHFT;
case 2:
return SDR_2BIT << MULTI_IO_MODE_SHFT;
case 4:
return SDR_4BIT << MULTI_IO_MODE_SHFT;
default:
dev_warn_once(ctrl->dev,
"Unexpected bus width: %u\n", buswidth);
return SDR_1BIT << MULTI_IO_MODE_SHFT;
}
}
static void qcom_qspi_pio_xfer_cfg(struct qcom_qspi *ctrl)
{
u32 pio_xfer_cfg;
const struct qspi_xfer *xfer;
xfer = &ctrl->xfer;
pio_xfer_cfg = readl(ctrl->base + PIO_XFER_CFG);
pio_xfer_cfg &= ~TRANSFER_DIRECTION;
pio_xfer_cfg |= xfer->dir;
if (xfer->is_last)
pio_xfer_cfg &= ~TRANSFER_FRAGMENT;
else
pio_xfer_cfg |= TRANSFER_FRAGMENT;
pio_xfer_cfg &= ~MULTI_IO_MODE_MSK;
pio_xfer_cfg |= qspi_buswidth_to_iomode(ctrl, xfer->buswidth);
writel(pio_xfer_cfg, ctrl->base + PIO_XFER_CFG);
}
static void qcom_qspi_pio_xfer_ctrl(struct qcom_qspi *ctrl)
{
u32 pio_xfer_ctrl;
pio_xfer_ctrl = readl(ctrl->base + PIO_XFER_CTRL);
pio_xfer_ctrl &= ~REQUEST_COUNT_MSK;
pio_xfer_ctrl |= ctrl->xfer.rem_bytes;
writel(pio_xfer_ctrl, ctrl->base + PIO_XFER_CTRL);
}
static void qcom_qspi_pio_xfer(struct qcom_qspi *ctrl)
{
u32 ints;
qcom_qspi_pio_xfer_cfg(ctrl);
/* Ack any previous interrupts that might be hanging around */
writel(QSPI_ALL_IRQS, ctrl->base + MSTR_INT_STATUS);
/* Setup new interrupts */
if (ctrl->xfer.dir == QSPI_WRITE)
ints = QSPI_ERR_IRQS | WR_FIFO_EMPTY;
else
ints = QSPI_ERR_IRQS | RESP_FIFO_RDY;
writel(ints, ctrl->base + MSTR_INT_EN);
/* Kick off the transfer */
qcom_qspi_pio_xfer_ctrl(ctrl);
}
static void qcom_qspi_handle_err(struct spi_master *master,
struct spi_message *msg)
{
struct qcom_qspi *ctrl = spi_master_get_devdata(master);
unsigned long flags;
spin_lock_irqsave(&ctrl->lock, flags);
writel(0, ctrl->base + MSTR_INT_EN);
ctrl->xfer.rem_bytes = 0;
spin_unlock_irqrestore(&ctrl->lock, flags);
}
static int qcom_qspi_transfer_one(struct spi_master *master,
struct spi_device *slv,
struct spi_transfer *xfer)
{
struct qcom_qspi *ctrl = spi_master_get_devdata(master);
int ret;
unsigned long speed_hz;
unsigned long flags;
speed_hz = slv->max_speed_hz;
if (xfer->speed_hz)
speed_hz = xfer->speed_hz;
/* In regular operation (SBL_EN=1) core must be 4x transfer clock */
ret = clk_set_rate(ctrl->clks[QSPI_CLK_CORE].clk, speed_hz * 4);
if (ret) {
dev_err(ctrl->dev, "Failed to set core clk %d\n", ret);
return ret;
}
spin_lock_irqsave(&ctrl->lock, flags);
/* We are half duplex, so either rx or tx will be set */
if (xfer->rx_buf) {
ctrl->xfer.dir = QSPI_READ;
ctrl->xfer.buswidth = xfer->rx_nbits;
ctrl->xfer.rx_buf = xfer->rx_buf;
} else {
ctrl->xfer.dir = QSPI_WRITE;
ctrl->xfer.buswidth = xfer->tx_nbits;
ctrl->xfer.tx_buf = xfer->tx_buf;
}
ctrl->xfer.is_last = list_is_last(&xfer->transfer_list,
&master->cur_msg->transfers);
ctrl->xfer.rem_bytes = xfer->len;
qcom_qspi_pio_xfer(ctrl);
spin_unlock_irqrestore(&ctrl->lock, flags);
/* We'll call spi_finalize_current_transfer() when done */
return 1;
}
static int qcom_qspi_prepare_message(struct spi_master *master,
struct spi_message *message)
{
u32 mstr_cfg;
struct qcom_qspi *ctrl;
int tx_data_oe_delay = 1;
int tx_data_delay = 1;
unsigned long flags;
ctrl = spi_master_get_devdata(master);
spin_lock_irqsave(&ctrl->lock, flags);
mstr_cfg = readl(ctrl->base + MSTR_CONFIG);
mstr_cfg &= ~CHIP_SELECT_NUM;
if (message->spi->chip_select)
mstr_cfg |= CHIP_SELECT_NUM;
mstr_cfg |= FB_CLK_EN | PIN_WPN | PIN_HOLDN | SBL_EN | FULL_CYCLE_MODE;
mstr_cfg &= ~(SPI_MODE_MSK | TX_DATA_OE_DELAY_MSK | TX_DATA_DELAY_MSK);
mstr_cfg |= message->spi->mode << SPI_MODE_SHFT;
mstr_cfg |= tx_data_oe_delay << TX_DATA_OE_DELAY_SHFT;
mstr_cfg |= tx_data_delay << TX_DATA_DELAY_SHFT;
mstr_cfg &= ~DMA_ENABLE;
writel(mstr_cfg, ctrl->base + MSTR_CONFIG);
spin_unlock_irqrestore(&ctrl->lock, flags);
return 0;
}
static irqreturn_t pio_read(struct qcom_qspi *ctrl)
{
u32 rd_fifo_status;
u32 rd_fifo;
unsigned int wr_cnts;
unsigned int bytes_to_read;
unsigned int words_to_read;
u32 *word_buf;
u8 *byte_buf;
int i;
rd_fifo_status = readl(ctrl->base + RD_FIFO_STATUS);
if (!(rd_fifo_status & FIFO_RDY)) {
dev_dbg(ctrl->dev, "Spurious IRQ %#x\n", rd_fifo_status);
return IRQ_NONE;
}
wr_cnts = (rd_fifo_status & WR_CNTS_MSK) >> WR_CNTS_SHFT;
wr_cnts = min(wr_cnts, ctrl->xfer.rem_bytes);
words_to_read = wr_cnts / QSPI_BYTES_PER_WORD;
bytes_to_read = wr_cnts % QSPI_BYTES_PER_WORD;
if (words_to_read) {
word_buf = ctrl->xfer.rx_buf;
ctrl->xfer.rem_bytes -= words_to_read * QSPI_BYTES_PER_WORD;
ioread32_rep(ctrl->base + RD_FIFO, word_buf, words_to_read);
ctrl->xfer.rx_buf = word_buf + words_to_read;
}
if (bytes_to_read) {
byte_buf = ctrl->xfer.rx_buf;
rd_fifo = readl(ctrl->base + RD_FIFO);
ctrl->xfer.rem_bytes -= bytes_to_read;
for (i = 0; i < bytes_to_read; i++)
*byte_buf++ = rd_fifo >> (i * BITS_PER_BYTE);
ctrl->xfer.rx_buf = byte_buf;
}
return IRQ_HANDLED;
}
static irqreturn_t pio_write(struct qcom_qspi *ctrl)
{
const void *xfer_buf = ctrl->xfer.tx_buf;
const int *word_buf;
const char *byte_buf;
unsigned int wr_fifo_bytes;
unsigned int wr_fifo_words;
unsigned int wr_size;
unsigned int rem_words;
wr_fifo_bytes = readl(ctrl->base + PIO_XFER_STATUS);
wr_fifo_bytes >>= WR_FIFO_BYTES_SHFT;
if (ctrl->xfer.rem_bytes < QSPI_BYTES_PER_WORD) {
/* Process the last 1-3 bytes */
wr_size = min(wr_fifo_bytes, ctrl->xfer.rem_bytes);
ctrl->xfer.rem_bytes -= wr_size;
byte_buf = xfer_buf;
while (wr_size--)
writel(*byte_buf++,
ctrl->base + PIO_DATAOUT_1B);
ctrl->xfer.tx_buf = byte_buf;
} else {
/*
* Process all the whole words; to keep things simple we'll
* just wait for the next interrupt to handle the last 1-3
* bytes if we don't have an even number of words.
*/
rem_words = ctrl->xfer.rem_bytes / QSPI_BYTES_PER_WORD;
wr_fifo_words = wr_fifo_bytes / QSPI_BYTES_PER_WORD;
wr_size = min(rem_words, wr_fifo_words);
ctrl->xfer.rem_bytes -= wr_size * QSPI_BYTES_PER_WORD;
word_buf = xfer_buf;
iowrite32_rep(ctrl->base + PIO_DATAOUT_4B, word_buf, wr_size);
ctrl->xfer.tx_buf = word_buf + wr_size;
}
return IRQ_HANDLED;
}
static irqreturn_t qcom_qspi_irq(int irq, void *dev_id)
{
u32 int_status;
struct qcom_qspi *ctrl = dev_id;
irqreturn_t ret = IRQ_NONE;
unsigned long flags;
spin_lock_irqsave(&ctrl->lock, flags);
int_status = readl(ctrl->base + MSTR_INT_STATUS);
writel(int_status, ctrl->base + MSTR_INT_STATUS);
if (ctrl->xfer.dir == QSPI_WRITE) {
if (int_status & WR_FIFO_EMPTY)
ret = pio_write(ctrl);
} else {
if (int_status & RESP_FIFO_RDY)
ret = pio_read(ctrl);
}
if (int_status & QSPI_ERR_IRQS) {
if (int_status & RESP_FIFO_UNDERRUN)
dev_err(ctrl->dev, "IRQ error: FIFO underrun\n");
if (int_status & WR_FIFO_OVERRUN)
dev_err(ctrl->dev, "IRQ error: FIFO overrun\n");
if (int_status & HRESP_FROM_NOC_ERR)
dev_err(ctrl->dev, "IRQ error: NOC response error\n");
ret = IRQ_HANDLED;
}
if (!ctrl->xfer.rem_bytes) {
writel(0, ctrl->base + MSTR_INT_EN);
spi_finalize_current_transfer(dev_get_drvdata(ctrl->dev));
}
spin_unlock_irqrestore(&ctrl->lock, flags);
return ret;
}
static int qcom_qspi_probe(struct platform_device *pdev)
{
int ret;
struct device *dev;
struct resource *res;
struct spi_master *master;
struct qcom_qspi *ctrl;
dev = &pdev->dev;
master = spi_alloc_master(dev, sizeof(*ctrl));
if (!master)
return -ENOMEM;
platform_set_drvdata(pdev, master);
ctrl = spi_master_get_devdata(master);
spin_lock_init(&ctrl->lock);
ctrl->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ctrl->base = devm_ioremap_resource(dev, res);
if (IS_ERR(ctrl->base)) {
ret = PTR_ERR(ctrl->base);
goto exit_probe_master_put;
}
ctrl->clks[QSPI_CLK_CORE].id = "core";
ctrl->clks[QSPI_CLK_IFACE].id = "iface";
ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks);
if (ret)
goto exit_probe_master_put;
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
dev_err(dev, "Failed to get irq %d\n", ret);
goto exit_probe_master_put;
}
ret = devm_request_irq(dev, ret, qcom_qspi_irq,
IRQF_TRIGGER_HIGH, dev_name(dev), ctrl);
if (ret) {
dev_err(dev, "Failed to request irq %d\n", ret);
goto exit_probe_master_put;
}
master->max_speed_hz = 300000000;
master->num_chipselect = QSPI_NUM_CS;
master->bus_num = -1;
master->dev.of_node = pdev->dev.of_node;
master->mode_bits = SPI_MODE_0 |
SPI_TX_DUAL | SPI_RX_DUAL |
SPI_TX_QUAD | SPI_RX_QUAD;
master->flags = SPI_MASTER_HALF_DUPLEX;
master->prepare_message = qcom_qspi_prepare_message;
master->transfer_one = qcom_qspi_transfer_one;
master->handle_err = qcom_qspi_handle_err;
master->auto_runtime_pm = true;
pm_runtime_enable(dev);
ret = spi_register_master(master);
if (!ret)
return 0;
pm_runtime_disable(dev);
exit_probe_master_put:
spi_master_put(master);
return ret;
}
static int qcom_qspi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
/* Unregister _before_ disabling pm_runtime() so we stop transfers */
spi_unregister_master(master);
pm_runtime_disable(&pdev->dev);
return 0;
}
static int __maybe_unused qcom_qspi_runtime_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct qcom_qspi *ctrl = spi_master_get_devdata(master);
clk_bulk_disable_unprepare(QSPI_NUM_CLKS, ctrl->clks);
return 0;
}
static int __maybe_unused qcom_qspi_runtime_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct qcom_qspi *ctrl = spi_master_get_devdata(master);
return clk_bulk_prepare_enable(QSPI_NUM_CLKS, ctrl->clks);
}
static int __maybe_unused qcom_qspi_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
int ret;
ret = spi_master_suspend(master);
if (ret)
return ret;
ret = pm_runtime_force_suspend(dev);
if (ret)
spi_master_resume(master);
return ret;
}
static int __maybe_unused qcom_qspi_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
int ret;
ret = pm_runtime_force_resume(dev);
if (ret)
return ret;
ret = spi_master_resume(master);
if (ret)
pm_runtime_force_suspend(dev);
return ret;
}
static const struct dev_pm_ops qcom_qspi_dev_pm_ops = {
SET_RUNTIME_PM_OPS(qcom_qspi_runtime_suspend,
qcom_qspi_runtime_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(qcom_qspi_suspend, qcom_qspi_resume)
};
static const struct of_device_id qcom_qspi_dt_match[] = {
{ .compatible = "qcom,qspi-v1", },
{ }
};
MODULE_DEVICE_TABLE(of, qcom_qspi_dt_match);
static struct platform_driver qcom_qspi_driver = {
.driver = {
.name = "qcom_qspi",
.pm = &qcom_qspi_dev_pm_ops,
.of_match_table = qcom_qspi_dt_match,
},
.probe = qcom_qspi_probe,
.remove = qcom_qspi_remove,
};
module_platform_driver(qcom_qspi_driver);
MODULE_DESCRIPTION("SPI driver for QSPI cores");
MODULE_LICENSE("GPL v2");


@ -159,7 +159,7 @@ static int rb4xx_spi_probe(struct platform_device *pdev)
master->bus_num = 0;
master->num_chipselect = 3;
master->mode_bits = SPI_TX_DUAL;
master->bits_per_word_mask = BIT(7);
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->flags = SPI_MASTER_MUST_TX;
master->transfer_one = rb4xx_transfer_one;
master->set_cs = rb4xx_set_cs;
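The rb4xx change above is cosmetic: SPI_BPW_MASK(8) evaluates to BIT(7), so the supported word-size mask is unchanged but now reads as "8 bits per word". A one-line check of that equivalence, with the macro definitions copied locally for illustration:

#include <stdio.h>

#define BIT(n)		(1u << (n))
#define SPI_BPW_MASK(b)	BIT((b) - 1)	/* as in <linux/spi/spi.h> */

int main(void)
{
	/* Both print 0x80: bit 7 set means 8-bit words are supported. */
	printf("SPI_BPW_MASK(8)=0x%x BIT(7)=0x%x\n", SPI_BPW_MASK(8), BIT(7));
	return 0;
}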


@ -164,7 +164,6 @@ enum rockchip_ssi_type {
struct rockchip_spi_dma_data {
struct dma_chan *ch;
enum dma_transfer_direction direction;
dma_addr_t addr;
};
@ -202,12 +201,11 @@ struct rockchip_spi {
bool cs_asserted[ROCKCHIP_SPI_MAX_CS_NUM];
u32 use_dma;
bool use_dma;
struct sg_table tx_sg;
struct sg_table rx_sg;
struct rockchip_spi_dma_data dma_rx;
struct rockchip_spi_dma_data dma_tx;
struct dma_slave_caps dma_caps;
};
static inline void spi_enable_chip(struct rockchip_spi *rs, int enable)
@ -381,6 +379,8 @@ static int rockchip_spi_pio_transfer(struct rockchip_spi *rs)
{
int remain = 0;
spi_enable_chip(rs, 1);
do {
if (rs->tx) {
remain = rs->tx_end - rs->tx;
@ -455,19 +455,16 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs)
rxdesc = NULL;
if (rs->rx) {
rxconf.direction = rs->dma_rx.direction;
rxconf.direction = DMA_DEV_TO_MEM;
rxconf.src_addr = rs->dma_rx.addr;
rxconf.src_addr_width = rs->n_bytes;
if (rs->dma_caps.max_burst > 4)
rxconf.src_maxburst = 4;
else
rxconf.src_maxburst = 1;
rxconf.src_maxburst = 1;
dmaengine_slave_config(rs->dma_rx.ch, &rxconf);
rxdesc = dmaengine_prep_slave_sg(
rs->dma_rx.ch,
rs->rx_sg.sgl, rs->rx_sg.nents,
rs->dma_rx.direction, DMA_PREP_INTERRUPT);
DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
if (!rxdesc)
return -EINVAL;
@ -477,19 +474,16 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs)
txdesc = NULL;
if (rs->tx) {
txconf.direction = rs->dma_tx.direction;
txconf.direction = DMA_MEM_TO_DEV;
txconf.dst_addr = rs->dma_tx.addr;
txconf.dst_addr_width = rs->n_bytes;
if (rs->dma_caps.max_burst > 4)
txconf.dst_maxburst = 4;
else
txconf.dst_maxburst = 1;
txconf.dst_maxburst = rs->fifo_len / 2;
dmaengine_slave_config(rs->dma_tx.ch, &txconf);
txdesc = dmaengine_prep_slave_sg(
rs->dma_tx.ch,
rs->tx_sg.sgl, rs->tx_sg.nents,
rs->dma_tx.direction, DMA_PREP_INTERRUPT);
DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
if (!txdesc) {
if (rxdesc)
dmaengine_terminate_sync(rs->dma_rx.ch);
@ -509,6 +503,8 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs)
dma_async_issue_pending(rs->dma_rx.ch);
}
spi_enable_chip(rs, 1);
if (txdesc) {
spin_lock_irqsave(&rs->lock, flags);
rs->state |= TXBUSY;
@ -517,7 +513,8 @@ static int rockchip_spi_prepare_dma(struct rockchip_spi *rs)
dma_async_issue_pending(rs->dma_tx.ch);
}
return 0;
/* 1 means the transfer is in progress */
return 1;
}
static void rockchip_spi_config(struct rockchip_spi *rs)
@ -581,7 +578,7 @@ static void rockchip_spi_config(struct rockchip_spi *rs)
writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_TXFTLR);
writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMATDLR);
writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_DMATDLR);
writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMARDLR);
writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);
@ -600,7 +597,6 @@ static int rockchip_spi_transfer_one(
struct spi_device *spi,
struct spi_transfer *xfer)
{
int ret = 0;
struct rockchip_spi *rs = spi_master_get_devdata(master);
WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
@ -638,30 +634,16 @@ static int rockchip_spi_transfer_one(
/* we need to prepare DMA before the SPI is enabled */
if (master->can_dma && master->can_dma(master, spi, xfer))
rs->use_dma = 1;
rs->use_dma = true;
else
rs->use_dma = 0;
rs->use_dma = false;
rockchip_spi_config(rs);
if (rs->use_dma) {
if (rs->tmode == CR0_XFM_RO) {
/* rx: dma must be prepared first */
ret = rockchip_spi_prepare_dma(rs);
spi_enable_chip(rs, 1);
} else {
/* tx or tr: spi must be enabled first */
spi_enable_chip(rs, 1);
ret = rockchip_spi_prepare_dma(rs);
}
/* successful DMA prepare means the transfer is in progress */
ret = ret ? ret : 1;
} else {
spi_enable_chip(rs, 1);
ret = rockchip_spi_pio_transfer(rs);
}
if (rs->use_dma)
return rockchip_spi_prepare_dma(rs);
return ret;
return rockchip_spi_pio_transfer(rs);
}
static bool rockchip_spi_can_dma(struct spi_master *master,
@ -783,11 +765,8 @@ static int rockchip_spi_probe(struct platform_device *pdev)
}
if (rs->dma_tx.ch && rs->dma_rx.ch) {
dma_get_slave_caps(rs->dma_rx.ch, &(rs->dma_caps));
rs->dma_tx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_TXDR);
rs->dma_rx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_RXDR);
rs->dma_tx.direction = DMA_MEM_TO_DEV;
rs->dma_rx.direction = DMA_DEV_TO_MEM;
master->can_dma = rockchip_spi_can_dma;
master->dma_tx = rs->dma_tx.ch;
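The rockchip hunks above program the TX DMA watermark to half the FIFO (DMATDLR = fifo_len / 2 - 1) with a matching dst_maxburst of fifo_len / 2 words, while RX keeps src_maxburst at 1. A trivial arithmetic sketch of those values (a 32-word FIFO is only an assumed example):

#include <stdio.h>

int main(void)
{
	unsigned int fifo_len = 32;			/* words, assumed */
	unsigned int tx_burst = fifo_len / 2;		/* dst_maxburst */
	unsigned int tx_watermark = fifo_len / 2 - 1;	/* DMATDLR */
	unsigned int rx_burst = 1;			/* src_maxburst */

	printf("tx burst=%u tx watermark=%u rx burst=%u\n",
	       tx_burst, tx_watermark, rx_burst);
	return 0;
}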


@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/*
* SH RSPI driver
*
@ -6,15 +7,6 @@
*
* Based on spi-sh.c:
* Copyright (C) 2011 Renesas Solutions Corp.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>


@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/*
* SuperH HSPI bus driver
*
@ -7,15 +8,6 @@
* Based on pxa2xx_spi.c:
* Copyright (C) 2011 Renesas Solutions Corp.
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/clk.h>
@ -316,6 +308,6 @@ static struct platform_driver hspi_driver = {
module_platform_driver(hspi_driver);
MODULE_DESCRIPTION("SuperH HSPI bus driver");
MODULE_LICENSE("GPL");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
MODULE_ALIAS("platform:sh-hspi");


@ -1,14 +1,10 @@
// SPDX-License-Identifier: GPL-2.0
/*
* SuperH MSIOF SPI Master Interface
*
* Copyright (c) 2009 Magnus Damm
* Copyright (C) 2014 Renesas Electronics Corporation
* Copyright (C) 2014-2017 Glider bvba
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/bitmap.h>


@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/*
* SH SPI bus driver
*
@ -5,15 +6,6 @@
*
* Based on pxa2xx_spi.c:
* Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
@ -522,6 +514,6 @@ static struct platform_driver spi_sh_driver = {
module_platform_driver(spi_sh_driver);
MODULE_DESCRIPTION("SH SPI bus driver");
MODULE_LICENSE("GPL");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Yoshihiro Shimoda");
MODULE_ALIAS("platform:sh_spi");


@ -0,0 +1,554 @@
// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2018 MediaTek Inc.
#include <linux/clk.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#define SPIS_IRQ_EN_REG 0x0
#define SPIS_IRQ_CLR_REG 0x4
#define SPIS_IRQ_ST_REG 0x8
#define SPIS_IRQ_MASK_REG 0xc
#define SPIS_CFG_REG 0x10
#define SPIS_RX_DATA_REG 0x14
#define SPIS_TX_DATA_REG 0x18
#define SPIS_RX_DST_REG 0x1c
#define SPIS_TX_SRC_REG 0x20
#define SPIS_DMA_CFG_REG 0x30
#define SPIS_SOFT_RST_REG 0x40
/* SPIS_IRQ_EN_REG */
#define DMA_DONE_EN BIT(7)
#define DATA_DONE_EN BIT(2)
#define RSTA_DONE_EN BIT(1)
#define CMD_INVALID_EN BIT(0)
/* SPIS_IRQ_ST_REG */
#define DMA_DONE_ST BIT(7)
#define DATA_DONE_ST BIT(2)
#define RSTA_DONE_ST BIT(1)
#define CMD_INVALID_ST BIT(0)
/* SPIS_IRQ_MASK_REG */
#define DMA_DONE_MASK BIT(7)
#define DATA_DONE_MASK BIT(2)
#define RSTA_DONE_MASK BIT(1)
#define CMD_INVALID_MASK BIT(0)
/* SPIS_CFG_REG */
#define SPIS_TX_ENDIAN BIT(7)
#define SPIS_RX_ENDIAN BIT(6)
#define SPIS_TXMSBF BIT(5)
#define SPIS_RXMSBF BIT(4)
#define SPIS_CPHA BIT(3)
#define SPIS_CPOL BIT(2)
#define SPIS_TX_EN BIT(1)
#define SPIS_RX_EN BIT(0)
/* SPIS_DMA_CFG_REG */
#define TX_DMA_TRIG_EN BIT(31)
#define TX_DMA_EN BIT(30)
#define RX_DMA_EN BIT(29)
#define TX_DMA_LEN 0xfffff
/* SPIS_SOFT_RST_REG */
#define SPIS_DMA_ADDR_EN BIT(1)
#define SPIS_SOFT_RST BIT(0)
#define MTK_SPI_SLAVE_MAX_FIFO_SIZE 512U
struct mtk_spi_slave {
struct device *dev;
void __iomem *base;
struct clk *spi_clk;
struct completion xfer_done;
struct spi_transfer *cur_transfer;
bool slave_aborted;
};
static const struct of_device_id mtk_spi_slave_of_match[] = {
{ .compatible = "mediatek,mt2712-spi-slave", },
{}
};
MODULE_DEVICE_TABLE(of, mtk_spi_slave_of_match);
static void mtk_spi_slave_disable_dma(struct mtk_spi_slave *mdata)
{
u32 reg_val;
reg_val = readl(mdata->base + SPIS_DMA_CFG_REG);
reg_val &= ~RX_DMA_EN;
reg_val &= ~TX_DMA_EN;
writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
}
static void mtk_spi_slave_disable_xfer(struct mtk_spi_slave *mdata)
{
u32 reg_val;
reg_val = readl(mdata->base + SPIS_CFG_REG);
reg_val &= ~SPIS_TX_EN;
reg_val &= ~SPIS_RX_EN;
writel(reg_val, mdata->base + SPIS_CFG_REG);
}
static int mtk_spi_slave_wait_for_completion(struct mtk_spi_slave *mdata)
{
if (wait_for_completion_interruptible(&mdata->xfer_done) ||
mdata->slave_aborted) {
dev_err(mdata->dev, "interrupted\n");
return -EINTR;
}
return 0;
}
static int mtk_spi_slave_prepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
struct spi_device *spi = msg->spi;
bool cpha, cpol;
u32 reg_val;
cpha = spi->mode & SPI_CPHA ? 1 : 0;
cpol = spi->mode & SPI_CPOL ? 1 : 0;
reg_val = readl(mdata->base + SPIS_CFG_REG);
if (cpha)
reg_val |= SPIS_CPHA;
else
reg_val &= ~SPIS_CPHA;
if (cpol)
reg_val |= SPIS_CPOL;
else
reg_val &= ~SPIS_CPOL;
if (spi->mode & SPI_LSB_FIRST)
reg_val &= ~(SPIS_TXMSBF | SPIS_RXMSBF);
else
reg_val |= SPIS_TXMSBF | SPIS_RXMSBF;
reg_val &= ~SPIS_TX_ENDIAN;
reg_val &= ~SPIS_RX_ENDIAN;
writel(reg_val, mdata->base + SPIS_CFG_REG);
return 0;
}
static int mtk_spi_slave_fifo_transfer(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
int reg_val, cnt, remainder, ret;
writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
reg_val = readl(mdata->base + SPIS_CFG_REG);
if (xfer->rx_buf)
reg_val |= SPIS_RX_EN;
if (xfer->tx_buf)
reg_val |= SPIS_TX_EN;
writel(reg_val, mdata->base + SPIS_CFG_REG);
cnt = xfer->len / 4;
if (xfer->tx_buf)
iowrite32_rep(mdata->base + SPIS_TX_DATA_REG,
xfer->tx_buf, cnt);
remainder = xfer->len % 4;
if (xfer->tx_buf && remainder > 0) {
reg_val = 0;
memcpy(&reg_val, xfer->tx_buf + cnt * 4, remainder);
writel(reg_val, mdata->base + SPIS_TX_DATA_REG);
}
ret = mtk_spi_slave_wait_for_completion(mdata);
if (ret) {
mtk_spi_slave_disable_xfer(mdata);
writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
}
return ret;
}
static int mtk_spi_slave_dma_transfer(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
struct device *dev = mdata->dev;
int reg_val, ret;
writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
if (xfer->tx_buf) {
/* tx_buf is a const void* where we need a void * for
* the dma mapping
*/
void *nonconst_tx = (void *)xfer->tx_buf;
xfer->tx_dma = dma_map_single(dev, nonconst_tx,
xfer->len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, xfer->tx_dma)) {
ret = -ENOMEM;
goto disable_transfer;
}
}
if (xfer->rx_buf) {
xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
xfer->len, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, xfer->rx_dma)) {
ret = -ENOMEM;
goto unmap_txdma;
}
}
writel(xfer->tx_dma, mdata->base + SPIS_TX_SRC_REG);
writel(xfer->rx_dma, mdata->base + SPIS_RX_DST_REG);
writel(SPIS_DMA_ADDR_EN, mdata->base + SPIS_SOFT_RST_REG);
/* enable config reg tx rx_enable */
reg_val = readl(mdata->base + SPIS_CFG_REG);
if (xfer->tx_buf)
reg_val |= SPIS_TX_EN;
if (xfer->rx_buf)
reg_val |= SPIS_RX_EN;
writel(reg_val, mdata->base + SPIS_CFG_REG);
/* config dma */
reg_val = 0;
reg_val |= (xfer->len - 1) & TX_DMA_LEN;
writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
reg_val = readl(mdata->base + SPIS_DMA_CFG_REG);
if (xfer->tx_buf)
reg_val |= TX_DMA_EN;
if (xfer->rx_buf)
reg_val |= RX_DMA_EN;
reg_val |= TX_DMA_TRIG_EN;
writel(reg_val, mdata->base + SPIS_DMA_CFG_REG);
ret = mtk_spi_slave_wait_for_completion(mdata);
if (ret)
goto unmap_rxdma;
return 0;
unmap_rxdma:
if (xfer->rx_buf)
dma_unmap_single(dev, xfer->rx_dma,
xfer->len, DMA_FROM_DEVICE);
unmap_txdma:
if (xfer->tx_buf)
dma_unmap_single(dev, xfer->tx_dma,
xfer->len, DMA_TO_DEVICE);
disable_transfer:
mtk_spi_slave_disable_dma(mdata);
mtk_spi_slave_disable_xfer(mdata);
writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
return ret;
}
static int mtk_spi_slave_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
reinit_completion(&mdata->xfer_done);
mdata->slave_aborted = false;
mdata->cur_transfer = xfer;
if (xfer->len > MTK_SPI_SLAVE_MAX_FIFO_SIZE)
return mtk_spi_slave_dma_transfer(ctlr, spi, xfer);
else
return mtk_spi_slave_fifo_transfer(ctlr, spi, xfer);
}
static int mtk_spi_slave_setup(struct spi_device *spi)
{
struct mtk_spi_slave *mdata = spi_controller_get_devdata(spi->master);
u32 reg_val;
reg_val = DMA_DONE_EN | DATA_DONE_EN |
RSTA_DONE_EN | CMD_INVALID_EN;
writel(reg_val, mdata->base + SPIS_IRQ_EN_REG);
reg_val = DMA_DONE_MASK | DATA_DONE_MASK |
RSTA_DONE_MASK | CMD_INVALID_MASK;
writel(reg_val, mdata->base + SPIS_IRQ_MASK_REG);
mtk_spi_slave_disable_dma(mdata);
mtk_spi_slave_disable_xfer(mdata);
return 0;
}
static int mtk_slave_abort(struct spi_controller *ctlr)
{
struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
mdata->slave_aborted = true;
complete(&mdata->xfer_done);
return 0;
}
static irqreturn_t mtk_spi_slave_interrupt(int irq, void *dev_id)
{
struct spi_controller *ctlr = dev_id;
struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
struct spi_transfer *trans = mdata->cur_transfer;
u32 int_status, reg_val, cnt, remainder;
int_status = readl(mdata->base + SPIS_IRQ_ST_REG);
writel(int_status, mdata->base + SPIS_IRQ_CLR_REG);
if (!trans)
return IRQ_NONE;
if ((int_status & DMA_DONE_ST) &&
((int_status & DATA_DONE_ST) ||
(int_status & RSTA_DONE_ST))) {
writel(SPIS_SOFT_RST, mdata->base + SPIS_SOFT_RST_REG);
if (trans->tx_buf)
dma_unmap_single(mdata->dev, trans->tx_dma,
trans->len, DMA_TO_DEVICE);
if (trans->rx_buf)
dma_unmap_single(mdata->dev, trans->rx_dma,
trans->len, DMA_FROM_DEVICE);
mtk_spi_slave_disable_dma(mdata);
mtk_spi_slave_disable_xfer(mdata);
}
if ((!(int_status & DMA_DONE_ST)) &&
((int_status & DATA_DONE_ST) ||
(int_status & RSTA_DONE_ST))) {
cnt = trans->len / 4;
if (trans->rx_buf)
ioread32_rep(mdata->base + SPIS_RX_DATA_REG,
trans->rx_buf, cnt);
remainder = trans->len % 4;
if (trans->rx_buf && remainder > 0) {
reg_val = readl(mdata->base + SPIS_RX_DATA_REG);
memcpy(trans->rx_buf + (cnt * 4),
&reg_val, remainder);
}
mtk_spi_slave_disable_xfer(mdata);
}
if (int_status & CMD_INVALID_ST) {
dev_warn(&ctlr->dev, "cmd invalid\n");
return IRQ_NONE;
}
mdata->cur_transfer = NULL;
complete(&mdata->xfer_done);
return IRQ_HANDLED;
}
static int mtk_spi_slave_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
struct mtk_spi_slave *mdata;
struct resource *res;
int irq, ret;
ctlr = spi_alloc_slave(&pdev->dev, sizeof(*mdata));
if (!ctlr) {
dev_err(&pdev->dev, "failed to alloc spi slave\n");
return -ENOMEM;
}
ctlr->auto_runtime_pm = true;
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
ctlr->mode_bits |= SPI_LSB_FIRST;
ctlr->prepare_message = mtk_spi_slave_prepare_message;
ctlr->transfer_one = mtk_spi_slave_transfer_one;
ctlr->setup = mtk_spi_slave_setup;
ctlr->slave_abort = mtk_slave_abort;
mdata = spi_controller_get_devdata(ctlr);
platform_set_drvdata(pdev, ctlr);
init_completion(&mdata->xfer_done);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
ret = -ENODEV;
dev_err(&pdev->dev, "failed to determine base address\n");
goto err_put_ctlr;
}
mdata->dev = &pdev->dev;
mdata->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(mdata->base)) {
ret = PTR_ERR(mdata->base);
goto err_put_ctlr;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "failed to get irq (%d)\n", irq);
ret = irq;
goto err_put_ctlr;
}
ret = devm_request_irq(&pdev->dev, irq, mtk_spi_slave_interrupt,
IRQF_TRIGGER_NONE, dev_name(&pdev->dev), ctlr);
if (ret) {
dev_err(&pdev->dev, "failed to register irq (%d)\n", ret);
goto err_put_ctlr;
}
mdata->spi_clk = devm_clk_get(&pdev->dev, "spi");
if (IS_ERR(mdata->spi_clk)) {
ret = PTR_ERR(mdata->spi_clk);
dev_err(&pdev->dev, "failed to get spi-clk: %d\n", ret);
goto err_put_ctlr;
}
ret = clk_prepare_enable(mdata->spi_clk);
if (ret < 0) {
dev_err(&pdev->dev, "failed to enable spi_clk (%d)\n", ret);
goto err_put_ctlr;
}
pm_runtime_enable(&pdev->dev);
ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret) {
dev_err(&pdev->dev,
"failed to register slave controller(%d)\n", ret);
clk_disable_unprepare(mdata->spi_clk);
goto err_disable_runtime_pm;
}
clk_disable_unprepare(mdata->spi_clk);
return 0;
err_disable_runtime_pm:
pm_runtime_disable(&pdev->dev);
err_put_ctlr:
spi_controller_put(ctlr);
return ret;
}
static int mtk_spi_slave_remove(struct platform_device *pdev)
{
pm_runtime_disable(&pdev->dev);
return 0;
}
#ifdef CONFIG_PM_SLEEP
static int mtk_spi_slave_suspend(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
int ret;
ret = spi_controller_suspend(ctlr);
if (ret)
return ret;
if (!pm_runtime_suspended(dev))
clk_disable_unprepare(mdata->spi_clk);
return ret;
}
static int mtk_spi_slave_resume(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
int ret;
if (!pm_runtime_suspended(dev)) {
ret = clk_prepare_enable(mdata->spi_clk);
if (ret < 0) {
dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
return ret;
}
}
ret = spi_controller_resume(ctlr);
if (ret < 0)
clk_disable_unprepare(mdata->spi_clk);
return ret;
}
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM
static int mtk_spi_slave_runtime_suspend(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
clk_disable_unprepare(mdata->spi_clk);
return 0;
}
static int mtk_spi_slave_runtime_resume(struct device *dev)
{
struct spi_controller *ctlr = dev_get_drvdata(dev);
struct mtk_spi_slave *mdata = spi_controller_get_devdata(ctlr);
int ret;
ret = clk_prepare_enable(mdata->spi_clk);
if (ret < 0) {
dev_err(dev, "failed to enable spi_clk (%d)\n", ret);
return ret;
}
return 0;
}
#endif /* CONFIG_PM */
static const struct dev_pm_ops mtk_spi_slave_pm = {
SET_SYSTEM_SLEEP_PM_OPS(mtk_spi_slave_suspend, mtk_spi_slave_resume)
SET_RUNTIME_PM_OPS(mtk_spi_slave_runtime_suspend,
mtk_spi_slave_runtime_resume, NULL)
};
static struct platform_driver mtk_spi_slave_driver = {
.driver = {
.name = "mtk-spi-slave",
.pm = &mtk_spi_slave_pm,
.of_match_table = mtk_spi_slave_of_match,
},
.probe = mtk_spi_slave_probe,
.remove = mtk_spi_slave_remove,
};
module_platform_driver(mtk_spi_slave_driver);
MODULE_DESCRIPTION("MTK SPI Slave Controller driver");
MODULE_AUTHOR("Leilk Liu <leilk.liu@mediatek.com>");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:mtk-spi-slave");


@ -60,6 +60,7 @@ static void spi_slave_system_control_complete(void *arg)
case CMD_REBOOT:
dev_info(&priv->spi->dev, "Rebooting system...\n");
kernel_restart(NULL);
break;
case CMD_POWEROFF:
dev_info(&priv->spi->dev, "Powering off system...\n");

drivers/spi/spi-sprd.c (new file, 745 lines)

@ -0,0 +1,745 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Spreadtrum Communications Inc.
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#define SPRD_SPI_TXD 0x0
#define SPRD_SPI_CLKD 0x4
#define SPRD_SPI_CTL0 0x8
#define SPRD_SPI_CTL1 0xc
#define SPRD_SPI_CTL2 0x10
#define SPRD_SPI_CTL3 0x14
#define SPRD_SPI_CTL4 0x18
#define SPRD_SPI_CTL5 0x1c
#define SPRD_SPI_INT_EN 0x20
#define SPRD_SPI_INT_CLR 0x24
#define SPRD_SPI_INT_RAW_STS 0x28
#define SPRD_SPI_INT_MASK_STS 0x2c
#define SPRD_SPI_STS1 0x30
#define SPRD_SPI_STS2 0x34
#define SPRD_SPI_DSP_WAIT 0x38
#define SPRD_SPI_STS3 0x3c
#define SPRD_SPI_CTL6 0x40
#define SPRD_SPI_STS4 0x44
#define SPRD_SPI_FIFO_RST 0x48
#define SPRD_SPI_CTL7 0x4c
#define SPRD_SPI_STS5 0x50
#define SPRD_SPI_CTL8 0x54
#define SPRD_SPI_CTL9 0x58
#define SPRD_SPI_CTL10 0x5c
#define SPRD_SPI_CTL11 0x60
#define SPRD_SPI_CTL12 0x64
#define SPRD_SPI_STS6 0x68
#define SPRD_SPI_STS7 0x6c
#define SPRD_SPI_STS8 0x70
#define SPRD_SPI_STS9 0x74
/* Bits & mask definition for register CTL0 */
#define SPRD_SPI_SCK_REV BIT(13)
#define SPRD_SPI_NG_TX BIT(1)
#define SPRD_SPI_NG_RX BIT(0)
#define SPRD_SPI_CHNL_LEN_MASK GENMASK(4, 0)
#define SPRD_SPI_CSN_MASK GENMASK(11, 8)
#define SPRD_SPI_CS0_VALID BIT(8)
/* Bits & mask definition for register SPI_INT_EN */
#define SPRD_SPI_TX_END_INT_EN BIT(8)
#define SPRD_SPI_RX_END_INT_EN BIT(9)
/* Bits & mask definition for register SPI_INT_RAW_STS */
#define SPRD_SPI_TX_END_RAW BIT(8)
#define SPRD_SPI_RX_END_RAW BIT(9)
/* Bits & mask definition for register SPI_INT_CLR */
#define SPRD_SPI_TX_END_CLR BIT(8)
#define SPRD_SPI_RX_END_CLR BIT(9)
/* Bits & mask definition for register INT_MASK_STS */
#define SPRD_SPI_MASK_RX_END BIT(9)
#define SPRD_SPI_MASK_TX_END BIT(8)
/* Bits & mask definition for register STS2 */
#define SPRD_SPI_TX_BUSY BIT(8)
/* Bits & mask definition for register CTL1 */
#define SPRD_SPI_RX_MODE BIT(12)
#define SPRD_SPI_TX_MODE BIT(13)
#define SPRD_SPI_RTX_MD_MASK GENMASK(13, 12)
/* Bits & mask definition for register CTL2 */
#define SPRD_SPI_DMA_EN BIT(6)
/* Bits & mask definition for register CTL4 */
#define SPRD_SPI_START_RX BIT(9)
#define SPRD_SPI_ONLY_RECV_MASK GENMASK(8, 0)
/* Bits & mask definition for register SPI_INT_CLR */
#define SPRD_SPI_RX_END_INT_CLR BIT(9)
#define SPRD_SPI_TX_END_INT_CLR BIT(8)
/* Bits & mask definition for register SPI_INT_RAW */
#define SPRD_SPI_RX_END_IRQ BIT(9)
#define SPRD_SPI_TX_END_IRQ BIT(8)
/* Bits & mask definition for register CTL12 */
#define SPRD_SPI_SW_RX_REQ BIT(0)
#define SPRD_SPI_SW_TX_REQ BIT(1)
/* Bits & mask definition for register CTL7 */
#define SPRD_SPI_DATA_LINE2_EN BIT(15)
#define SPRD_SPI_MODE_MASK GENMASK(5, 3)
#define SPRD_SPI_MODE_OFFSET 3
#define SPRD_SPI_3WIRE_MODE 4
#define SPRD_SPI_4WIRE_MODE 0
/* Bits & mask definition for register CTL8 */
#define SPRD_SPI_TX_MAX_LEN_MASK GENMASK(19, 0)
#define SPRD_SPI_TX_LEN_H_MASK GENMASK(3, 0)
#define SPRD_SPI_TX_LEN_H_OFFSET 16
/* Bits & mask definition for register CTL9 */
#define SPRD_SPI_TX_LEN_L_MASK GENMASK(15, 0)
/* Bits & mask definition for register CTL10 */
#define SPRD_SPI_RX_MAX_LEN_MASK GENMASK(19, 0)
#define SPRD_SPI_RX_LEN_H_MASK GENMASK(3, 0)
#define SPRD_SPI_RX_LEN_H_OFFSET 16
/* Bits & mask definition for register CTL11 */
#define SPRD_SPI_RX_LEN_L_MASK GENMASK(15, 0)
/* Default & maximum word delay cycles */
#define SPRD_SPI_MIN_DELAY_CYCLE 14
#define SPRD_SPI_MAX_DELAY_CYCLE 130
#define SPRD_SPI_FIFO_SIZE 32
#define SPRD_SPI_CHIP_CS_NUM 0x4
#define SPRD_SPI_CHNL_LEN 2
#define SPRD_SPI_DEFAULT_SOURCE 26000000
#define SPRD_SPI_MAX_SPEED_HZ 48000000
#define SPRD_SPI_AUTOSUSPEND_DELAY 100
struct sprd_spi {
void __iomem *base;
struct device *dev;
struct clk *clk;
u32 src_clk;
u32 hw_mode;
u32 trans_len;
u32 trans_mode;
u32 word_delay;
u32 hw_speed_hz;
u32 len;
int status;
const void *tx_buf;
void *rx_buf;
int (*read_bufs)(struct sprd_spi *ss, u32 len);
int (*write_bufs)(struct sprd_spi *ss, u32 len);
};
static u32 sprd_spi_transfer_max_timeout(struct sprd_spi *ss,
struct spi_transfer *t)
{
/*
* The time spent on transmission of the full FIFO data is the maximum
* SPI transmission time.
*/
u32 size = t->bits_per_word * SPRD_SPI_FIFO_SIZE;
u32 bit_time_us = DIV_ROUND_UP(USEC_PER_SEC, ss->hw_speed_hz);
u32 total_time_us = size * bit_time_us;
/*
* There is an interval between data words in our SPI hardware, so the
* total transmission time needs to include that interval time.
*/
u32 interval_cycle = SPRD_SPI_FIFO_SIZE * ss->word_delay;
u32 interval_time_us = DIV_ROUND_UP(interval_cycle * USEC_PER_SEC,
ss->src_clk);
return total_time_us + interval_time_us;
}
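
As a worked illustration of the timeout arithmetic above, the standalone sketch below recomputes it in userspace with assumed example values (8 bits per word, the 32-entry FIFO, a 1 MHz bus clock, a 26 MHz source clock and the minimum 14-cycle word delay); none of these numbers come from the driver itself.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Example values only, not taken from any real configuration */
	unsigned int bits_per_word = 8, fifo_size = 32;
	unsigned int hw_speed_hz = 1000000, src_clk = 26000000, word_delay = 14;

	unsigned int size = bits_per_word * fifo_size;			/* 256 bits */
	unsigned int bit_time_us = DIV_ROUND_UP(1000000u, hw_speed_hz);	/* 1 us */
	unsigned int total_time_us = size * bit_time_us;		/* 256 us */

	unsigned long long interval_cycle = (unsigned long long)fifo_size * word_delay;
	unsigned int interval_time_us =
		DIV_ROUND_UP(interval_cycle * 1000000ULL, src_clk);	/* 18 us */

	printf("timeout = %u us\n", total_time_us + interval_time_us);	/* 274 us */
	return 0;
}
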
static int sprd_spi_wait_for_tx_end(struct sprd_spi *ss, struct spi_transfer *t)
{
u32 val, us;
int ret;
us = sprd_spi_transfer_max_timeout(ss, t);
ret = readl_relaxed_poll_timeout(ss->base + SPRD_SPI_INT_RAW_STS, val,
val & SPRD_SPI_TX_END_IRQ, 0, us);
if (ret) {
dev_err(ss->dev, "SPI error, spi send timeout!\n");
return ret;
}
ret = readl_relaxed_poll_timeout(ss->base + SPRD_SPI_STS2, val,
!(val & SPRD_SPI_TX_BUSY), 0, us);
if (ret) {
dev_err(ss->dev, "SPI error, spi busy timeout!\n");
return ret;
}
writel_relaxed(SPRD_SPI_TX_END_INT_CLR, ss->base + SPRD_SPI_INT_CLR);
return 0;
}
static int sprd_spi_wait_for_rx_end(struct sprd_spi *ss, struct spi_transfer *t)
{
u32 val, us;
int ret;
us = sprd_spi_transfer_max_timeout(ss, t);
ret = readl_relaxed_poll_timeout(ss->base + SPRD_SPI_INT_RAW_STS, val,
val & SPRD_SPI_RX_END_IRQ, 0, us);
if (ret) {
dev_err(ss->dev, "SPI error, spi rx timeout!\n");
return ret;
}
writel_relaxed(SPRD_SPI_RX_END_INT_CLR, ss->base + SPRD_SPI_INT_CLR);
return 0;
}
static void sprd_spi_tx_req(struct sprd_spi *ss)
{
writel_relaxed(SPRD_SPI_SW_TX_REQ, ss->base + SPRD_SPI_CTL12);
}
static void sprd_spi_rx_req(struct sprd_spi *ss)
{
writel_relaxed(SPRD_SPI_SW_RX_REQ, ss->base + SPRD_SPI_CTL12);
}
static void sprd_spi_enter_idle(struct sprd_spi *ss)
{
u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL1);
val &= ~SPRD_SPI_RTX_MD_MASK;
writel_relaxed(val, ss->base + SPRD_SPI_CTL1);
}
static void sprd_spi_set_transfer_bits(struct sprd_spi *ss, u32 bits)
{
u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL0);
/* Set the valid bits for every transaction */
val &= ~(SPRD_SPI_CHNL_LEN_MASK << SPRD_SPI_CHNL_LEN);
val |= bits << SPRD_SPI_CHNL_LEN;
writel_relaxed(val, ss->base + SPRD_SPI_CTL0);
}
static void sprd_spi_set_tx_length(struct sprd_spi *ss, u32 length)
{
u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL8);
length &= SPRD_SPI_TX_MAX_LEN_MASK;
val &= ~SPRD_SPI_TX_LEN_H_MASK;
val |= length >> SPRD_SPI_TX_LEN_H_OFFSET;
writel_relaxed(val, ss->base + SPRD_SPI_CTL8);
val = length & SPRD_SPI_TX_LEN_L_MASK;
writel_relaxed(val, ss->base + SPRD_SPI_CTL9);
}
static void sprd_spi_set_rx_length(struct sprd_spi *ss, u32 length)
{
u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL10);
length &= SPRD_SPI_RX_MAX_LEN_MASK;
val &= ~SPRD_SPI_RX_LEN_H_MASK;
val |= length >> SPRD_SPI_RX_LEN_H_OFFSET;
writel_relaxed(val, ss->base + SPRD_SPI_CTL10);
val = length & SPRD_SPI_RX_LEN_L_MASK;
writel_relaxed(val, ss->base + SPRD_SPI_CTL11);
}
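
The two helpers above split a 20-bit transfer length into a high nibble (CTL8/CTL10 bits [3:0]) and a low 16-bit half (CTL9/CTL11). A minimal userspace sketch of that split, assuming an arbitrary length of 0x12345:

#include <stdio.h>

int main(void)
{
	unsigned int length = 0x12345;			/* assumed 20-bit length */
	unsigned int len_h = (length & 0xfffff) >> 16;	/* 0x1    -> CTL8[3:0]  */
	unsigned int len_l = length & 0xffff;		/* 0x2345 -> CTL9[15:0] */

	printf("CTL8 low nibble = %#x, CTL9 = %#x\n", len_h, len_l);
	return 0;
}
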
static void sprd_spi_chipselect(struct spi_device *sdev, bool cs)
{
struct spi_controller *sctlr = sdev->controller;
struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
u32 val;
val = readl_relaxed(ss->base + SPRD_SPI_CTL0);
/* The SPI controller will pull down CS pin if cs is 0 */
if (!cs) {
val &= ~SPRD_SPI_CS0_VALID;
writel_relaxed(val, ss->base + SPRD_SPI_CTL0);
} else {
val |= SPRD_SPI_CSN_MASK;
writel_relaxed(val, ss->base + SPRD_SPI_CTL0);
}
}
static int sprd_spi_write_only_receive(struct sprd_spi *ss, u32 len)
{
u32 val;
/* Clear the start receive bit and reset receive data number */
val = readl_relaxed(ss->base + SPRD_SPI_CTL4);
val &= ~(SPRD_SPI_START_RX | SPRD_SPI_ONLY_RECV_MASK);
writel_relaxed(val, ss->base + SPRD_SPI_CTL4);
/* Set the receive data length */
val = readl_relaxed(ss->base + SPRD_SPI_CTL4);
val |= len & SPRD_SPI_ONLY_RECV_MASK;
writel_relaxed(val, ss->base + SPRD_SPI_CTL4);
/* Trigger to receive data */
val = readl_relaxed(ss->base + SPRD_SPI_CTL4);
val |= SPRD_SPI_START_RX;
writel_relaxed(val, ss->base + SPRD_SPI_CTL4);
return len;
}
static int sprd_spi_write_bufs_u8(struct sprd_spi *ss, u32 len)
{
u8 *tx_p = (u8 *)ss->tx_buf;
int i;
for (i = 0; i < len; i++)
writeb_relaxed(tx_p[i], ss->base + SPRD_SPI_TXD);
ss->tx_buf += i;
return i;
}
static int sprd_spi_write_bufs_u16(struct sprd_spi *ss, u32 len)
{
u16 *tx_p = (u16 *)ss->tx_buf;
int i;
for (i = 0; i < len; i++)
writew_relaxed(tx_p[i], ss->base + SPRD_SPI_TXD);
ss->tx_buf += i << 1;
return i << 1;
}
static int sprd_spi_write_bufs_u32(struct sprd_spi *ss, u32 len)
{
u32 *tx_p = (u32 *)ss->tx_buf;
int i;
for (i = 0; i < len; i++)
writel_relaxed(tx_p[i], ss->base + SPRD_SPI_TXD);
ss->tx_buf += i << 2;
return i << 2;
}
static int sprd_spi_read_bufs_u8(struct sprd_spi *ss, u32 len)
{
u8 *rx_p = (u8 *)ss->rx_buf;
int i;
for (i = 0; i < len; i++)
rx_p[i] = readb_relaxed(ss->base + SPRD_SPI_TXD);
ss->rx_buf += i;
return i;
}
static int sprd_spi_read_bufs_u16(struct sprd_spi *ss, u32 len)
{
u16 *rx_p = (u16 *)ss->rx_buf;
int i;
for (i = 0; i < len; i++)
rx_p[i] = readw_relaxed(ss->base + SPRD_SPI_TXD);
ss->rx_buf += i << 1;
return i << 1;
}
static int sprd_spi_read_bufs_u32(struct sprd_spi *ss, u32 len)
{
u32 *rx_p = (u32 *)ss->rx_buf;
int i;
for (i = 0; i < len; i++)
rx_p[i] = readl_relaxed(ss->base + SPRD_SPI_TXD);
ss->rx_buf += i << 2;
return i << 2;
}
static int sprd_spi_txrx_bufs(struct spi_device *sdev, struct spi_transfer *t)
{
struct sprd_spi *ss = spi_controller_get_devdata(sdev->controller);
u32 trans_len = ss->trans_len, len;
int ret, write_size = 0;
while (trans_len) {
len = trans_len > SPRD_SPI_FIFO_SIZE ? SPRD_SPI_FIFO_SIZE :
trans_len;
if (ss->trans_mode & SPRD_SPI_TX_MODE) {
sprd_spi_set_tx_length(ss, len);
write_size += ss->write_bufs(ss, len);
/*
* In 3-wire mode or dual TX line mode, we need to request
* the controller to start the transfer.
*/
if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
sprd_spi_tx_req(ss);
ret = sprd_spi_wait_for_tx_end(ss, t);
} else {
sprd_spi_set_rx_length(ss, len);
/*
* In 3-wire mode or dual TX line mode, we need to request
* the controller to start the read.
*/
if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
sprd_spi_rx_req(ss);
else
write_size += ss->write_bufs(ss, len);
ret = sprd_spi_wait_for_rx_end(ss, t);
}
if (ret)
goto complete;
if (ss->trans_mode & SPRD_SPI_RX_MODE)
ss->read_bufs(ss, len);
trans_len -= len;
}
ret = write_size;
complete:
sprd_spi_enter_idle(ss);
return ret;
}
static void sprd_spi_set_speed(struct sprd_spi *ss, u32 speed_hz)
{
/*
* From SPI datasheet, the prescale calculation formula:
* prescale = SPI source clock / (2 * SPI_freq) - 1;
*/
u32 clk_div = DIV_ROUND_UP(ss->src_clk, speed_hz << 1) - 1;
/* Save the real hardware speed */
ss->hw_speed_hz = (ss->src_clk >> 1) / (clk_div + 1);
writel_relaxed(clk_div, ss->base + SPRD_SPI_CLKD);
}
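
A small standalone sketch of the prescale formula above, using the 26 MHz default source clock and two assumed requested speeds; it prints the programmed CLKD value and the resulting real bus speed, which never exceeds the request.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static void show(unsigned int src_clk, unsigned int speed_hz)
{
	unsigned int clk_div = DIV_ROUND_UP(src_clk, speed_hz << 1) - 1;
	unsigned int hw_speed_hz = (src_clk >> 1) / (clk_div + 1);

	printf("requested %u Hz -> CLKD=%u, actual %u Hz\n",
	       speed_hz, clk_div, hw_speed_hz);
}

int main(void)
{
	show(26000000, 1000000);	/* CLKD=12, actual 1000000 Hz (exact)   */
	show(26000000, 4000000);	/* CLKD=3,  actual 3250000 Hz (rounded) */
	return 0;
}
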
static void sprd_spi_init_hw(struct sprd_spi *ss, struct spi_transfer *t)
{
u16 word_delay, interval;
u32 val;
val = readl_relaxed(ss->base + SPRD_SPI_CTL0);
val &= ~(SPRD_SPI_SCK_REV | SPRD_SPI_NG_TX | SPRD_SPI_NG_RX);
/* Set default chip selection, clock phase and clock polarity */
val |= ss->hw_mode & SPI_CPHA ? SPRD_SPI_NG_RX : SPRD_SPI_NG_TX;
val |= ss->hw_mode & SPI_CPOL ? SPRD_SPI_SCK_REV : 0;
writel_relaxed(val, ss->base + SPRD_SPI_CTL0);
/*
* Set the interval between two SPI frames; per the datasheet, the
* interval calculation formula is:
* interval time (source clock cycles) = interval * 4 + 10.
*/
word_delay = clamp_t(u16, t->word_delay, SPRD_SPI_MIN_DELAY_CYCLE,
SPRD_SPI_MAX_DELAY_CYCLE);
interval = DIV_ROUND_UP(word_delay - 10, 4);
ss->word_delay = interval * 4 + 10;
writel_relaxed(interval, ss->base + SPRD_SPI_CTL5);
/* Reset SPI fifo */
writel_relaxed(1, ss->base + SPRD_SPI_FIFO_RST);
writel_relaxed(0, ss->base + SPRD_SPI_FIFO_RST);
/* Set SPI work mode */
val = readl_relaxed(ss->base + SPRD_SPI_CTL7);
val &= ~SPRD_SPI_MODE_MASK;
if (ss->hw_mode & SPI_3WIRE)
val |= SPRD_SPI_3WIRE_MODE << SPRD_SPI_MODE_OFFSET;
else
val |= SPRD_SPI_4WIRE_MODE << SPRD_SPI_MODE_OFFSET;
if (ss->hw_mode & SPI_TX_DUAL)
val |= SPRD_SPI_DATA_LINE2_EN;
else
val &= ~SPRD_SPI_DATA_LINE2_EN;
writel_relaxed(val, ss->base + SPRD_SPI_CTL7);
}
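
The interval programming above clamps the requested word delay to 14..130 cycles and quantizes it to interval * 4 + 10 source-clock cycles. A minimal sketch with a few assumed requests:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned int clamp_uint(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	unsigned int requested[] = { 5, 20, 200 };	/* assumed example requests */

	for (int i = 0; i < 3; i++) {
		unsigned int wd = clamp_uint(requested[i], 14, 130);
		unsigned int interval = DIV_ROUND_UP(wd - 10, 4);
		unsigned int effective = interval * 4 + 10;

		printf("requested %3u cycles -> CTL5=%u, effective %u cycles\n",
		       requested[i], interval, effective);	/* 14, 22, 130 */
	}
	return 0;
}
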
static int sprd_spi_setup_transfer(struct spi_device *sdev,
struct spi_transfer *t)
{
struct sprd_spi *ss = spi_controller_get_devdata(sdev->controller);
u8 bits_per_word = t->bits_per_word;
u32 val, mode = 0;
ss->len = t->len;
ss->tx_buf = t->tx_buf;
ss->rx_buf = t->rx_buf;
ss->hw_mode = sdev->mode;
sprd_spi_init_hw(ss, t);
/* Set transfer speed and valid bits */
sprd_spi_set_speed(ss, t->speed_hz);
sprd_spi_set_transfer_bits(ss, bits_per_word);
if (bits_per_word > 16)
bits_per_word = round_up(bits_per_word, 16);
else
bits_per_word = round_up(bits_per_word, 8);
switch (bits_per_word) {
case 8:
ss->trans_len = t->len;
ss->read_bufs = sprd_spi_read_bufs_u8;
ss->write_bufs = sprd_spi_write_bufs_u8;
break;
case 16:
ss->trans_len = t->len >> 1;
ss->read_bufs = sprd_spi_read_bufs_u16;
ss->write_bufs = sprd_spi_write_bufs_u16;
break;
case 32:
ss->trans_len = t->len >> 2;
ss->read_bufs = sprd_spi_read_bufs_u32;
ss->write_bufs = sprd_spi_write_bufs_u32;
break;
default:
return -EINVAL;
}
/* Set transfer read or write mode */
val = readl_relaxed(ss->base + SPRD_SPI_CTL1);
val &= ~SPRD_SPI_RTX_MD_MASK;
if (t->tx_buf)
mode |= SPRD_SPI_TX_MODE;
if (t->rx_buf)
mode |= SPRD_SPI_RX_MODE;
writel_relaxed(val | mode, ss->base + SPRD_SPI_CTL1);
ss->trans_mode = mode;
/*
* If in only receive mode, we need to trigger the SPI controller to
* receive data automatically.
*/
if (ss->trans_mode == SPRD_SPI_RX_MODE)
ss->write_bufs = sprd_spi_write_only_receive;
return 0;
}
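
The bits_per_word handling above rounds the word size up to the FIFO access width actually used (8, 16 or 32 bits) and derives the word count from the byte length. A standalone sketch with assumed word sizes and a 64-byte transfer; the round_up() here is a simplified power-of-two version equivalent to the kernel helper for these values.

#include <stdio.h>

#define round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	unsigned int bpw[] = { 8, 12, 16, 20, 32 };	/* assumed word sizes */
	unsigned int len = 64;				/* transfer length in bytes */

	for (int i = 0; i < 5; i++) {
		unsigned int b = bpw[i] > 16 ? round_up(bpw[i], 16)
					     : round_up(bpw[i], 8);
		unsigned int words = b == 8 ? len : (b == 16 ? len >> 1 : len >> 2);

		printf("bits_per_word %2u -> FIFO access %2u-bit, %2u words\n",
		       bpw[i], b, words);
	}
	return 0;
}
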
static int sprd_spi_transfer_one(struct spi_controller *sctlr,
struct spi_device *sdev,
struct spi_transfer *t)
{
int ret;
ret = sprd_spi_setup_transfer(sdev, t);
if (ret)
goto setup_err;
ret = sprd_spi_txrx_bufs(sdev, t);
if (ret == t->len)
ret = 0;
else if (ret >= 0)
ret = -EREMOTEIO;
setup_err:
spi_finalize_current_transfer(sctlr);
return ret;
}
static int sprd_spi_clk_init(struct platform_device *pdev, struct sprd_spi *ss)
{
struct clk *clk_spi, *clk_parent;
clk_spi = devm_clk_get(&pdev->dev, "spi");
if (IS_ERR(clk_spi)) {
dev_warn(&pdev->dev, "can't get the spi clock\n");
clk_spi = NULL;
}
clk_parent = devm_clk_get(&pdev->dev, "source");
if (IS_ERR(clk_parent)) {
dev_warn(&pdev->dev, "can't get the source clock\n");
clk_parent = NULL;
}
ss->clk = devm_clk_get(&pdev->dev, "enable");
if (IS_ERR(ss->clk)) {
dev_err(&pdev->dev, "can't get the enable clock\n");
return PTR_ERR(ss->clk);
}
if (!clk_set_parent(clk_spi, clk_parent))
ss->src_clk = clk_get_rate(clk_spi);
else
ss->src_clk = SPRD_SPI_DEFAULT_SOURCE;
return 0;
}
static int sprd_spi_probe(struct platform_device *pdev)
{
struct spi_controller *sctlr;
struct resource *res;
struct sprd_spi *ss;
int ret;
pdev->id = of_alias_get_id(pdev->dev.of_node, "spi");
sctlr = spi_alloc_master(&pdev->dev, sizeof(*ss));
if (!sctlr)
return -ENOMEM;
ss = spi_controller_get_devdata(sctlr);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
ss->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(ss->base)) {
ret = PTR_ERR(ss->base);
goto free_controller;
}
ss->dev = &pdev->dev;
sctlr->dev.of_node = pdev->dev.of_node;
sctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE | SPI_TX_DUAL;
sctlr->bus_num = pdev->id;
sctlr->set_cs = sprd_spi_chipselect;
sctlr->transfer_one = sprd_spi_transfer_one;
sctlr->auto_runtime_pm = true;
sctlr->max_speed_hz = min_t(u32, ss->src_clk >> 1,
SPRD_SPI_MAX_SPEED_HZ);
platform_set_drvdata(pdev, sctlr);
ret = sprd_spi_clk_init(pdev, ss);
if (ret)
goto free_controller;
ret = clk_prepare_enable(ss->clk);
if (ret)
goto free_controller;
ret = pm_runtime_set_active(&pdev->dev);
if (ret < 0)
goto disable_clk;
pm_runtime_set_autosuspend_delay(&pdev->dev,
SPRD_SPI_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "failed to resume SPI controller\n");
goto err_rpm_put;
}
ret = devm_spi_register_controller(&pdev->dev, sctlr);
if (ret)
goto err_rpm_put;
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
return 0;
err_rpm_put:
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
disable_clk:
clk_disable_unprepare(ss->clk);
free_controller:
spi_controller_put(sctlr);
return ret;
}
static int sprd_spi_remove(struct platform_device *pdev)
{
struct spi_controller *sctlr = platform_get_drvdata(pdev);
struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
int ret;
ret = pm_runtime_get_sync(ss->dev);
if (ret < 0) {
dev_err(ss->dev, "failed to resume SPI controller\n");
return ret;
}
clk_disable_unprepare(ss->clk);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
}
static int __maybe_unused sprd_spi_runtime_suspend(struct device *dev)
{
struct spi_controller *sctlr = dev_get_drvdata(dev);
struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
clk_disable_unprepare(ss->clk);
return 0;
}
static int __maybe_unused sprd_spi_runtime_resume(struct device *dev)
{
struct spi_controller *sctlr = dev_get_drvdata(dev);
struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
int ret;
ret = clk_prepare_enable(ss->clk);
if (ret)
return ret;
return 0;
}
static const struct dev_pm_ops sprd_spi_pm_ops = {
SET_RUNTIME_PM_OPS(sprd_spi_runtime_suspend,
sprd_spi_runtime_resume, NULL)
};
static const struct of_device_id sprd_spi_of_match[] = {
{ .compatible = "sprd,sc9860-spi", },
{ /* sentinel */ }
};
static struct platform_driver sprd_spi_driver = {
.driver = {
.name = "sprd-spi",
.of_match_table = sprd_spi_of_match,
.pm = &sprd_spi_pm_ops,
},
.probe = sprd_spi_probe,
.remove = sprd_spi_remove,
};
module_platform_driver(sprd_spi_driver);
MODULE_DESCRIPTION("Spreadtrum SPI Controller driver");
MODULE_AUTHOR("Lanqing Liu <lanqing.liu@spreadtrum.com>");
MODULE_LICENSE("GPL v2");

View File

@ -0,0 +1,512 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) STMicroelectronics 2018 - All Rights Reserved
* Author: Ludovic Barre <ludovic.barre@st.com> for STMicroelectronics.
*/
#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/spi/spi-mem.h>
#define QSPI_CR 0x00
#define CR_EN BIT(0)
#define CR_ABORT BIT(1)
#define CR_DMAEN BIT(2)
#define CR_TCEN BIT(3)
#define CR_SSHIFT BIT(4)
#define CR_DFM BIT(6)
#define CR_FSEL BIT(7)
#define CR_FTHRES_MASK GENMASK(12, 8)
#define CR_TEIE BIT(16)
#define CR_TCIE BIT(17)
#define CR_FTIE BIT(18)
#define CR_SMIE BIT(19)
#define CR_TOIE BIT(20)
#define CR_PRESC_MASK GENMASK(31, 24)
#define QSPI_DCR 0x04
#define DCR_FSIZE_MASK GENMASK(20, 16)
#define QSPI_SR 0x08
#define SR_TEF BIT(0)
#define SR_TCF BIT(1)
#define SR_FTF BIT(2)
#define SR_SMF BIT(3)
#define SR_TOF BIT(4)
#define SR_BUSY BIT(5)
#define SR_FLEVEL_MASK GENMASK(13, 8)
#define QSPI_FCR 0x0c
#define FCR_CTEF BIT(0)
#define FCR_CTCF BIT(1)
#define QSPI_DLR 0x10
#define QSPI_CCR 0x14
#define CCR_INST_MASK GENMASK(7, 0)
#define CCR_IMODE_MASK GENMASK(9, 8)
#define CCR_ADMODE_MASK GENMASK(11, 10)
#define CCR_ADSIZE_MASK GENMASK(13, 12)
#define CCR_DCYC_MASK GENMASK(22, 18)
#define CCR_DMODE_MASK GENMASK(25, 24)
#define CCR_FMODE_MASK GENMASK(27, 26)
#define CCR_FMODE_INDW (0U << 26)
#define CCR_FMODE_INDR (1U << 26)
#define CCR_FMODE_APM (2U << 26)
#define CCR_FMODE_MM (3U << 26)
#define CCR_BUSWIDTH_0 0x0
#define CCR_BUSWIDTH_1 0x1
#define CCR_BUSWIDTH_2 0x2
#define CCR_BUSWIDTH_4 0x3
#define QSPI_AR 0x18
#define QSPI_ABR 0x1c
#define QSPI_DR 0x20
#define QSPI_PSMKR 0x24
#define QSPI_PSMAR 0x28
#define QSPI_PIR 0x2c
#define QSPI_LPTR 0x30
#define LPTR_DFT_TIMEOUT 0x10
#define STM32_QSPI_MAX_MMAP_SZ SZ_256M
#define STM32_QSPI_MAX_NORCHIP 2
#define STM32_FIFO_TIMEOUT_US 30000
#define STM32_BUSY_TIMEOUT_US 100000
#define STM32_ABT_TIMEOUT_US 100000
struct stm32_qspi_flash {
struct stm32_qspi *qspi;
u32 cs;
u32 presc;
};
struct stm32_qspi {
struct device *dev;
void __iomem *io_base;
void __iomem *mm_base;
resource_size_t mm_size;
struct clk *clk;
u32 clk_rate;
struct stm32_qspi_flash flash[STM32_QSPI_MAX_NORCHIP];
struct completion data_completion;
u32 fmode;
/*
* protects the device configuration, which can differ between
* accesses to the two flash banks (bk1, bk2)
*/
struct mutex lock;
};
static irqreturn_t stm32_qspi_irq(int irq, void *dev_id)
{
struct stm32_qspi *qspi = (struct stm32_qspi *)dev_id;
u32 cr, sr;
sr = readl_relaxed(qspi->io_base + QSPI_SR);
if (sr & (SR_TEF | SR_TCF)) {
/* disable irq */
cr = readl_relaxed(qspi->io_base + QSPI_CR);
cr &= ~CR_TCIE & ~CR_TEIE;
writel_relaxed(cr, qspi->io_base + QSPI_CR);
complete(&qspi->data_completion);
}
return IRQ_HANDLED;
}
static void stm32_qspi_read_fifo(u8 *val, void __iomem *addr)
{
*val = readb_relaxed(addr);
}
static void stm32_qspi_write_fifo(u8 *val, void __iomem *addr)
{
writeb_relaxed(*val, addr);
}
static int stm32_qspi_tx_poll(struct stm32_qspi *qspi,
const struct spi_mem_op *op)
{
void (*tx_fifo)(u8 *val, void __iomem *addr);
u32 len = op->data.nbytes, sr;
u8 *buf;
int ret;
if (op->data.dir == SPI_MEM_DATA_IN) {
tx_fifo = stm32_qspi_read_fifo;
buf = op->data.buf.in;
} else {
tx_fifo = stm32_qspi_write_fifo;
buf = (u8 *)op->data.buf.out;
}
while (len--) {
ret = readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_SR,
sr, (sr & SR_FTF), 1,
STM32_FIFO_TIMEOUT_US);
if (ret) {
dev_err(qspi->dev, "fifo timeout (len:%d stat:%#x)\n",
len, sr);
return ret;
}
tx_fifo(buf++, qspi->io_base + QSPI_DR);
}
return 0;
}
static int stm32_qspi_tx_mm(struct stm32_qspi *qspi,
const struct spi_mem_op *op)
{
memcpy_fromio(op->data.buf.in, qspi->mm_base + op->addr.val,
op->data.nbytes);
return 0;
}
static int stm32_qspi_tx(struct stm32_qspi *qspi, const struct spi_mem_op *op)
{
if (!op->data.nbytes)
return 0;
if (qspi->fmode == CCR_FMODE_MM)
return stm32_qspi_tx_mm(qspi, op);
return stm32_qspi_tx_poll(qspi, op);
}
static int stm32_qspi_wait_nobusy(struct stm32_qspi *qspi)
{
u32 sr;
return readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_SR, sr,
!(sr & SR_BUSY), 1,
STM32_BUSY_TIMEOUT_US);
}
static int stm32_qspi_wait_cmd(struct stm32_qspi *qspi,
const struct spi_mem_op *op)
{
u32 cr, sr;
int err = 0;
if (!op->data.nbytes)
return stm32_qspi_wait_nobusy(qspi);
if (readl_relaxed(qspi->io_base + QSPI_SR) & SR_TCF)
goto out;
reinit_completion(&qspi->data_completion);
cr = readl_relaxed(qspi->io_base + QSPI_CR);
writel_relaxed(cr | CR_TCIE | CR_TEIE, qspi->io_base + QSPI_CR);
if (!wait_for_completion_interruptible_timeout(&qspi->data_completion,
msecs_to_jiffies(1000))) {
err = -ETIMEDOUT;
} else {
sr = readl_relaxed(qspi->io_base + QSPI_SR);
if (sr & SR_TEF)
err = -EIO;
}
out:
/* clear flags */
writel_relaxed(FCR_CTCF | FCR_CTEF, qspi->io_base + QSPI_FCR);
return err;
}
static int stm32_qspi_get_mode(struct stm32_qspi *qspi, u8 buswidth)
{
if (buswidth == 4)
return CCR_BUSWIDTH_4;
return buswidth;
}
static int stm32_qspi_send(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
struct stm32_qspi_flash *flash = &qspi->flash[mem->spi->chip_select];
u32 ccr, cr, addr_max;
int timeout, err = 0;
dev_dbg(qspi->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
op->dummy.buswidth, op->data.buswidth,
op->addr.val, op->data.nbytes);
err = stm32_qspi_wait_nobusy(qspi);
if (err)
goto abort;
addr_max = op->addr.val + op->data.nbytes + 1;
if (op->data.dir == SPI_MEM_DATA_IN) {
if (addr_max < qspi->mm_size &&
op->addr.buswidth)
qspi->fmode = CCR_FMODE_MM;
else
qspi->fmode = CCR_FMODE_INDR;
} else {
qspi->fmode = CCR_FMODE_INDW;
}
cr = readl_relaxed(qspi->io_base + QSPI_CR);
cr &= ~CR_PRESC_MASK & ~CR_FSEL;
cr |= FIELD_PREP(CR_PRESC_MASK, flash->presc);
cr |= FIELD_PREP(CR_FSEL, flash->cs);
writel_relaxed(cr, qspi->io_base + QSPI_CR);
if (op->data.nbytes)
writel_relaxed(op->data.nbytes - 1,
qspi->io_base + QSPI_DLR);
else
qspi->fmode = CCR_FMODE_INDW;
ccr = qspi->fmode;
ccr |= FIELD_PREP(CCR_INST_MASK, op->cmd.opcode);
ccr |= FIELD_PREP(CCR_IMODE_MASK,
stm32_qspi_get_mode(qspi, op->cmd.buswidth));
if (op->addr.nbytes) {
ccr |= FIELD_PREP(CCR_ADMODE_MASK,
stm32_qspi_get_mode(qspi, op->addr.buswidth));
ccr |= FIELD_PREP(CCR_ADSIZE_MASK, op->addr.nbytes - 1);
}
if (op->dummy.buswidth && op->dummy.nbytes)
ccr |= FIELD_PREP(CCR_DCYC_MASK,
op->dummy.nbytes * 8 / op->dummy.buswidth);
if (op->data.nbytes) {
ccr |= FIELD_PREP(CCR_DMODE_MASK,
stm32_qspi_get_mode(qspi, op->data.buswidth));
}
writel_relaxed(ccr, qspi->io_base + QSPI_CCR);
if (op->addr.nbytes && qspi->fmode != CCR_FMODE_MM)
writel_relaxed(op->addr.val, qspi->io_base + QSPI_AR);
err = stm32_qspi_tx(qspi, op);
/*
* Abort in:
* -error case
* -memory-mapped read: prefetching must be stopped if we read the last
*  byte of the device (device size - FIFO size). Since the device size
*  is not known, prefetching is always stopped.
*/
if (err || qspi->fmode == CCR_FMODE_MM)
goto abort;
/* wait end of tx in indirect mode */
err = stm32_qspi_wait_cmd(qspi, op);
if (err)
goto abort;
return 0;
abort:
cr = readl_relaxed(qspi->io_base + QSPI_CR) | CR_ABORT;
writel_relaxed(cr, qspi->io_base + QSPI_CR);
/* wait clear of abort bit by hw */
timeout = readl_relaxed_poll_timeout_atomic(qspi->io_base + QSPI_CR,
cr, !(cr & CR_ABORT), 1,
STM32_ABT_TIMEOUT_US);
writel_relaxed(FCR_CTCF, qspi->io_base + QSPI_FCR);
if (err || timeout)
dev_err(qspi->dev, "%s err:%d abort timeout:%d\n",
__func__, err, timeout);
return err;
}
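
To make the CCR packing above concrete, here is a userspace sketch that assembles the same fields for a hypothetical quad-I/O read (opcode 0xEB, 3 address bytes and 3 dummy bytes on 4 wires, data on 4 wires, indirect read). The opcode and byte counts are illustrative only; the shifts mirror the GENMASK positions defined at the top of the file, and the dummy-cycle count follows the nbytes * 8 / buswidth conversion used above.

#include <stdio.h>

/* Buswidth -> CCR field encoding, as in stm32_qspi_get_mode() above */
static unsigned int qspi_mode(unsigned int buswidth)
{
	return buswidth == 4 ? 0x3 : buswidth;	/* 0, 1 and 2 map to themselves */
}

int main(void)
{
	/* Hypothetical quad-I/O read op, values chosen for illustration */
	unsigned int opcode = 0xEB, addr_nbytes = 3, dummy_nbytes = 3;
	unsigned int cmd_bw = 1, addr_bw = 4, dummy_bw = 4, data_bw = 4;

	unsigned int dcyc = dummy_nbytes * 8 / dummy_bw;	/* 6 dummy cycles */
	unsigned int ccr = (1U << 26)			/* FMODE = indirect read */
			 | (qspi_mode(data_bw) << 24)	/* DMODE  */
			 | (dcyc << 18)			/* DCYC   */
			 | ((addr_nbytes - 1) << 12)	/* ADSIZE */
			 | (qspi_mode(addr_bw) << 10)	/* ADMODE */
			 | (qspi_mode(cmd_bw) << 8)	/* IMODE  */
			 | opcode;			/* INST   */

	printf("CCR = %#010x (DCYC=%u)\n", ccr, dcyc);
	return 0;
}
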
static int stm32_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct stm32_qspi *qspi = spi_controller_get_devdata(mem->spi->master);
int ret;
mutex_lock(&qspi->lock);
ret = stm32_qspi_send(mem, op);
mutex_unlock(&qspi->lock);
return ret;
}
static int stm32_qspi_setup(struct spi_device *spi)
{
struct spi_controller *ctrl = spi->master;
struct stm32_qspi *qspi = spi_controller_get_devdata(ctrl);
struct stm32_qspi_flash *flash;
u32 cr, presc;
if (ctrl->busy)
return -EBUSY;
if (!spi->max_speed_hz)
return -EINVAL;
presc = DIV_ROUND_UP(qspi->clk_rate, spi->max_speed_hz) - 1;
flash = &qspi->flash[spi->chip_select];
flash->qspi = qspi;
flash->cs = spi->chip_select;
flash->presc = presc;
mutex_lock(&qspi->lock);
writel_relaxed(LPTR_DFT_TIMEOUT, qspi->io_base + QSPI_LPTR);
cr = FIELD_PREP(CR_FTHRES_MASK, 3) | CR_TCEN | CR_SSHIFT | CR_EN;
writel_relaxed(cr, qspi->io_base + QSPI_CR);
/* set dcr fsize to max address */
writel_relaxed(DCR_FSIZE_MASK, qspi->io_base + QSPI_DCR);
mutex_unlock(&qspi->lock);
return 0;
}
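
The prescaler programmed above divides the QSPI kernel clock by presc + 1. A small sketch with assumed clock numbers (108 MHz kernel clock, a flash limited to 25 MHz) showing that the resulting serial clock stays at or below the device's max_speed_hz:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	/* Hypothetical values, not taken from any specific board */
	unsigned int clk_rate = 108000000, max_speed_hz = 25000000;

	unsigned int presc = DIV_ROUND_UP(clk_rate, max_speed_hz) - 1;	/* 4 */
	unsigned int actual = clk_rate / (presc + 1);		/* 21600000 Hz */

	printf("PRESC=%u -> SCLK %u Hz (<= %u Hz requested)\n",
	       presc, actual, max_speed_hz);
	return 0;
}
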
/*
* no special host constraint, so use default spi_mem_default_supports_op
* to check supported mode.
*/
static const struct spi_controller_mem_ops stm32_qspi_mem_ops = {
.exec_op = stm32_qspi_exec_op,
};
static void stm32_qspi_release(struct stm32_qspi *qspi)
{
/* disable qspi */
writel_relaxed(0, qspi->io_base + QSPI_CR);
mutex_destroy(&qspi->lock);
clk_disable_unprepare(qspi->clk);
}
static int stm32_qspi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct spi_controller *ctrl;
struct reset_control *rstc;
struct stm32_qspi *qspi;
struct resource *res;
int ret, irq;
ctrl = spi_alloc_master(dev, sizeof(*qspi));
if (!ctrl)
return -ENOMEM;
qspi = spi_controller_get_devdata(ctrl);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi");
qspi->io_base = devm_ioremap_resource(dev, res);
if (IS_ERR(qspi->io_base))
return PTR_ERR(qspi->io_base);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm");
qspi->mm_base = devm_ioremap_resource(dev, res);
if (IS_ERR(qspi->mm_base))
return PTR_ERR(qspi->mm_base);
qspi->mm_size = resource_size(res);
if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ)
return -EINVAL;
irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
dev_name(dev), qspi);
if (ret) {
dev_err(dev, "failed to request irq\n");
return ret;
}
init_completion(&qspi->data_completion);
qspi->clk = devm_clk_get(dev, NULL);
if (IS_ERR(qspi->clk))
return PTR_ERR(qspi->clk);
qspi->clk_rate = clk_get_rate(qspi->clk);
if (!qspi->clk_rate)
return -EINVAL;
ret = clk_prepare_enable(qspi->clk);
if (ret) {
dev_err(dev, "can not enable the clock\n");
return ret;
}
rstc = devm_reset_control_get_exclusive(dev, NULL);
if (!IS_ERR(rstc)) {
reset_control_assert(rstc);
udelay(2);
reset_control_deassert(rstc);
}
qspi->dev = dev;
platform_set_drvdata(pdev, qspi);
mutex_init(&qspi->lock);
ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD
| SPI_TX_DUAL | SPI_TX_QUAD;
ctrl->setup = stm32_qspi_setup;
ctrl->bus_num = -1;
ctrl->mem_ops = &stm32_qspi_mem_ops;
ctrl->num_chipselect = STM32_QSPI_MAX_NORCHIP;
ctrl->dev.of_node = dev->of_node;
ret = devm_spi_register_master(dev, ctrl);
if (ret)
goto err_spi_register;
return 0;
err_spi_register:
stm32_qspi_release(qspi);
return ret;
}
static int stm32_qspi_remove(struct platform_device *pdev)
{
struct stm32_qspi *qspi = platform_get_drvdata(pdev);
stm32_qspi_release(qspi);
return 0;
}
static const struct of_device_id stm32_qspi_match[] = {
{.compatible = "st,stm32f469-qspi"},
{}
};
MODULE_DEVICE_TABLE(of, stm32_qspi_match);
static struct platform_driver stm32_qspi_driver = {
.probe = stm32_qspi_probe,
.remove = stm32_qspi_remove,
.driver = {
.name = "stm32-qspi",
.of_match_table = stm32_qspi_match,
},
};
module_platform_driver(stm32_qspi_driver);
MODULE_AUTHOR("Ludovic Barre <ludovic.barre@st.com>");
MODULE_DESCRIPTION("STMicroelectronics STM32 quad spi driver");
MODULE_LICENSE("GPL v2");

View File

@ -1,18 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* SPI init/core code
*
* Copyright (C) 2005 David Brownell
* Copyright (C) 2008 Secret Lab Technologies Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
@ -60,6 +51,7 @@ static void spidev_release(struct device *dev)
spi->controller->cleanup(spi);
spi_controller_put(spi->controller);
kfree(spi->driver_override);
kfree(spi);
}
@ -77,6 +69,51 @@ modalias_show(struct device *dev, struct device_attribute *a, char *buf)
}
static DEVICE_ATTR_RO(modalias);
static ssize_t driver_override_store(struct device *dev,
struct device_attribute *a,
const char *buf, size_t count)
{
struct spi_device *spi = to_spi_device(dev);
const char *end = memchr(buf, '\n', count);
const size_t len = end ? end - buf : count;
const char *driver_override, *old;
/* We need to keep extra room for a newline when displaying value */
if (len >= (PAGE_SIZE - 1))
return -EINVAL;
driver_override = kstrndup(buf, len, GFP_KERNEL);
if (!driver_override)
return -ENOMEM;
device_lock(dev);
old = spi->driver_override;
if (len) {
spi->driver_override = driver_override;
} else {
/* Empty string, disable driver override */
spi->driver_override = NULL;
kfree(driver_override);
}
device_unlock(dev);
kfree(old);
return count;
}
static ssize_t driver_override_show(struct device *dev,
struct device_attribute *a, char *buf)
{
const struct spi_device *spi = to_spi_device(dev);
ssize_t len;
device_lock(dev);
len = snprintf(buf, PAGE_SIZE, "%s\n", spi->driver_override ? : "");
device_unlock(dev);
return len;
}
static DEVICE_ATTR_RW(driver_override);
#define SPI_STATISTICS_ATTRS(field, file) \
static ssize_t spi_controller_##field##_show(struct device *dev, \
struct device_attribute *attr, \
@ -158,6 +195,7 @@ SPI_STATISTICS_SHOW(transfers_split_maxsize, "%lu");
static struct attribute *spi_dev_attrs[] = {
&dev_attr_modalias.attr,
&dev_attr_driver_override.attr,
NULL,
};
@ -305,6 +343,10 @@ static int spi_match_device(struct device *dev, struct device_driver *drv)
const struct spi_device *spi = to_spi_device(dev);
const struct spi_driver *sdrv = to_spi_driver(drv);
/* Check override first, and if set, only use the named driver */
if (spi->driver_override)
return strcmp(spi->driver_override, drv->name) == 0;
/* Attempt an OF style match */
if (of_driver_match_device(dev, drv))
return 1;
@ -733,7 +775,9 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
enable = !enable;
if (gpio_is_valid(spi->cs_gpio)) {
gpio_set_value(spi->cs_gpio, !enable);
/* Honour the SPI_NO_CS flag */
if (!(spi->mode & SPI_NO_CS))
gpio_set_value(spi->cs_gpio, !enable);
/* Some SPI masters need both GPIO CS & slave_select */
if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
spi->controller->set_cs)
@ -2783,8 +2827,10 @@ int spi_setup(struct spi_device *spi)
return -EINVAL;
/* help drivers fail *cleanly* when they need options
* that aren't supported with their current controller
* SPI_CS_WORD has a fallback software implementation,
* so it is ignored here.
*/
bad_bits = spi->mode & ~spi->controller->mode_bits;
bad_bits = spi->mode & ~(spi->controller->mode_bits | SPI_CS_WORD);
ugly_bits = bad_bits &
(SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
if (ugly_bits) {
@ -2838,6 +2884,35 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
if (list_empty(&message->transfers))
return -EINVAL;
/* If an SPI controller does not support toggling the CS line on each
* transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
* for the CS line, we can emulate the CS-per-word hardware function by
* splitting transfers into one-word transfers and ensuring that
* cs_change is set for each transfer.
*/
if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
gpio_is_valid(spi->cs_gpio))) {
size_t maxsize;
int ret;
maxsize = (spi->bits_per_word + 7) / 8;
/* spi_split_transfers_maxsize() requires message->spi */
message->spi = spi;
ret = spi_split_transfers_maxsize(ctlr, message, maxsize,
GFP_KERNEL);
if (ret)
return ret;
list_for_each_entry(xfer, &message->transfers, transfer_list) {
/* don't change cs_change on the last entry in the list */
if (list_is_last(&xfer->transfer_list, &message->transfers))
break;
xfer->cs_change = 1;
}
}
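
For clients, the new SPI_CS_WORD flag is simply another mode bit. The sketch below is a hypothetical in-kernel user (the function name and 16-bit word size are made up) that requests per-word chip-select toggling; when the controller cannot do it in hardware, the splitting above provides the software fallback.

/* Hypothetical client sketch: my_read_samples() is illustrative only. */
#include <linux/spi/spi.h>

static int my_read_samples(struct spi_device *spi, __be16 *buf, unsigned int n)
{
	struct spi_transfer xfer = {
		.rx_buf = buf,
		.len = n * sizeof(*buf),
		.bits_per_word = 16,
	};
	int ret;

	spi->mode |= SPI_CS_WORD;	/* toggle CS after every 16-bit word */
	ret = spi_setup(spi);
	if (ret)
		return ret;

	/*
	 * If the controller does not advertise SPI_CS_WORD (or a GPIO CS is
	 * used), __spi_validate() splits this into one-word transfers with
	 * cs_change set, emulating the same behaviour in software.
	 */
	return spi_sync_transfer(spi, &xfer, 1);
}
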
/* Half-duplex links include original MicroWire, and ones with
* only one data pin like SPI_3WIRE (switches direction) or where
* either MOSI or MISO is missing. They can also be caused by

View File

@ -669,6 +669,7 @@ static const struct of_device_id spidev_dt_ids[] = {
{ .compatible = "lineartechnology,ltc2488" },
{ .compatible = "ge,achc" },
{ .compatible = "semtech,sx1301" },
{ .compatible = "lwn,bk4" },
{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
@ -724,11 +725,9 @@ static int spidev_probe(struct spi_device *spi)
* compatible string, it is a Linux implementation thing
* rather than a description of the hardware.
*/
if (spi->dev.of_node && !of_match_device(spidev_dt_ids, &spi->dev)) {
dev_err(&spi->dev, "buggy DT: spidev listed directly in DT\n");
WARN_ON(spi->dev.of_node &&
!of_match_device(spidev_dt_ids, &spi->dev));
}
WARN(spi->dev.of_node &&
of_device_is_compatible(spi->dev.of_node, "spidev"),
"%pOF: buggy DT: spidev listed directly in DT\n", spi->dev.of_node);
spidev_probe_acpi(spi);

View File

@ -1705,6 +1705,10 @@ static inline int pci_irqd_intx_xlate(struct irq_domain *d,
unsigned long *out_hwirq,
unsigned int *out_type)
{ return -EINVAL; }
static inline const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
struct pci_dev *dev)
{ return NULL; }
#endif /* CONFIG_PCI */
/* Include architecture-dependent settings and functions */

View File

@ -36,9 +36,6 @@ enum {
* @num_chipselect: number of chipselects supported by this SPI master
* @intr_line: interrupt line used to connect the SPI IP to the ARM interrupt
* controller within the SoC. Possible values are 0 and 1.
* @chip_sel: list of GPIOs which can act as chip-selects for the SPI.
* SPI_INTERN_CS denotes internal SPI chip-select. Not necessary
* to populate if all chip-selects are internal.
* @cshold_bug: set this to true if the SPI controller on your chip requires
* a write to CSHOLD bit in between transfers (like in DM355).
* @dma_event_q: DMA event queue to use if SPI_IO_TYPE_DMA is used for any
@ -48,7 +45,6 @@ struct davinci_spi_platform_data {
u8 version;
u8 num_chipselect;
u8 intr_line;
u8 *chip_sel;
u8 prescaler_limit;
bool cshold_bug;
enum dma_event_q dma_event_q;

View File

@ -196,6 +196,7 @@ enum pxa_ssp_type {
PXA27x_SSP,
PXA3xx_SSP,
PXA168_SSP,
MMP2_SSP,
PXA910_SSP,
CE4100_SSP,
QUARK_X1000_SSP,
@ -217,7 +218,7 @@ struct ssp_device {
const char *label;
int port_id;
int type;
enum pxa_ssp_type type;
int use_count;
int irq;

View File

@ -225,19 +225,14 @@ struct geni_se {
#define HW_VER_MINOR_SHFT 16
#define HW_VER_STEP_MASK GENMASK(15, 0)
#define GENI_SE_VERSION_MAJOR(ver) ((ver & HW_VER_MAJOR_MASK) >> HW_VER_MAJOR_SHFT)
#define GENI_SE_VERSION_MINOR(ver) ((ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT)
#define GENI_SE_VERSION_STEP(ver) (ver & HW_VER_STEP_MASK)
#if IS_ENABLED(CONFIG_QCOM_GENI_SE)
u32 geni_se_get_qup_hw_version(struct geni_se *se);
#define geni_se_get_wrapper_version(se, major, minor, step) do { \
u32 ver; \
\
ver = geni_se_get_qup_hw_version(se); \
major = (ver & HW_VER_MAJOR_MASK) >> HW_VER_MAJOR_SHFT; \
minor = (ver & HW_VER_MINOR_MASK) >> HW_VER_MINOR_SHFT; \
step = version & HW_VER_STEP_MASK; \
} while (0)
/**
* geni_se_read_proto() - Read the protocol configured for a serial engine
* @se: Pointer to the concerned serial engine.

View File

@ -1,15 +1,6 @@
/*
/* SPDX-License-Identifier: GPL-2.0-or-later
*
* Copyright (C) 2005 David Brownell
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __LINUX_SPI_H
@ -163,10 +154,12 @@ struct spi_device {
#define SPI_TX_QUAD 0x200 /* transmit with 4 wires */
#define SPI_RX_DUAL 0x400 /* receive with 2 wires */
#define SPI_RX_QUAD 0x800 /* receive with 4 wires */
#define SPI_CS_WORD 0x1000 /* toggle cs after each word */
int irq;
void *controller_state;
void *controller_data;
char modalias[SPI_NAME_SIZE];
const char *driver_override;
int cs_gpio; /* chip select gpio */
/* the statistics */
@ -177,7 +170,6 @@ struct spi_device {
* the controller talks to each chip, like:
* - memory packing (12 bit samples into low bits, others zeroed)
* - priority
* - drop chipselect after each word
* - chipselect delays
* - ...
*/
@ -711,6 +703,8 @@ extern void spi_res_release(struct spi_controller *ctlr,
* @delay_usecs: microseconds to delay after this transfer before
* (optionally) changing the chipselect status, then starting
* the next transfer or completing this @spi_message.
* @word_delay: clock cycles of inter-word delay inserted after each word
* (of the size set by bits_per_word) is transmitted.
* @transfer_list: transfers are sequenced through @spi_message.transfers
* @tx_sg: Scatterlist for transmit, currently not for client use
* @rx_sg: Scatterlist for receive, currently not for client use
@ -793,6 +787,7 @@ struct spi_transfer {
u8 bits_per_word;
u16 delay_usecs;
u32 speed_hz;
u16 word_delay;
struct list_head transfer_list;
};

View File

@ -73,12 +73,12 @@ static void hex_dump(const void *src, size_t length, size_t line_size,
while (i++ % line_size)
printf("__ ");
}
printf(" | "); /* right close */
printf(" |");
while (line < address) {
c = *line++;
printf("%c", (c < 33 || c == 255) ? 0x2E : c);
printf("%c", (c < 32 || c > 126) ? '.' : c);
}
printf("\n");
printf("|\n");
if (length > 0)
printf("%s | ", prefix);
}
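
The corrected printable-range test above maps everything outside ASCII 32..126 to a dot. A tiny standalone sketch of the same rule on a made-up byte sequence:

#include <stdio.h>

int main(void)
{
	/* Arbitrary sample bytes spanning printable and non-printable ranges */
	const unsigned char sample[] = { 'S', 'P', 'I', 0x00, 0x1f, 0x20,
					 0x7e, 0x7f, 0xff };

	for (unsigned int i = 0; i < sizeof(sample); i++) {
		unsigned char c = sample[i];

		putchar((c < 32 || c > 126) ? '.' : c);
	}
	putchar('\n');	/* prints "SPI.. ~.." */
	return 0;
}
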