Second set of DMA40 changes: refactorings and device tree
support for the DMA40. Now with MUSB and some platform
data removal.
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1.4.10 (GNU/Linux)
 
 iQIcBAABAgAGBQJRrctTAAoJEEEQszewGV1zoFkP/0ZS3Hw4sGX4CRcVZysFZXon
 tEWMtNq3WvugmAMos1BJtVYjbu9oaI0kPnTZOU83rF8YzEGvRUy+3AlZXwGmkyYq
 NgdfDElAY815F9pJ3ffFL74Dd36paWPtO55JuUEsdxLZE7c4/qOBHykURU0NzMtK
 at70fgVLaNo+mjB+Q2M1ouI5tBCrdwso+vI9SHYlof2wt8TiAmbBTOcKO35nUzlk
 brcrTeTtdWCc2foP7cFgmjrsct3CzB4Dfl49MiCv1zsiQL5a+qB0EAMd4xYyuVqb
 GE1WQDvDwzEDre0pAb/io/NwnlF81tgr6dhFTeFxB44knqbkptQFw9XCDnTQTy5C
 dda5HSJ31ES3N2IUj5K0tlSm3/cBywt5IU79FwhFN4Ndq/nzDtZQ+Y2/X0IA99is
 eTpLM+/20juOyTxt4vHhdp4aL4r4ZdmOb0GAsyAg/TtsG5LJhZPIctZ+xexKqAhX
 wPkvHubv87ruQC6AOjVXwez+3tzTLuU7H8a9IOHi+oHk4r+mdba3kAKTyEl717Bk
 44N0hcSLbZYktE9gdBJWuuySmfLXnbb/lc+2OjFYNWMHIgDbEwS2ZeVnKB8G+Eil
 iGDIMMnbCc9vA6fiDARZ3DvLFvahnUbO4BqcFozc/hHbhTceSn9xkXX3NcM8NCWF
 EAWWZlx4dtAbzB7ReTDm
 =rn7t
 -----END PGP SIGNATURE-----

Merge tag 'ux500-dma40-for-arm-soc-2' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson into next/drivers

From Linus Walleij:
Second set of DMA40 changes: refactorings and device tree
support for the DMA40. Now with MUSB and some platform
data removal.

* tag 'ux500-dma40-for-arm-soc-2' of git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson:
  dmaengine: ste_dma40: Fetch disabled channels from DT
  dmaengine: ste_dma40: Fetch the number of physical channels from DT
  ARM: ux500: Stop passing DMA platform data through AUXDATA
  dmaengine: ste_dma40: Allow memcpy channels to be configured from DT
  dmaengine: ste_dma40_ll: Replace meaningless register set with comment
  dmaengine: ste_dma40: Convert data_width from register bit format to value
  dmaengine: ste_dma40_ll: Use the BIT macro to replace ugly '(1 << x)'s
  ARM: ux500: Remove recently unused stedma40_xfer_dir enums
  dmaengine: ste_dma40: Replace ST-E's home-brew DMA direction defs with generic ones
  ARM: ux500: Replace ST-E's home-brew DMA direction definition with the generic one
  dmaengine: ste_dma40: Use the BIT macro to replace ugly '(1 << x)'s
  ARM: ux500: Remove empty function u8500_of_init_devices()
  ARM: ux500: Remove ux500-musb platform registration when booting with DT
  usb: musb: ux500: add device tree probing support
  usb: musb: ux500: attempt to find channels by name before using pdata
  usb: musb: ux500: harden checks for platform data
  usb: musb: ux500: take the dma_mask from coherent_dma_mask
  usb: musb: ux500: move the MUSB HDRC configuration into the driver
  usb: musb: ux500: move channel number knowledge into the driver
Olof Johansson 2013-06-14 16:53:54 -07:00
commit 2c3165ebb6
14 changed files with 374 additions and 250 deletions

View File

@ -6,10 +6,12 @@ Required properties:
- reg-names: Names of the above areas to use during resource look-up
- interrupt: Should contain the DMAC interrupt number
- #dma-cells: must be <3>
- memcpy-channels: Channels to be used for memcpy
Optional properties:
- dma-channels: Number of channels supported by hardware - if not present
the driver will attempt to obtain the information from H/W
- disabled-channels: Channels which can not be used
Example:
@ -21,6 +23,8 @@ Example:
interrupts = <0 25 0x4>;
#dma-cells = <2>;
memcpy-channels = <56 57 58 59 60>;
disabled-channels = <12>;
dma-channels = <8>;
};

View File

@ -0,0 +1,50 @@
Ux500 MUSB
Required properties:
- compatible : Should be "stericsson,db8500-musb"
- reg : Offset and length of registers
- interrupts : Interrupt; mode, number and trigger
- dr_mode : Dual-role; either host mode "host", peripheral mode "peripheral"
or both "otg"
Optional properties:
- dmas : A list of dma channels;
dma-controller, event-line, fixed-channel, flags
- dma-names : An ordered list of channel names affiliated to the above
Example:
usb_per5@a03e0000 {
compatible = "stericsson,db8500-musb", "mentor,musb";
reg = <0xa03e0000 0x10000>;
interrupts = <0 23 0x4>;
interrupt-names = "mc";
dr_mode = "otg";
dmas = <&dma 38 0 0x2>, /* Logical - DevToMem */
<&dma 38 0 0x0>, /* Logical - MemToDev */
<&dma 37 0 0x2>, /* Logical - DevToMem */
<&dma 37 0 0x0>, /* Logical - MemToDev */
<&dma 36 0 0x2>, /* Logical - DevToMem */
<&dma 36 0 0x0>, /* Logical - MemToDev */
<&dma 19 0 0x2>, /* Logical - DevToMem */
<&dma 19 0 0x0>, /* Logical - MemToDev */
<&dma 18 0 0x2>, /* Logical - DevToMem */
<&dma 18 0 0x0>, /* Logical - MemToDev */
<&dma 17 0 0x2>, /* Logical - DevToMem */
<&dma 17 0 0x0>, /* Logical - MemToDev */
<&dma 16 0 0x2>, /* Logical - DevToMem */
<&dma 16 0 0x0>, /* Logical - MemToDev */
<&dma 39 0 0x2>, /* Logical - DevToMem */
<&dma 39 0 0x0>; /* Logical - MemToDev */
dma-names = "iep_1_9", "oep_1_9",
"iep_2_10", "oep_2_10",
"iep_3_11", "oep_3_11",
"iep_4_12", "oep_4_12",
"iep_5_13", "oep_5_13",
"iep_6_14", "oep_6_14",
"iep_7_15", "oep_7_15",
"iep_8", "oep_8";
};
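
For context, the driver change to ux500_dma.c later in this series looks these
channels up by the dma-names entries above and only falls back to board-file
platform data when the device was not probed from device tree. The sketch below
shows that lookup pattern under those assumptions; ux500_get_ep_channel() is a
hypothetical wrapper, not the driver's exact code.

#include <linux/dmaengine.h>

/*
 * Request the channel named "iep_1_9" from the dmas/dma-names properties
 * above; if the lookup fails (e.g. a non-DT boot), fall back to a
 * filter-based request driven by legacy platform data, as the ux500_dma.c
 * hunk in this series does.
 */
static struct dma_chan *ux500_get_ep_channel(struct device *dev,
					     dma_filter_fn filter,
					     void *filter_param)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Preferred path: named channel from the device tree node. */
	chan = dma_request_slave_channel(dev, "iep_1_9");
	if (!chan)
		/* Legacy path: board data supplies a filter and parameter. */
		chan = dma_request_channel(mask, filter, filter_param);

	return chan;
}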

View File

@ -21,13 +21,13 @@
static struct stedma40_chan_cfg msp0_dma_rx = {
.high_priority = true,
.dir = STEDMA40_PERIPH_TO_MEM,
.dir = DMA_DEV_TO_MEM,
.dev_type = DB8500_DMA_DEV31_MSP0_SLIM0_CH0,
};
static struct stedma40_chan_cfg msp0_dma_tx = {
.high_priority = true,
.dir = STEDMA40_MEM_TO_PERIPH,
.dir = DMA_MEM_TO_DEV,
.dev_type = DB8500_DMA_DEV31_MSP0_SLIM0_CH0,
};
@ -39,13 +39,13 @@ struct msp_i2s_platform_data msp0_platform_data = {
static struct stedma40_chan_cfg msp1_dma_rx = {
.high_priority = true,
.dir = STEDMA40_PERIPH_TO_MEM,
.dir = DMA_DEV_TO_MEM,
.dev_type = DB8500_DMA_DEV30_MSP3,
};
static struct stedma40_chan_cfg msp1_dma_tx = {
.high_priority = true,
.dir = STEDMA40_MEM_TO_PERIPH,
.dir = DMA_MEM_TO_DEV,
.dev_type = DB8500_DMA_DEV30_MSP1,
};
@ -57,13 +57,13 @@ struct msp_i2s_platform_data msp1_platform_data = {
static struct stedma40_chan_cfg msp2_dma_rx = {
.high_priority = true,
.dir = STEDMA40_PERIPH_TO_MEM,
.dir = DMA_DEV_TO_MEM,
.dev_type = DB8500_DMA_DEV14_MSP2,
};
static struct stedma40_chan_cfg msp2_dma_tx = {
.high_priority = true,
.dir = STEDMA40_MEM_TO_PERIPH,
.dir = DMA_MEM_TO_DEV,
.dev_type = DB8500_DMA_DEV14_MSP2,
.use_fixed_channel = true,
.phy_channel = 1,

View File

@ -34,13 +34,13 @@
#ifdef CONFIG_STE_DMA40
struct stedma40_chan_cfg mop500_sdi0_dma_cfg_rx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_PERIPH_TO_MEM,
.dir = DMA_DEV_TO_MEM,
.dev_type = DB8500_DMA_DEV29_SD_MM0,
};
static struct stedma40_chan_cfg mop500_sdi0_dma_cfg_tx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_MEM_TO_PERIPH,
.dir = DMA_MEM_TO_DEV,
.dev_type = DB8500_DMA_DEV29_SD_MM0,
};
#endif
@ -81,13 +81,13 @@ void mop500_sdi_tc35892_init(struct device *parent)
#ifdef CONFIG_STE_DMA40
static struct stedma40_chan_cfg sdi1_dma_cfg_rx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_PERIPH_TO_MEM,
.dir = DMA_DEV_TO_MEM,
.dev_type = DB8500_DMA_DEV32_SD_MM1,
};
static struct stedma40_chan_cfg sdi1_dma_cfg_tx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_MEM_TO_PERIPH,
.dir = DMA_MEM_TO_DEV,
.dev_type = DB8500_DMA_DEV32_SD_MM1,
};
#endif
@ -112,13 +112,13 @@ struct mmci_platform_data mop500_sdi1_data = {
#ifdef CONFIG_STE_DMA40
struct stedma40_chan_cfg mop500_sdi2_dma_cfg_rx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_PERIPH_TO_MEM,
.dir = DMA_DEV_TO_MEM,
.dev_type = DB8500_DMA_DEV28_SD_MM2,
};
static struct stedma40_chan_cfg mop500_sdi2_dma_cfg_tx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_MEM_TO_PERIPH,
.dir = DMA_MEM_TO_DEV,
.dev_type = DB8500_DMA_DEV28_SD_MM2,
};
#endif
@ -144,13 +144,13 @@ struct mmci_platform_data mop500_sdi2_data = {
#ifdef CONFIG_STE_DMA40
struct stedma40_chan_cfg mop500_sdi4_dma_cfg_rx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_PERIPH_TO_MEM,
.dir = DMA_DEV_TO_MEM,
.dev_type = DB8500_DMA_DEV42_SD_MM4,
};
static struct stedma40_chan_cfg mop500_sdi4_dma_cfg_tx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_MEM_TO_PERIPH,
.dir = DMA_MEM_TO_DEV,
.dev_type = DB8500_DMA_DEV42_SD_MM4,
};
#endif

View File

@ -424,19 +424,19 @@ void mop500_snowball_ethernet_clock_enable(void)
static struct cryp_platform_data u8500_cryp1_platform_data = {
.mem_to_engine = {
.dir = STEDMA40_MEM_TO_PERIPH,
.dir = DMA_MEM_TO_DEV,
.dev_type = DB8500_DMA_DEV48_CAC1,
.mode = STEDMA40_MODE_LOGICAL,
},
.engine_to_mem = {
.dir = STEDMA40_PERIPH_TO_MEM,
.dir = DMA_DEV_TO_MEM,
.dev_type = DB8500_DMA_DEV48_CAC1,
.mode = STEDMA40_MODE_LOGICAL,
}
};
static struct stedma40_chan_cfg u8500_hash_dma_cfg_tx = {
.dir = STEDMA40_MEM_TO_PERIPH,
.dir = DMA_MEM_TO_DEV,
.dev_type = DB8500_DMA_DEV50_HAC1_TX,
.mode = STEDMA40_MODE_LOGICAL,
};
@ -455,13 +455,13 @@ static struct platform_device *mop500_platform_devs[] __initdata = {
#ifdef CONFIG_STE_DMA40
static struct stedma40_chan_cfg ssp0_dma_cfg_rx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_PERIPH_TO_MEM,
.dir = DMA_DEV_TO_MEM,
.dev_type = DB8500_DMA_DEV8_SSP0,
};
static struct stedma40_chan_cfg ssp0_dma_cfg_tx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_MEM_TO_PERIPH,
.dir = DMA_MEM_TO_DEV,
.dev_type = DB8500_DMA_DEV8_SSP0,
};
#endif
@ -490,37 +490,37 @@ static void __init mop500_spi_init(struct device *parent)
#ifdef CONFIG_STE_DMA40
static struct stedma40_chan_cfg uart0_dma_cfg_rx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_PERIPH_TO_MEM,
.dir = DMA_DEV_TO_MEM,
.dev_type = DB8500_DMA_DEV13_UART0,
};
static struct stedma40_chan_cfg uart0_dma_cfg_tx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_MEM_TO_PERIPH,
.dir = DMA_MEM_TO_DEV,
.dev_type = DB8500_DMA_DEV13_UART0,
};
static struct stedma40_chan_cfg uart1_dma_cfg_rx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_PERIPH_TO_MEM,
.dir = DMA_DEV_TO_MEM,
.dev_type = DB8500_DMA_DEV12_UART1,
};
static struct stedma40_chan_cfg uart1_dma_cfg_tx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_MEM_TO_PERIPH,
.dir = DMA_MEM_TO_DEV,
.dev_type = DB8500_DMA_DEV12_UART1,
};
static struct stedma40_chan_cfg uart2_dma_cfg_rx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_PERIPH_TO_MEM,
.dir = DMA_DEV_TO_MEM,
.dev_type = DB8500_DMA_DEV11_UART2,
};
static struct stedma40_chan_cfg uart2_dma_cfg_tx = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_MEM_TO_PERIPH,
.dir = DMA_MEM_TO_DEV,
.dev_type = DB8500_DMA_DEV11_UART2,
};
#endif

View File

@ -215,17 +215,6 @@ struct device * __init u8500_init_devices(void)
}
#ifdef CONFIG_MACH_UX500_DT
/* TODO: Once all pieces are DT:ed, remove completely. */
static struct device * __init u8500_of_init_devices(void)
{
struct device *parent = db8500_soc_device_init();
db8500_add_usb(parent, usb_db8500_dma_cfg, usb_db8500_dma_cfg);
return parent;
}
static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
/* Requires call-back bindings. */
OF_DEV_AUXDATA("arm,cortex-a9-pmu", 0, "arm-pmu", &db8500_pmu_platdata),
@ -269,8 +258,7 @@ static struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
OF_DEV_AUXDATA("stericsson,ux500-msp-i2s", 0x80125000,
"ux500-msp-i2s.3", &msp3_platform_data),
/* Requires clock name bindings and channel address lookup table. */
OF_DEV_AUXDATA("stericsson,db8500-dma40", 0x801C0000,
"dma40.0", &dma40_plat_data),
OF_DEV_AUXDATA("stericsson,db8500-dma40", 0x801C0000, "dma40.0", NULL),
{},
};
@ -284,7 +272,7 @@ static const struct of_device_id u8500_local_bus_nodes[] = {
static void __init u8500_init_machine(void)
{
struct device *parent = NULL;
struct device *parent = db8500_soc_device_init();
/* Pinmaps must be in place before devices register */
if (of_machine_is_compatible("st-ericsson,mop500"))
@ -297,9 +285,6 @@ static void __init u8500_init_machine(void)
else if (of_machine_is_compatible("st-ericsson,ccu9540")) {}
/* TODO: Add pinmaps for ccu9540 board. */
/* TODO: Export SoC, USB, cpu-freq and DMA40 */
parent = u8500_of_init_devices();
/* automatically probe child nodes of db8500 device */
of_platform_populate(NULL, u8500_local_bus_nodes, u8500_auxdata_lookup, parent);
}

View File

@ -14,15 +14,15 @@
#define MUSB_DMA40_RX_CH { \
.mode = STEDMA40_MODE_LOGICAL, \
.dir = STEDMA40_PERIPH_TO_MEM, \
.dir = DMA_DEV_TO_MEM, \
}
#define MUSB_DMA40_TX_CH { \
.mode = STEDMA40_MODE_LOGICAL, \
.dir = STEDMA40_MEM_TO_PERIPH, \
.dir = DMA_MEM_TO_DEV, \
}
static struct stedma40_chan_cfg musb_dma_rx_ch[UX500_MUSB_DMA_NUM_RX_CHANNELS]
static struct stedma40_chan_cfg musb_dma_rx_ch[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS]
= {
MUSB_DMA40_RX_CH,
MUSB_DMA40_RX_CH,
@ -34,7 +34,7 @@ static struct stedma40_chan_cfg musb_dma_rx_ch[UX500_MUSB_DMA_NUM_RX_CHANNELS]
MUSB_DMA40_RX_CH
};
static struct stedma40_chan_cfg musb_dma_tx_ch[UX500_MUSB_DMA_NUM_TX_CHANNELS]
static struct stedma40_chan_cfg musb_dma_tx_ch[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS]
= {
MUSB_DMA40_TX_CH,
MUSB_DMA40_TX_CH,
@ -46,7 +46,7 @@ static struct stedma40_chan_cfg musb_dma_tx_ch[UX500_MUSB_DMA_NUM_TX_CHANNELS]
MUSB_DMA40_TX_CH,
};
static void *ux500_dma_rx_param_array[UX500_MUSB_DMA_NUM_RX_CHANNELS] = {
static void *ux500_dma_rx_param_array[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS] = {
&musb_dma_rx_ch[0],
&musb_dma_rx_ch[1],
&musb_dma_rx_ch[2],
@ -57,7 +57,7 @@ static void *ux500_dma_rx_param_array[UX500_MUSB_DMA_NUM_RX_CHANNELS] = {
&musb_dma_rx_ch[7]
};
static void *ux500_dma_tx_param_array[UX500_MUSB_DMA_NUM_TX_CHANNELS] = {
static void *ux500_dma_tx_param_array[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS] = {
&musb_dma_tx_ch[0],
&musb_dma_tx_ch[1],
&musb_dma_tx_ch[2],
@ -71,23 +71,11 @@ static void *ux500_dma_tx_param_array[UX500_MUSB_DMA_NUM_TX_CHANNELS] = {
static struct ux500_musb_board_data musb_board_data = {
.dma_rx_param_array = ux500_dma_rx_param_array,
.dma_tx_param_array = ux500_dma_tx_param_array,
.num_rx_channels = UX500_MUSB_DMA_NUM_RX_CHANNELS,
.num_tx_channels = UX500_MUSB_DMA_NUM_TX_CHANNELS,
.dma_filter = stedma40_filter,
};
static u64 ux500_musb_dmamask = DMA_BIT_MASK(32);
static struct musb_hdrc_config musb_hdrc_config = {
.multipoint = true,
.dyn_fifo = true,
.num_eps = 16,
.ram_bits = 16,
};
static struct musb_hdrc_platform_data musb_platform_data = {
.mode = MUSB_OTG,
.config = &musb_hdrc_config,
.board_data = &musb_board_data,
};
@ -108,7 +96,6 @@ struct platform_device ux500_musb_device = {
.id = 0,
.dev = {
.platform_data = &musb_platform_data,
.dma_mask = &ux500_musb_dmamask,
.coherent_dma_mask = DMA_BIT_MASK(32),
},
.num_resources = ARRAY_SIZE(usb_resources),
@ -119,7 +106,7 @@ static inline void ux500_usb_dma_update_rx_ch_config(int *dev_type)
{
u32 idx;
for (idx = 0; idx < UX500_MUSB_DMA_NUM_RX_CHANNELS; idx++)
for (idx = 0; idx < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS; idx++)
musb_dma_rx_ch[idx].dev_type = dev_type[idx];
}
@ -127,7 +114,7 @@ static inline void ux500_usb_dma_update_tx_ch_config(int *dev_type)
{
u32 idx;
for (idx = 0; idx < UX500_MUSB_DMA_NUM_TX_CHANNELS; idx++)
for (idx = 0; idx < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS; idx++)
musb_dma_tx_ch[idx].dev_type = dev_type[idx];
}

View File

@ -54,10 +54,12 @@
#define MAX_LCLA_ALLOC_ATTEMPTS 256
/* Bit markings for allocation map */
#define D40_ALLOC_FREE (1 << 31)
#define D40_ALLOC_PHY (1 << 30)
#define D40_ALLOC_FREE BIT(31)
#define D40_ALLOC_PHY BIT(30)
#define D40_ALLOC_LOG_FREE 0
#define D40_MEMCPY_MAX_CHANS 8
/* Reserved event lines for memcpy only. */
#define DB8500_DMA_MEMCPY_EV_0 51
#define DB8500_DMA_MEMCPY_EV_1 56
@ -78,13 +80,13 @@ static int dma40_memcpy_channels[] = {
/* Default configuration for physcial memcpy */
struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
.mode = STEDMA40_MODE_PHYSICAL,
.dir = STEDMA40_MEM_TO_MEM,
.dir = DMA_MEM_TO_MEM,
.src_info.data_width = STEDMA40_BYTE_WIDTH,
.src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
.src_info.psize = STEDMA40_PSIZE_PHY_1,
.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
.dst_info.data_width = STEDMA40_BYTE_WIDTH,
.dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
.dst_info.psize = STEDMA40_PSIZE_PHY_1,
.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};
@ -92,13 +94,13 @@ struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
/* Default configuration for logical memcpy */
struct stedma40_chan_cfg dma40_memcpy_conf_log = {
.mode = STEDMA40_MODE_LOGICAL,
.dir = STEDMA40_MEM_TO_MEM,
.dir = DMA_MEM_TO_MEM,
.src_info.data_width = STEDMA40_BYTE_WIDTH,
.src_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
.src_info.psize = STEDMA40_PSIZE_LOG_1,
.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
.dst_info.data_width = STEDMA40_BYTE_WIDTH,
.dst_info.data_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
.dst_info.psize = STEDMA40_PSIZE_LOG_1,
.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};
@ -522,6 +524,8 @@ struct d40_gen_dmac {
* @phy_start: Physical memory start of the DMA registers.
* @phy_size: Size of the DMA register map.
* @irq: The IRQ number.
* @num_memcpy_chans: The number of channels used for memcpy (mem-to-mem
* transfers).
* @num_phy_chans: The number of physical channels. Read from HW. This
* is the number of available channels for this driver, not counting "Secure
* mode" allocated physical channels.
@ -565,6 +569,7 @@ struct d40_base {
phys_addr_t phy_start;
resource_size_t phy_size;
int irq;
int num_memcpy_chans;
int num_phy_chans;
int num_log_chans;
struct device_dma_parameters dma_parms;
@ -843,7 +848,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
* that uses linked lists.
*/
if (!(chan->phy_chan->use_soft_lli &&
chan->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM))
chan->dma_cfg.dir == DMA_DEV_TO_MEM))
curr_lcla = d40_lcla_alloc_one(chan, desc);
first_lcla = curr_lcla;
@ -1005,20 +1010,21 @@ static int d40_psize_2_burst_size(bool is_log, int psize)
/*
* The dma only supports transmitting packages up to
* STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
* dma elements required to send the entire sg list
* STEDMA40_MAX_SEG_SIZE * data_width, where data_width is stored in Bytes.
*
* Calculate the total number of dma elements required to send the entire sg list.
*/
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
int dmalen;
u32 max_w = max(data_width1, data_width2);
u32 min_w = min(data_width1, data_width2);
u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);
if (seg_max > STEDMA40_MAX_SEG_SIZE)
seg_max -= (1 << max_w);
seg_max -= max_w;
if (!IS_ALIGNED(size, 1 << max_w))
if (!IS_ALIGNED(size, max_w))
return -EINVAL;
if (size <= seg_max)
@ -1311,12 +1317,12 @@ static void d40_config_set_event(struct d40_chan *d40c,
u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
/* Enable event line connected to device (or memcpy) */
if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
(d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
(d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
__d40_config_set_event(d40c, event_type, event,
D40_CHAN_REG_SSLNK);
if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
if (d40c->dma_cfg.dir != DMA_DEV_TO_MEM)
__d40_config_set_event(d40c, event_type, event,
D40_CHAN_REG_SDLNK);
}
@ -1464,7 +1470,7 @@ static u32 d40_residue(struct d40_chan *d40c)
>> D40_SREG_ELEM_PHY_ECNT_POS;
}
return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
return num_elt * d40c->dma_cfg.dst_info.data_width;
}
static bool d40_tx_is_linked(struct d40_chan *d40c)
@ -1740,7 +1746,7 @@ static irqreturn_t d40_handle_interrupt(int irq, void *data)
}
/* ACK interrupt */
writel(1 << idx, base->virtbase + il[row].clr);
writel(BIT(idx), base->virtbase + il[row].clr);
spin_lock(&d40c->lock);
@ -1776,7 +1782,7 @@ static int d40_validate_conf(struct d40_chan *d40c,
res = -EINVAL;
}
if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
if (conf->dir == DMA_DEV_TO_DEV) {
/*
* DMAC HW supports it. Will be added to this driver,
* in case any dma client requires it.
@ -1786,9 +1792,9 @@ static int d40_validate_conf(struct d40_chan *d40c,
}
if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
(1 << conf->src_info.data_width) !=
conf->src_info.data_width !=
d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
(1 << conf->dst_info.data_width)) {
conf->dst_info.data_width) {
/*
* The DMAC hardware only supports
* src (burst x width) == dst (burst x width)
@ -1830,8 +1836,8 @@ static bool d40_alloc_mask_set(struct d40_phy_res *phy,
if (phy->allocated_src == D40_ALLOC_FREE)
phy->allocated_src = D40_ALLOC_LOG_FREE;
if (!(phy->allocated_src & (1 << log_event_line))) {
phy->allocated_src |= 1 << log_event_line;
if (!(phy->allocated_src & BIT(log_event_line))) {
phy->allocated_src |= BIT(log_event_line);
goto found;
} else
goto not_found;
@ -1842,8 +1848,8 @@ static bool d40_alloc_mask_set(struct d40_phy_res *phy,
if (phy->allocated_dst == D40_ALLOC_FREE)
phy->allocated_dst = D40_ALLOC_LOG_FREE;
if (!(phy->allocated_dst & (1 << log_event_line))) {
phy->allocated_dst |= 1 << log_event_line;
if (!(phy->allocated_dst & BIT(log_event_line))) {
phy->allocated_dst |= BIT(log_event_line);
goto found;
} else
goto not_found;
@ -1873,11 +1879,11 @@ static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
/* Logical channel */
if (is_src) {
phy->allocated_src &= ~(1 << log_event_line);
phy->allocated_src &= ~BIT(log_event_line);
if (phy->allocated_src == D40_ALLOC_LOG_FREE)
phy->allocated_src = D40_ALLOC_FREE;
} else {
phy->allocated_dst &= ~(1 << log_event_line);
phy->allocated_dst &= ~BIT(log_event_line);
if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
phy->allocated_dst = D40_ALLOC_FREE;
}
@ -1907,11 +1913,11 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
phys = d40c->base->phy_res;
num_phy_chans = d40c->base->num_phy_chans;
if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
log_num = 2 * dev_type;
is_src = true;
} else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
} else if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
/* dst event lines are used for logical memcpy */
log_num = 2 * dev_type + 1;
is_src = false;
@ -1922,7 +1928,7 @@ static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
event_line = D40_TYPE_TO_EVENT(dev_type);
if (!is_log) {
if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
if (d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
/* Find physical half channel */
if (d40c->dma_cfg.use_fixed_channel) {
i = d40c->dma_cfg.phy_channel;
@ -2070,10 +2076,10 @@ static int d40_free_dma(struct d40_chan *d40c)
return -EINVAL;
}
if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
d40c->dma_cfg.dir == DMA_MEM_TO_MEM)
is_src = false;
else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
is_src = true;
else {
chan_err(d40c, "Unknown direction\n");
@ -2135,10 +2141,10 @@ static bool d40_is_paused(struct d40_chan *d40c)
goto _exit;
}
if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
if (d40c->dma_cfg.dir == DMA_MEM_TO_DEV ||
d40c->dma_cfg.dir == DMA_MEM_TO_MEM) {
status = readl(chanbase + D40_CHAN_REG_SDLNK);
} else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
} else if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM) {
status = readl(chanbase + D40_CHAN_REG_SSLNK);
} else {
chan_err(d40c, "Unknown direction\n");
@ -2358,7 +2364,7 @@ static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
u32 rtreg;
u32 event = D40_TYPE_TO_EVENT(dev_type);
u32 group = D40_TYPE_TO_GROUP(dev_type);
u32 bit = 1 << event;
u32 bit = BIT(event);
u32 prioreg;
struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
@ -2389,12 +2395,12 @@ static void d40_set_prio_realtime(struct d40_chan *d40c)
if (d40c->base->rev < 3)
return;
if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
(d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
if ((d40c->dma_cfg.dir == DMA_DEV_TO_MEM) ||
(d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
__d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);
if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
(d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
if ((d40c->dma_cfg.dir == DMA_MEM_TO_DEV) ||
(d40c->dma_cfg.dir == DMA_DEV_TO_DEV))
__d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
}
@ -2425,11 +2431,11 @@ static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
switch (D40_DT_FLAGS_DIR(flags)) {
case 0:
cfg.dir = STEDMA40_MEM_TO_PERIPH;
cfg.dir = DMA_MEM_TO_DEV;
cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
break;
case 1:
cfg.dir = STEDMA40_PERIPH_TO_MEM;
cfg.dir = DMA_DEV_TO_MEM;
cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
break;
}
@ -2475,7 +2481,7 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
d40_set_prio_realtime(d40c);
if (chan_is_logical(d40c)) {
if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
if (d40c->dma_cfg.dir == DMA_DEV_TO_MEM)
d40c->lcpa = d40c->base->lcpa_base +
d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
else
@ -2675,33 +2681,10 @@ static void d40_terminate_all(struct dma_chan *chan)
static int
dma40_config_to_halfchannel(struct d40_chan *d40c,
struct stedma40_half_channel_info *info,
enum dma_slave_buswidth width,
u32 maxburst)
{
enum stedma40_periph_data_width addr_width;
int psize;
switch (width) {
case DMA_SLAVE_BUSWIDTH_1_BYTE:
addr_width = STEDMA40_BYTE_WIDTH;
break;
case DMA_SLAVE_BUSWIDTH_2_BYTES:
addr_width = STEDMA40_HALFWORD_WIDTH;
break;
case DMA_SLAVE_BUSWIDTH_4_BYTES:
addr_width = STEDMA40_WORD_WIDTH;
break;
case DMA_SLAVE_BUSWIDTH_8_BYTES:
addr_width = STEDMA40_DOUBLEWORD_WIDTH;
break;
default:
dev_err(d40c->base->dev,
"illegal peripheral address width "
"requested (%d)\n",
width);
return -EINVAL;
}
if (chan_is_logical(d40c)) {
if (maxburst >= 16)
psize = STEDMA40_PSIZE_LOG_16;
@ -2722,7 +2705,6 @@ dma40_config_to_halfchannel(struct d40_chan *d40c,
psize = STEDMA40_PSIZE_PHY_1;
}
info->data_width = addr_width;
info->psize = psize;
info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
@ -2748,12 +2730,12 @@ static int d40_set_runtime_config(struct dma_chan *chan,
if (config->direction == DMA_DEV_TO_MEM) {
config_addr = config->src_addr;
if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
if (cfg->dir != DMA_DEV_TO_MEM)
dev_dbg(d40c->base->dev,
"channel was not configured for peripheral "
"to memory transfer (%d) overriding\n",
cfg->dir);
cfg->dir = STEDMA40_PERIPH_TO_MEM;
cfg->dir = DMA_DEV_TO_MEM;
/* Configure the memory side */
if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
@ -2764,12 +2746,12 @@ static int d40_set_runtime_config(struct dma_chan *chan,
} else if (config->direction == DMA_MEM_TO_DEV) {
config_addr = config->dst_addr;
if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
if (cfg->dir != DMA_MEM_TO_DEV)
dev_dbg(d40c->base->dev,
"channel was not configured for memory "
"to peripheral transfer (%d) overriding\n",
cfg->dir);
cfg->dir = STEDMA40_MEM_TO_PERIPH;
cfg->dir = DMA_MEM_TO_DEV;
/* Configure the memory side */
if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
@ -2806,14 +2788,24 @@ static int d40_set_runtime_config(struct dma_chan *chan,
src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
}
/* Only valid widths are; 1, 2, 4 and 8. */
if (src_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
src_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
dst_addr_width <= DMA_SLAVE_BUSWIDTH_UNDEFINED ||
dst_addr_width > DMA_SLAVE_BUSWIDTH_8_BYTES ||
((src_addr_width > 1) && (src_addr_width & 1)) ||
((dst_addr_width > 1) && (dst_addr_width & 1)))
return -EINVAL;
cfg->src_info.data_width = src_addr_width;
cfg->dst_info.data_width = dst_addr_width;
ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
src_addr_width,
src_maxburst);
if (ret)
return ret;
ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
dst_addr_width,
dst_maxburst);
if (ret)
return ret;
@ -2953,7 +2945,7 @@ static int __init d40_dmaengine_init(struct d40_base *base,
}
d40_chan_init(base, &base->dma_memcpy, base->log_chans,
base->num_log_chans, ARRAY_SIZE(dma40_memcpy_channels));
base->num_log_chans, base->num_memcpy_chans);
dma_cap_zero(base->dma_memcpy.cap_mask);
dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
@ -3154,6 +3146,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
struct d40_base *base = NULL;
int num_log_chans = 0;
int num_phy_chans;
int num_memcpy_chans;
int clk_ret = -EINVAL;
int i;
u32 pid;
@ -3224,6 +3217,12 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
else
num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
/* The number of channels used for memcpy */
if (plat_data->num_of_memcpy_chans)
num_memcpy_chans = plat_data->num_of_memcpy_chans;
else
num_memcpy_chans = ARRAY_SIZE(dma40_memcpy_channels);
num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
dev_info(&pdev->dev,
@ -3231,7 +3230,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
rev, res->start, num_phy_chans, num_log_chans);
base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
(num_phy_chans + num_log_chans + ARRAY_SIZE(dma40_memcpy_channels)) *
(num_phy_chans + num_log_chans + num_memcpy_chans) *
sizeof(struct d40_chan), GFP_KERNEL);
if (base == NULL) {
@ -3241,6 +3240,7 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
base->rev = rev;
base->clk = clk;
base->num_memcpy_chans = num_memcpy_chans;
base->num_phy_chans = num_phy_chans;
base->num_log_chans = num_log_chans;
base->phy_start = res->start;
@ -3484,12 +3484,8 @@ static int __init d40_of_probe(struct platform_device *pdev,
struct device_node *np)
{
struct stedma40_platform_data *pdata;
/*
* FIXME: Fill in this routine as more support is added.
* First platform enabled (u8500) doens't need any extra
* properties to run, so this is fairly sparce currently.
*/
int num_phy = 0, num_memcpy = 0, num_disabled = 0;
const const __be32 *list;
pdata = devm_kzalloc(&pdev->dev,
sizeof(struct stedma40_platform_data),
@ -3497,6 +3493,41 @@ static int __init d40_of_probe(struct platform_device *pdev,
if (!pdata)
return -ENOMEM;
/* If absent this value will be obtained from h/w. */
of_property_read_u32(np, "dma-channels", &num_phy);
if (num_phy > 0)
pdata->num_of_phy_chans = num_phy;
list = of_get_property(np, "memcpy-channels", &num_memcpy);
num_memcpy /= sizeof(*list);
if (num_memcpy > D40_MEMCPY_MAX_CHANS || num_memcpy <= 0) {
d40_err(&pdev->dev,
"Invalid number of memcpy channels specified (%d)\n",
num_memcpy);
return -EINVAL;
}
pdata->num_of_memcpy_chans = num_memcpy;
of_property_read_u32_array(np, "memcpy-channels",
dma40_memcpy_channels,
num_memcpy);
list = of_get_property(np, "disabled-channels", &num_disabled);
num_disabled /= sizeof(*list);
if (num_disabled > STEDMA40_MAX_PHYS || num_disabled < 0) {
d40_err(&pdev->dev,
"Invalid number of disabled channels specified (%d)\n",
num_disabled);
return -EINVAL;
}
of_property_read_u32_array(np, "disabled-channels",
pdata->disabled_channels,
num_disabled);
pdata->disabled_channels[num_disabled] = -1;
pdev->dev.platform_data = pdata;
return 0;

View File

@ -10,6 +10,18 @@
#include "ste_dma40_ll.h"
u8 d40_width_to_bits(enum dma_slave_buswidth width)
{
if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
return STEDMA40_ESIZE_8_BIT;
else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
return STEDMA40_ESIZE_16_BIT;
else if (width == DMA_SLAVE_BUSWIDTH_8_BYTES)
return STEDMA40_ESIZE_64_BIT;
else
return STEDMA40_ESIZE_32_BIT;
}
/* Sets up proper LCSP1 and LCSP3 register for a logical channel */
void d40_log_cfg(struct stedma40_chan_cfg *cfg,
u32 *lcsp1, u32 *lcsp3)
@ -18,32 +30,34 @@ void d40_log_cfg(struct stedma40_chan_cfg *cfg,
u32 l1 = 0; /* src */
/* src is mem? -> increase address pos */
if (cfg->dir == STEDMA40_MEM_TO_PERIPH ||
cfg->dir == STEDMA40_MEM_TO_MEM)
l1 |= 1 << D40_MEM_LCSP1_SCFG_INCR_POS;
if (cfg->dir == DMA_MEM_TO_DEV ||
cfg->dir == DMA_MEM_TO_MEM)
l1 |= BIT(D40_MEM_LCSP1_SCFG_INCR_POS);
/* dst is mem? -> increase address pos */
if (cfg->dir == STEDMA40_PERIPH_TO_MEM ||
cfg->dir == STEDMA40_MEM_TO_MEM)
l3 |= 1 << D40_MEM_LCSP3_DCFG_INCR_POS;
if (cfg->dir == DMA_DEV_TO_MEM ||
cfg->dir == DMA_MEM_TO_MEM)
l3 |= BIT(D40_MEM_LCSP3_DCFG_INCR_POS);
/* src is hw? -> master port 1 */
if (cfg->dir == STEDMA40_PERIPH_TO_MEM ||
cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
l1 |= 1 << D40_MEM_LCSP1_SCFG_MST_POS;
if (cfg->dir == DMA_DEV_TO_MEM ||
cfg->dir == DMA_DEV_TO_DEV)
l1 |= BIT(D40_MEM_LCSP1_SCFG_MST_POS);
/* dst is hw? -> master port 1 */
if (cfg->dir == STEDMA40_MEM_TO_PERIPH ||
cfg->dir == STEDMA40_PERIPH_TO_PERIPH)
l3 |= 1 << D40_MEM_LCSP3_DCFG_MST_POS;
if (cfg->dir == DMA_MEM_TO_DEV ||
cfg->dir == DMA_DEV_TO_DEV)
l3 |= BIT(D40_MEM_LCSP3_DCFG_MST_POS);
l3 |= 1 << D40_MEM_LCSP3_DCFG_EIM_POS;
l3 |= BIT(D40_MEM_LCSP3_DCFG_EIM_POS);
l3 |= cfg->dst_info.psize << D40_MEM_LCSP3_DCFG_PSIZE_POS;
l3 |= cfg->dst_info.data_width << D40_MEM_LCSP3_DCFG_ESIZE_POS;
l3 |= d40_width_to_bits(cfg->dst_info.data_width)
<< D40_MEM_LCSP3_DCFG_ESIZE_POS;
l1 |= 1 << D40_MEM_LCSP1_SCFG_EIM_POS;
l1 |= BIT(D40_MEM_LCSP1_SCFG_EIM_POS);
l1 |= cfg->src_info.psize << D40_MEM_LCSP1_SCFG_PSIZE_POS;
l1 |= cfg->src_info.data_width << D40_MEM_LCSP1_SCFG_ESIZE_POS;
l1 |= d40_width_to_bits(cfg->src_info.data_width)
<< D40_MEM_LCSP1_SCFG_ESIZE_POS;
*lcsp1 = l1;
*lcsp3 = l3;
@ -55,59 +69,61 @@ void d40_phy_cfg(struct stedma40_chan_cfg *cfg, u32 *src_cfg, u32 *dst_cfg)
u32 src = 0;
u32 dst = 0;
if ((cfg->dir == STEDMA40_PERIPH_TO_MEM) ||
(cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) {
if ((cfg->dir == DMA_DEV_TO_MEM) ||
(cfg->dir == DMA_DEV_TO_DEV)) {
/* Set master port to 1 */
src |= 1 << D40_SREG_CFG_MST_POS;
src |= BIT(D40_SREG_CFG_MST_POS);
src |= D40_TYPE_TO_EVENT(cfg->dev_type);
if (cfg->src_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
src |= 1 << D40_SREG_CFG_PHY_TM_POS;
src |= BIT(D40_SREG_CFG_PHY_TM_POS);
else
src |= 3 << D40_SREG_CFG_PHY_TM_POS;
}
if ((cfg->dir == STEDMA40_MEM_TO_PERIPH) ||
(cfg->dir == STEDMA40_PERIPH_TO_PERIPH)) {
if ((cfg->dir == DMA_MEM_TO_DEV) ||
(cfg->dir == DMA_DEV_TO_DEV)) {
/* Set master port to 1 */
dst |= 1 << D40_SREG_CFG_MST_POS;
dst |= BIT(D40_SREG_CFG_MST_POS);
dst |= D40_TYPE_TO_EVENT(cfg->dev_type);
if (cfg->dst_info.flow_ctrl == STEDMA40_NO_FLOW_CTRL)
dst |= 1 << D40_SREG_CFG_PHY_TM_POS;
dst |= BIT(D40_SREG_CFG_PHY_TM_POS);
else
dst |= 3 << D40_SREG_CFG_PHY_TM_POS;
}
/* Interrupt on end of transfer for destination */
dst |= 1 << D40_SREG_CFG_TIM_POS;
dst |= BIT(D40_SREG_CFG_TIM_POS);
/* Generate interrupt on error */
src |= 1 << D40_SREG_CFG_EIM_POS;
dst |= 1 << D40_SREG_CFG_EIM_POS;
src |= BIT(D40_SREG_CFG_EIM_POS);
dst |= BIT(D40_SREG_CFG_EIM_POS);
/* PSIZE */
if (cfg->src_info.psize != STEDMA40_PSIZE_PHY_1) {
src |= 1 << D40_SREG_CFG_PHY_PEN_POS;
src |= BIT(D40_SREG_CFG_PHY_PEN_POS);
src |= cfg->src_info.psize << D40_SREG_CFG_PSIZE_POS;
}
if (cfg->dst_info.psize != STEDMA40_PSIZE_PHY_1) {
dst |= 1 << D40_SREG_CFG_PHY_PEN_POS;
dst |= BIT(D40_SREG_CFG_PHY_PEN_POS);
dst |= cfg->dst_info.psize << D40_SREG_CFG_PSIZE_POS;
}
/* Element size */
src |= cfg->src_info.data_width << D40_SREG_CFG_ESIZE_POS;
dst |= cfg->dst_info.data_width << D40_SREG_CFG_ESIZE_POS;
src |= d40_width_to_bits(cfg->src_info.data_width)
<< D40_SREG_CFG_ESIZE_POS;
dst |= d40_width_to_bits(cfg->dst_info.data_width)
<< D40_SREG_CFG_ESIZE_POS;
/* Set the priority bit to high for the physical channel */
if (cfg->high_priority) {
src |= 1 << D40_SREG_CFG_PRI_POS;
dst |= 1 << D40_SREG_CFG_PRI_POS;
src |= BIT(D40_SREG_CFG_PRI_POS);
dst |= BIT(D40_SREG_CFG_PRI_POS);
}
if (cfg->src_info.big_endian)
src |= 1 << D40_SREG_CFG_LBE_POS;
src |= BIT(D40_SREG_CFG_LBE_POS);
if (cfg->dst_info.big_endian)
dst |= 1 << D40_SREG_CFG_LBE_POS;
dst |= BIT(D40_SREG_CFG_LBE_POS);
*src_cfg = src;
*dst_cfg = dst;
@ -133,23 +149,22 @@ static int d40_phy_fill_lli(struct d40_phy_lli *lli,
num_elems = 2 << psize;
/* Must be aligned */
if (!IS_ALIGNED(data, 0x1 << data_width))
if (!IS_ALIGNED(data, data_width))
return -EINVAL;
/* Transfer size can't be smaller than (num_elms * elem_size) */
if (data_size < num_elems * (0x1 << data_width))
if (data_size < num_elems * data_width)
return -EINVAL;
/* The number of elements. IE now many chunks */
lli->reg_elt = (data_size >> data_width) << D40_SREG_ELEM_PHY_ECNT_POS;
lli->reg_elt = (data_size / data_width) << D40_SREG_ELEM_PHY_ECNT_POS;
/*
* Distance to next element sized entry.
* Usually the size of the element unless you want gaps.
*/
if (addr_inc)
lli->reg_elt |= (0x1 << data_width) <<
D40_SREG_ELEM_PHY_EIDX_POS;
lli->reg_elt |= data_width << D40_SREG_ELEM_PHY_EIDX_POS;
/* Where the data is */
lli->reg_ptr = data;
@ -157,18 +172,20 @@ static int d40_phy_fill_lli(struct d40_phy_lli *lli,
/* If this scatter list entry is the last one, no next link */
if (next_lli == 0)
lli->reg_lnk = 0x1 << D40_SREG_LNK_PHY_TCP_POS;
lli->reg_lnk = BIT(D40_SREG_LNK_PHY_TCP_POS);
else
lli->reg_lnk = next_lli;
/* Set/clear interrupt generation on this link item.*/
if (term_int)
lli->reg_cfg |= 0x1 << D40_SREG_CFG_TIM_POS;
lli->reg_cfg |= BIT(D40_SREG_CFG_TIM_POS);
else
lli->reg_cfg &= ~(0x1 << D40_SREG_CFG_TIM_POS);
lli->reg_cfg &= ~BIT(D40_SREG_CFG_TIM_POS);
/* Post link */
lli->reg_lnk |= 0 << D40_SREG_LNK_PHY_PRE_POS;
/*
* Post link - D40_SREG_LNK_PHY_PRE_POS = 0
* Relink happens after transfer completion.
*/
return 0;
}
@ -177,16 +194,16 @@ static int d40_seg_size(int size, int data_width1, int data_width2)
{
u32 max_w = max(data_width1, data_width2);
u32 min_w = min(data_width1, data_width2);
u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);
u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE * min_w, max_w);
if (seg_max > STEDMA40_MAX_SEG_SIZE)
seg_max -= (1 << max_w);
seg_max -= max_w;
if (size <= seg_max)
return size;
if (size <= 2 * seg_max)
return ALIGN(size / 2, 1 << max_w);
return ALIGN(size / 2, max_w);
return seg_max;
}
@ -352,10 +369,10 @@ static void d40_log_fill_lli(struct d40_log_lli *lli,
lli->lcsp13 = reg_cfg;
/* The number of elements to transfer */
lli->lcsp02 = ((data_size >> data_width) <<
lli->lcsp02 = ((data_size / data_width) <<
D40_MEM_LCSP0_ECNT_POS) & D40_MEM_LCSP0_ECNT_MASK;
BUG_ON((data_size >> data_width) > STEDMA40_MAX_SEG_SIZE);
BUG_ON((data_size / data_width) > STEDMA40_MAX_SEG_SIZE);
/* 16 LSBs address of the current element */
lli->lcsp02 |= data & D40_MEM_LCSP0_SPTR_MASK;
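
The data_width conversion running through this series changes data_width from
the hardware ESIZE encoding (a log2 value such as STEDMA40_ESIZE_32_BIT) to a
plain byte count (enum dma_slave_buswidth), which is why expressions like
"1 << data_width" and "data_size >> data_width" above become plain
multiplications and divisions, with d40_width_to_bits() recreating the register
encoding only where hardware registers are written. A small illustration of the
equivalence; both helpers are hypothetical and exist only for this sketch.

#include <linux/dmaengine.h>

/*
 * Old scheme: data_width held the ESIZE register encoding, so the element
 * size in bytes was (1 << data_width), e.g. STEDMA40_ESIZE_32_BIT (0x2)
 * gave 4 bytes.  New scheme: data_width holds the byte count directly,
 * e.g. DMA_SLAVE_BUSWIDTH_4_BYTES (4), so it is used as-is and only the
 * register writers call d40_width_to_bits() to get the encoding back.
 */
static inline u32 elem_size_old(u32 esize_encoding)
{
	return 1 << esize_encoding;		/* 0x2 -> 4 bytes */
}

static inline u32 elem_size_new(enum dma_slave_buswidth width)
{
	return width;				/* 4 -> 4 bytes */
}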

View File

@ -25,11 +25,19 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/usb/musb-ux500.h>
#include "musb_core.h"
static struct musb_hdrc_config ux500_musb_hdrc_config = {
.multipoint = true,
.dyn_fifo = true,
.num_eps = 16,
.ram_bits = 16,
};
struct ux500_glue {
struct device *dev;
struct platform_device *musb;
@ -187,14 +195,57 @@ static const struct musb_platform_ops ux500_ops = {
.set_vbus = ux500_musb_set_vbus,
};
static struct musb_hdrc_platform_data *
ux500_of_probe(struct platform_device *pdev, struct device_node *np)
{
struct musb_hdrc_platform_data *pdata;
const char *mode;
int strlen;
pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
if (!pdata)
return NULL;
mode = of_get_property(np, "dr_mode", &strlen);
if (!mode) {
dev_err(&pdev->dev, "No 'dr_mode' property found\n");
return NULL;
}
if (strlen > 0) {
if (!strcmp(mode, "host"))
pdata->mode = MUSB_HOST;
if (!strcmp(mode, "otg"))
pdata->mode = MUSB_OTG;
if (!strcmp(mode, "peripheral"))
pdata->mode = MUSB_PERIPHERAL;
}
return pdata;
}
static int ux500_probe(struct platform_device *pdev)
{
struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data;
struct device_node *np = pdev->dev.of_node;
struct platform_device *musb;
struct ux500_glue *glue;
struct clk *clk;
int ret = -ENOMEM;
if (!pdata) {
if (np) {
pdata = ux500_of_probe(pdev, np);
if (!pdata)
goto err0;
pdev->dev.platform_data = pdata;
} else {
dev_err(&pdev->dev, "no pdata or device tree found\n");
goto err0;
}
}
glue = kzalloc(sizeof(*glue), GFP_KERNEL);
if (!glue) {
dev_err(&pdev->dev, "failed to allocate glue context\n");
@ -221,14 +272,16 @@ static int ux500_probe(struct platform_device *pdev)
}
musb->dev.parent = &pdev->dev;
musb->dev.dma_mask = pdev->dev.dma_mask;
musb->dev.dma_mask = &pdev->dev.coherent_dma_mask;
musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
musb->dev.of_node = pdev->dev.of_node;
glue->dev = &pdev->dev;
glue->musb = musb;
glue->clk = clk;
pdata->platform_ops = &ux500_ops;
pdata->config = &ux500_musb_hdrc_config;
platform_set_drvdata(pdev, glue);
@ -320,12 +373,18 @@ static const struct dev_pm_ops ux500_pm_ops = {
#define DEV_PM_OPS NULL
#endif
static const struct of_device_id ux500_match[] = {
{ .compatible = "stericsson,db8500-musb", },
{}
};
static struct platform_driver ux500_driver = {
.probe = ux500_probe,
.remove = ux500_remove,
.driver = {
.name = "musb-ux500",
.pm = DEV_PM_OPS,
.of_match_table = ux500_match,
},
};

View File

@ -34,6 +34,11 @@
#include <linux/platform_data/usb-musb-ux500.h>
#include "musb_core.h"
static const char *iep_chan_names[] = { "iep_1_9", "iep_2_10", "iep_3_11", "iep_4_12",
"iep_5_13", "iep_6_14", "iep_7_15", "iep_8" };
static const char *oep_chan_names[] = { "oep_1_9", "oep_2_10", "oep_3_11", "oep_4_12",
"oep_5_13", "oep_6_14", "oep_7_15", "oep_8" };
struct ux500_dma_channel {
struct dma_channel channel;
struct ux500_dma_controller *controller;
@ -48,10 +53,8 @@ struct ux500_dma_channel {
struct ux500_dma_controller {
struct dma_controller controller;
struct ux500_dma_channel rx_channel[UX500_MUSB_DMA_NUM_RX_CHANNELS];
struct ux500_dma_channel tx_channel[UX500_MUSB_DMA_NUM_TX_CHANNELS];
u32 num_rx_channels;
u32 num_tx_channels;
struct ux500_dma_channel rx_channel[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS];
struct ux500_dma_channel tx_channel[UX500_MUSB_DMA_NUM_RX_TX_CHANNELS];
void *private_data;
dma_addr_t phy_base;
};
@ -144,19 +147,15 @@ static struct dma_channel *ux500_dma_channel_allocate(struct dma_controller *c,
struct ux500_dma_channel *ux500_channel = NULL;
struct musb *musb = controller->private_data;
u8 ch_num = hw_ep->epnum - 1;
u32 max_ch;
/* Max 8 DMA channels (0 - 7). Each DMA channel can only be allocated
/* 8 DMA channels (0 - 7). Each DMA channel can only be allocated
* to specified hw_ep. For example DMA channel 0 can only be allocated
* to hw_ep 1 and 9.
*/
if (ch_num > 7)
ch_num -= 8;
max_ch = is_tx ? controller->num_tx_channels :
controller->num_rx_channels;
if (ch_num >= max_ch)
if (ch_num >= UX500_MUSB_DMA_NUM_RX_TX_CHANNELS)
return NULL;
ux500_channel = is_tx ? &(controller->tx_channel[ch_num]) :
@ -264,7 +263,7 @@ static int ux500_dma_controller_stop(struct dma_controller *c)
struct dma_channel *channel;
u8 ch_num;
for (ch_num = 0; ch_num < controller->num_rx_channels; ch_num++) {
for (ch_num = 0; ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS; ch_num++) {
channel = &controller->rx_channel[ch_num].channel;
ux500_channel = channel->private_data;
@ -274,7 +273,7 @@ static int ux500_dma_controller_stop(struct dma_controller *c)
dma_release_channel(ux500_channel->dma_chan);
}
for (ch_num = 0; ch_num < controller->num_tx_channels; ch_num++) {
for (ch_num = 0; ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS; ch_num++) {
channel = &controller->tx_channel[ch_num].channel;
ux500_channel = channel->private_data;
@ -295,34 +294,36 @@ static int ux500_dma_controller_start(struct dma_controller *c)
struct musb *musb = controller->private_data;
struct device *dev = musb->controller;
struct musb_hdrc_platform_data *plat = dev->platform_data;
struct ux500_musb_board_data *data = plat->board_data;
struct ux500_musb_board_data *data;
struct dma_channel *dma_channel = NULL;
char **chan_names;
u32 ch_num;
u8 dir;
u8 is_tx = 0;
void **param_array;
struct ux500_dma_channel *channel_array;
u32 ch_count;
dma_cap_mask_t mask;
if ((data->num_rx_channels > UX500_MUSB_DMA_NUM_RX_CHANNELS) ||
(data->num_tx_channels > UX500_MUSB_DMA_NUM_TX_CHANNELS))
if (!plat) {
dev_err(musb->controller, "No platform data\n");
return -EINVAL;
}
controller->num_rx_channels = data->num_rx_channels;
controller->num_tx_channels = data->num_tx_channels;
data = plat->board_data;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
/* Prepare the loop for RX channels */
channel_array = controller->rx_channel;
ch_count = data->num_rx_channels;
param_array = data->dma_rx_param_array;
param_array = data ? data->dma_rx_param_array : NULL;
chan_names = (char **)iep_chan_names;
for (dir = 0; dir < 2; dir++) {
for (ch_num = 0; ch_num < ch_count; ch_num++) {
for (ch_num = 0;
ch_num < UX500_MUSB_DMA_NUM_RX_TX_CHANNELS;
ch_num++) {
ux500_channel = &channel_array[ch_num];
ux500_channel->controller = controller;
ux500_channel->ch_num = ch_num;
@ -333,9 +334,15 @@ static int ux500_dma_controller_start(struct dma_controller *c)
dma_channel->status = MUSB_DMA_STATUS_FREE;
dma_channel->max_len = SZ_16M;
ux500_channel->dma_chan = dma_request_channel(mask,
data->dma_filter,
param_array[ch_num]);
ux500_channel->dma_chan =
dma_request_slave_channel(dev, chan_names[ch_num]);
if (!ux500_channel->dma_chan)
ux500_channel->dma_chan =
dma_request_channel(mask,
data->dma_filter,
param_array[ch_num]);
if (!ux500_channel->dma_chan) {
ERR("Dma pipe allocation error dir=%d ch=%d\n",
dir, ch_num);
@ -350,8 +357,8 @@ static int ux500_dma_controller_start(struct dma_controller *c)
/* Prepare the loop for TX channels */
channel_array = controller->tx_channel;
ch_count = data->num_tx_channels;
param_array = data->dma_tx_param_array;
param_array = data ? data->dma_tx_param_array : NULL;
chan_names = (char **)oep_chan_names;
is_tx = 1;
}

View File

@ -70,21 +70,6 @@ enum stedma40_flow_ctrl {
STEDMA40_FLOW_CTRL,
};
enum stedma40_periph_data_width {
STEDMA40_BYTE_WIDTH = STEDMA40_ESIZE_8_BIT,
STEDMA40_HALFWORD_WIDTH = STEDMA40_ESIZE_16_BIT,
STEDMA40_WORD_WIDTH = STEDMA40_ESIZE_32_BIT,
STEDMA40_DOUBLEWORD_WIDTH = STEDMA40_ESIZE_64_BIT
};
enum stedma40_xfer_dir {
STEDMA40_MEM_TO_MEM = 1,
STEDMA40_MEM_TO_PERIPH,
STEDMA40_PERIPH_TO_MEM,
STEDMA40_PERIPH_TO_PERIPH
};
/**
* struct stedma40_half_channel_info - dst/src channel configuration
*
@ -95,7 +80,7 @@ enum stedma40_xfer_dir {
*/
struct stedma40_half_channel_info {
bool big_endian;
enum stedma40_periph_data_width data_width;
enum dma_slave_buswidth data_width;
int psize;
enum stedma40_flow_ctrl flow_ctrl;
};
@ -120,7 +105,7 @@ struct stedma40_half_channel_info {
*
*/
struct stedma40_chan_cfg {
enum stedma40_xfer_dir dir;
enum dma_transfer_direction dir;
bool high_priority;
bool realtime;
enum stedma40_mode mode;
@ -147,6 +132,7 @@ struct stedma40_chan_cfg {
* @num_of_soft_lli_chans: The number of channels that needs to be configured
* to use SoftLLI.
* @use_esram_lcla: flag for mapping the lcla into esram region
* @num_of_memcpy_chans: The number of channels reserved for memcpy.
* @num_of_phy_chans: The number of physical channels implemented in HW.
* 0 means reading the number of channels from DMA HW but this is only valid
* for 'multiple of 4' channels, like 8.
@ -156,6 +142,7 @@ struct stedma40_platform_data {
int *soft_lli_chans;
int num_of_soft_lli_chans;
bool use_esram_lcla;
int num_of_memcpy_chans;
int num_of_phy_chans;
};

View File

@ -9,14 +9,11 @@
#include <linux/dmaengine.h>
#define UX500_MUSB_DMA_NUM_RX_CHANNELS 8
#define UX500_MUSB_DMA_NUM_TX_CHANNELS 8
#define UX500_MUSB_DMA_NUM_RX_TX_CHANNELS 8
struct ux500_musb_board_data {
void **dma_rx_param_array;
void **dma_tx_param_array;
u32 num_rx_channels;
u32 num_tx_channels;
bool (*dma_filter)(struct dma_chan *chan, void *filter_param);
};

View File

@ -76,20 +76,20 @@ static struct dma_chan *ux500_pcm_request_chan(struct snd_soc_pcm_runtime *rtd,
dma_params = snd_soc_dai_get_dma_data(dai, substream);
dma_cfg = dma_params->dma_cfg;
mem_data_width = STEDMA40_HALFWORD_WIDTH;
mem_data_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
switch (dma_params->data_size) {
case 32:
per_data_width = STEDMA40_WORD_WIDTH;
per_data_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
break;
case 16:
per_data_width = STEDMA40_HALFWORD_WIDTH;
per_data_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
break;
case 8:
per_data_width = STEDMA40_BYTE_WIDTH;
per_data_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
break;
default:
per_data_width = STEDMA40_WORD_WIDTH;
per_data_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
}
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {