Merge remote-tracking branch 'lorenzo/pci/dwc' into next

* lorenzo/pci/dwc:
  PCI: exynos: Fix a potential init_clk_resources NULL pointer dereference
  PCI: iproc: Fix NULL pointer dereference for BCMA
  PCI: dra7xx: Iterate over INTx status bits
  PCI: dra7xx: Fix legacy INTD IRQ handling
  PCI: qcom: Account for const type of of_device_id.data
  PCI: dwc: artpec6: Fix return value check in artpec6_add_pcie_ep()
  PCI: exynos: Remove deprecated PHY initialization code
  PCI: dwc: artpec6: Add support for the ARTPEC-7 SoC
  bindings: PCI: artpec: Add support for the ARTPEC-7 SoC
  PCI: dwc: artpec6: Deassert the core before waiting for PHY
  PCI: dwc: Make cpu_addr_fixup take struct dw_pcie as argument
  PCI: dwc: artpec6: Add support for endpoint mode
  bindings: PCI: artpec: Add support for endpoint mode
  PCI: dwc: artpec6: Split artpec6_pcie_establish_link() into smaller functions
  PCI: dwc: artpec6: Use BIT and GENMASK macros
  PCI: dwc: artpec6: Remove unused defines
  PCI: dwc: dra7xx: Help compiler to remove unused code
  PCI: dwc: dra7xx: Assign pp->ops in dra7xx_add_pcie_port() rather than in probe
  PCI: dwc: dra7xx: Refactor Kconfig and Makefile handling for host/ep mode
  PCI: designware-ep: Add generic function for raising MSI irq
  PCI: designware-ep: Remove static keyword from dw_pcie_ep_reset_bar()
  PCI: designware-ep: Pre-allocate memory for MSI in dw_pcie_ep_init
  PCI: designware-ep: Read-only registers need DBI_RO_WR_EN to be writable
  PCI: designware-ep: dw_pcie_ep_set_msi() should only set MMC bits
  PCI: dwc: Use the DMA-API to get the MSI address
  pci: dwc: pci-dra7xx: Make shutdown handler static

Includes a resolution to the conflict between:

  4494738de0 ("PCI: endpoint: Add the function number as argument to EPC ops")
  6f6d787371 ("PCI: designware-ep: Add generic function for raising MSI irq")

The resolution was provided by Niklas Cassel <niklas.cassel@axis.com>:
https://lkml.kernel.org/r/20180201085608.GA22568@axis.com
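
In effect, the resolution keeps the new func_no argument introduced by the
EPC-ops change while adding the generic MSI helper, so the merged endpoint
interfaces end up along these lines (a C sketch reconstructed from the
pcie-designware.h hunks further below, not a standalone build):

  struct dw_pcie_ep_ops {
  	void	(*ep_init)(struct dw_pcie_ep *ep);
  	/* raise_irq now takes the function number as well as the IRQ type */
  	int	(*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
  			     enum pci_epc_irq_type type, u8 interrupt_num);
  };

  /* generic MSI helper called by drivers such as dra7xx and artpec6 */
  int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
  			     u8 interrupt_num);
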
Bjorn Helgaas, 2018-02-01 11:36:07 -06:00 (committed by Bjorn Helgaas)
commit 16093362d6
15 changed files with 612 additions and 391 deletions

View File

@ -4,7 +4,10 @@ This PCIe host controller is based on the Synopsys DesignWare PCIe IP
and thus inherits all the common properties defined in designware-pcie.txt.
Required properties:
- compatible: "axis,artpec6-pcie", "snps,dw-pcie"
- compatible: "axis,artpec6-pcie", "snps,dw-pcie" for ARTPEC-6 in RC mode;
"axis,artpec6-pcie-ep", "snps,dw-pcie" for ARTPEC-6 in EP mode;
"axis,artpec7-pcie", "snps,dw-pcie" for ARTPEC-7 in RC mode;
"axis,artpec7-pcie-ep", "snps,dw-pcie" for ARTPEC-7 in EP mode;
- reg: base addresses and lengths of the PCIe controller (DBI),
the PHY controller, and configuration address space.
- reg-names: Must include the following entries:

View File

@ -6,9 +6,6 @@ and thus inherits all the common properties defined in designware-pcie.txt.
Required properties:
- compatible: "samsung,exynos5440-pcie"
- reg: base addresses and lengths of the PCIe controller,
the PHY controller, additional register for the PHY controller.
(Registers for the PHY controller are DEPRECATED.
Use the PHY framework.)
- reg-names : First name should be set to "elbi".
And use the "config" instead of getting the configuration address space
from "ranges".
@ -23,49 +20,8 @@ For other common properties, refer to
Example:
SoC-specific DT Entry:
SoC-specific DT Entry (with using PHY framework):
pcie@290000 {
compatible = "samsung,exynos5440-pcie", "snps,dw-pcie";
reg = <0x290000 0x1000
0x270000 0x1000
0x271000 0x40>;
interrupts = <0 20 0>, <0 21 0>, <0 22 0>;
clocks = <&clock 28>, <&clock 27>;
clock-names = "pcie", "pcie_bus";
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
ranges = <0x00000800 0 0x40000000 0x40000000 0 0x00001000 /* configuration space */
0x81000000 0 0 0x40001000 0 0x00010000 /* downstream I/O */
0x82000000 0 0x40011000 0x40011000 0 0x1ffef000>; /* non-prefetchable memory */
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
num-lanes = <4>;
};
pcie@2a0000 {
compatible = "samsung,exynos5440-pcie", "snps,dw-pcie";
reg = <0x2a0000 0x1000
0x272000 0x1000
0x271040 0x40>;
interrupts = <0 23 0>, <0 24 0>, <0 25 0>;
clocks = <&clock 29>, <&clock 27>;
clock-names = "pcie", "pcie_bus";
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
ranges = <0x00000800 0 0x60000000 0x60000000 0 0x00001000 /* configuration space */
0x81000000 0 0 0x60001000 0 0x00010000 /* downstream I/O */
0x82000000 0 0x60011000 0x60011000 0 0x1ffef000>; /* non-prefetchable memory */
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &gic GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>;
num-lanes = <4>;
};
With using PHY framework:
pcie_phy0: pcie-phy@270000 {
...
reg = <0x270000 0x1000>, <0x271000 0x40>;
@ -74,13 +30,21 @@ With using PHY framework:
};
pcie@290000 {
...
compatible = "samsung,exynos5440-pcie", "snps,dw-pcie";
reg = <0x290000 0x1000>, <0x40000000 0x1000>;
reg-names = "elbi", "config";
clocks = <&clock 28>, <&clock 27>;
clock-names = "pcie", "pcie_bus";
#address-cells = <3>;
#size-cells = <2>;
device_type = "pci";
phys = <&pcie_phy0>;
ranges = <0x81000000 0 0 0x60001000 0 0x00010000
0x82000000 0 0x60011000 0x60011000 0 0x1ffef000>;
...
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
interrupt-map = <0 0 0 0 &gic GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>;
num-lanes = <4>;
};
Board-specific DT Entry:

View File

@ -15,39 +15,38 @@ config PCIE_DW_EP
select PCIE_DW
config PCI_DRA7XX
bool "TI DRA7xx PCIe controller"
depends on SOC_DRA7XX || COMPILE_TEST
depends on (PCI && PCI_MSI_IRQ_DOMAIN) || PCI_ENDPOINT
depends on OF && HAS_IOMEM && TI_PIPE3
help
Enables support for the PCIe controller in the DRA7xx SoC. There
are two instances of PCIe controller in DRA7xx. This controller can
work either as EP or RC. In order to enable host-specific features
PCI_DRA7XX_HOST must be selected and in order to enable device-
specific features PCI_DRA7XX_EP must be selected. This uses
the DesignWare core.
if PCI_DRA7XX
bool
config PCI_DRA7XX_HOST
bool "PCI DRA7xx Host Mode"
depends on PCI
depends on PCI_MSI_IRQ_DOMAIN
bool "TI DRA7xx PCIe controller Host Mode"
depends on SOC_DRA7XX || COMPILE_TEST
depends on PCI && PCI_MSI_IRQ_DOMAIN
depends on OF && HAS_IOMEM && TI_PIPE3
select PCIE_DW_HOST
select PCI_DRA7XX
default y
help
Enables support for the PCIe controller in the DRA7xx SoC to work in
host mode.
Enables support for the PCIe controller in the DRA7xx SoC to work in
host mode. There are two instances of PCIe controller in DRA7xx.
This controller can work either as EP or RC. In order to enable
host-specific features PCI_DRA7XX_HOST must be selected and in order
to enable device-specific features PCI_DRA7XX_EP must be selected.
This uses the DesignWare core.
config PCI_DRA7XX_EP
bool "PCI DRA7xx Endpoint Mode"
bool "TI DRA7xx PCIe controller Endpoint Mode"
depends on SOC_DRA7XX || COMPILE_TEST
depends on PCI_ENDPOINT
depends on OF && HAS_IOMEM && TI_PIPE3
select PCIE_DW_EP
select PCI_DRA7XX
help
Enables support for the PCIe controller in the DRA7xx SoC to work in
endpoint mode.
endif
Enables support for the PCIe controller in the DRA7xx SoC to work in
endpoint mode. There are two instances of PCIe controller in DRA7xx.
This controller can work either as EP or RC. In order to enable
host-specific features PCI_DRA7XX_HOST must be selected and in order
to enable device-specific features PCI_DRA7XX_EP must be selected.
This uses the DesignWare core.
config PCIE_DW_PLAT
bool "Platform bus based DesignWare PCIe Controller"
@ -149,15 +148,28 @@ config PCIE_ARMADA_8K
DesignWare core functions to implement the driver.
config PCIE_ARTPEC6
bool "Axis ARTPEC-6 PCIe controller"
depends on PCI
bool
config PCIE_ARTPEC6_HOST
bool "Axis ARTPEC-6 PCIe controller Host Mode"
depends on MACH_ARTPEC6
depends on PCI_MSI_IRQ_DOMAIN
depends on PCI && PCI_MSI_IRQ_DOMAIN
select PCIEPORTBUS
select PCIE_DW_HOST
select PCIE_ARTPEC6
help
Say Y here to enable PCIe controller support on Axis ARTPEC-6
SoCs. This PCIe controller uses the DesignWare core.
Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
host mode. This uses the DesignWare core.
config PCIE_ARTPEC6_EP
bool "Axis ARTPEC-6 PCIe controller Endpoint Mode"
depends on MACH_ARTPEC6
depends on PCI_ENDPOINT
select PCIE_DW_EP
select PCIE_ARTPEC6
help
Enables support for the PCIe controller in the ARTPEC-6 SoC to work in
endpoint mode. This uses the DesignWare core.
config PCIE_KIRIN
depends on OF && ARM64

View File

@ -3,9 +3,7 @@ obj-$(CONFIG_PCIE_DW) += pcie-designware.o
obj-$(CONFIG_PCIE_DW_HOST) += pcie-designware-host.o
obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
ifneq ($(filter y,$(CONFIG_PCI_DRA7XX_HOST) $(CONFIG_PCI_DRA7XX_EP)),)
obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
endif
obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
obj-$(CONFIG_PCI_IMX6) += pci-imx6.o
obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o

View File

@ -110,7 +110,7 @@ static inline void dra7xx_pcie_writel(struct dra7xx_pcie *pcie, u32 offset,
writel(value, pcie->base + offset);
}
static u64 dra7xx_pcie_cpu_addr_fixup(u64 pci_addr)
static u64 dra7xx_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
{
return pci_addr & DRA7XX_CPU_TO_BUS_ADDR;
}
@ -226,6 +226,7 @@ static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
static const struct irq_domain_ops intx_domain_ops = {
.map = dra7xx_pcie_intx_map,
.xlate = pci_irqd_intx_xlate,
};
static int dra7xx_pcie_init_irq_domain(struct pcie_port *pp)
@ -256,7 +257,8 @@ static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
struct dra7xx_pcie *dra7xx = arg;
struct dw_pcie *pci = dra7xx->pci;
struct pcie_port *pp = &pci->pp;
u32 reg;
unsigned long reg;
u32 virq, bit;
reg = dra7xx_pcie_readl(dra7xx, PCIECTRL_DRA7XX_CONF_IRQSTATUS_MSI);
@ -268,8 +270,11 @@ static irqreturn_t dra7xx_pcie_msi_irq_handler(int irq, void *arg)
case INTB:
case INTC:
case INTD:
generic_handle_irq(irq_find_mapping(dra7xx->irq_domain,
ffs(reg)));
for_each_set_bit(bit, &reg, PCI_NUM_INTX) {
virq = irq_find_mapping(dra7xx->irq_domain, bit);
if (virq)
generic_handle_irq(virq);
}
break;
}
@ -337,15 +342,6 @@ static irqreturn_t dra7xx_pcie_irq_handler(int irq, void *arg)
return IRQ_HANDLED;
}
static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
u32 reg;
reg = PCI_BASE_ADDRESS_0 + (4 * bar);
dw_pcie_writel_dbi2(pci, reg, 0x0);
dw_pcie_writel_dbi(pci, reg, 0x0);
}
static void dra7xx_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@ -375,7 +371,7 @@ static void dra7xx_pcie_raise_msi_irq(struct dra7xx_pcie *dra7xx,
dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_MSI_XMT, reg);
}
static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep,
static int dra7xx_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
enum pci_epc_irq_type type, u8 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
@ -470,6 +466,8 @@ static int __init dra7xx_add_pcie_port(struct dra7xx_pcie *dra7xx,
if (!pci->dbi_base)
return -ENOMEM;
pp->ops = &dra7xx_pcie_host_ops;
ret = dw_pcie_host_init(pp);
if (ret) {
dev_err(dev, "failed to initialize host\n");
@ -599,7 +597,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
void __iomem *base;
struct resource *res;
struct dw_pcie *pci;
struct pcie_port *pp;
struct dra7xx_pcie *dra7xx;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
@ -627,9 +624,6 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
pci->dev = dev;
pci->ops = &dw_pcie_ops;
pp = &pci->pp;
pp->ops = &dra7xx_pcie_host_ops;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(dev, "missing IRQ resource: %d\n", irq);
@ -705,6 +699,11 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
switch (mode) {
case DW_PCIE_RC_TYPE:
if (!IS_ENABLED(CONFIG_PCI_DRA7XX_HOST)) {
ret = -ENODEV;
goto err_gpio;
}
dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
DEVICE_TYPE_RC);
ret = dra7xx_add_pcie_port(dra7xx, pdev);
@ -712,6 +711,11 @@ static int __init dra7xx_pcie_probe(struct platform_device *pdev)
goto err_gpio;
break;
case DW_PCIE_EP_TYPE:
if (!IS_ENABLED(CONFIG_PCI_DRA7XX_EP)) {
ret = -ENODEV;
goto err_gpio;
}
dra7xx_pcie_writel(dra7xx, PCIECTRL_TI_CONF_DEVICE_TYPE,
DEVICE_TYPE_EP);
@ -810,7 +814,7 @@ static int dra7xx_pcie_resume_noirq(struct device *dev)
}
#endif
void dra7xx_pcie_shutdown(struct platform_device *pdev)
static void dra7xx_pcie_shutdown(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct dra7xx_pcie *dra7xx = dev_get_drvdata(dev);

View File

@ -55,49 +55,8 @@
#define PCIE_ELBI_SLV_ARMISC 0x120
#define PCIE_ELBI_SLV_DBI_ENABLE BIT(21)
/* PCIe Purple registers */
#define PCIE_PHY_GLOBAL_RESET 0x000
#define PCIE_PHY_COMMON_RESET 0x004
#define PCIE_PHY_CMN_REG 0x008
#define PCIE_PHY_MAC_RESET 0x00c
#define PCIE_PHY_PLL_LOCKED 0x010
#define PCIE_PHY_TRSVREG_RESET 0x020
#define PCIE_PHY_TRSV_RESET 0x024
/* PCIe PHY registers */
#define PCIE_PHY_IMPEDANCE 0x004
#define PCIE_PHY_PLL_DIV_0 0x008
#define PCIE_PHY_PLL_BIAS 0x00c
#define PCIE_PHY_DCC_FEEDBACK 0x014
#define PCIE_PHY_PLL_DIV_1 0x05c
#define PCIE_PHY_COMMON_POWER 0x064
#define PCIE_PHY_COMMON_PD_CMN BIT(3)
#define PCIE_PHY_TRSV0_EMP_LVL 0x084
#define PCIE_PHY_TRSV0_DRV_LVL 0x088
#define PCIE_PHY_TRSV0_RXCDR 0x0ac
#define PCIE_PHY_TRSV0_POWER 0x0c4
#define PCIE_PHY_TRSV0_PD_TSV BIT(7)
#define PCIE_PHY_TRSV0_LVCC 0x0dc
#define PCIE_PHY_TRSV1_EMP_LVL 0x144
#define PCIE_PHY_TRSV1_RXCDR 0x16c
#define PCIE_PHY_TRSV1_POWER 0x184
#define PCIE_PHY_TRSV1_PD_TSV BIT(7)
#define PCIE_PHY_TRSV1_LVCC 0x19c
#define PCIE_PHY_TRSV2_EMP_LVL 0x204
#define PCIE_PHY_TRSV2_RXCDR 0x22c
#define PCIE_PHY_TRSV2_POWER 0x244
#define PCIE_PHY_TRSV2_PD_TSV BIT(7)
#define PCIE_PHY_TRSV2_LVCC 0x25c
#define PCIE_PHY_TRSV3_EMP_LVL 0x2c4
#define PCIE_PHY_TRSV3_RXCDR 0x2ec
#define PCIE_PHY_TRSV3_POWER 0x304
#define PCIE_PHY_TRSV3_PD_TSV BIT(7)
#define PCIE_PHY_TRSV3_LVCC 0x31c
struct exynos_pcie_mem_res {
void __iomem *elbi_base; /* DT 0th resource: PCIe CTRL */
void __iomem *phy_base; /* DT 1st resource: PHY CTRL */
void __iomem *block_base; /* DT 2nd resource: PHY ADDITIONAL CTRL */
};
struct exynos_pcie_clk_res {
@ -112,8 +71,6 @@ struct exynos_pcie {
const struct exynos_pcie_ops *ops;
int reset_gpio;
/* For Generic PHY Framework */
bool using_phy;
struct phy *phy;
};
@ -141,20 +98,6 @@ static int exynos5440_pcie_get_mem_resources(struct platform_device *pdev,
if (IS_ERR(ep->mem_res->elbi_base))
return PTR_ERR(ep->mem_res->elbi_base);
/* If using the PHY framework, doesn't need to get other resource */
if (ep->using_phy)
return 0;
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
ep->mem_res->phy_base = devm_ioremap_resource(dev, res);
if (IS_ERR(ep->mem_res->phy_base))
return PTR_ERR(ep->mem_res->phy_base);
res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
ep->mem_res->block_base = devm_ioremap_resource(dev, res);
if (IS_ERR(ep->mem_res->block_base))
return PTR_ERR(ep->mem_res->block_base);
return 0;
}
@ -279,111 +222,6 @@ static void exynos_pcie_deassert_core_reset(struct exynos_pcie *ep)
exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_NONSTICKY_RESET);
exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_APP_INIT_RESET);
exynos_pcie_writel(ep->mem_res->elbi_base, 0, PCIE_APP_INIT_RESET);
exynos_pcie_writel(ep->mem_res->block_base, 1, PCIE_PHY_MAC_RESET);
}
static void exynos_pcie_assert_phy_reset(struct exynos_pcie *ep)
{
exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_MAC_RESET);
exynos_pcie_writel(ep->mem_res->block_base, 1, PCIE_PHY_GLOBAL_RESET);
}
static void exynos_pcie_deassert_phy_reset(struct exynos_pcie *ep)
{
exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_GLOBAL_RESET);
exynos_pcie_writel(ep->mem_res->elbi_base, 1, PCIE_PWR_RESET);
exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_COMMON_RESET);
exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_CMN_REG);
exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_TRSVREG_RESET);
exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_TRSV_RESET);
}
static void exynos_pcie_power_on_phy(struct exynos_pcie *ep)
{
u32 val;
val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_COMMON_POWER);
val &= ~PCIE_PHY_COMMON_PD_CMN;
exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_COMMON_POWER);
val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV0_POWER);
val &= ~PCIE_PHY_TRSV0_PD_TSV;
exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV0_POWER);
val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV1_POWER);
val &= ~PCIE_PHY_TRSV1_PD_TSV;
exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV1_POWER);
val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV2_POWER);
val &= ~PCIE_PHY_TRSV2_PD_TSV;
exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV2_POWER);
val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV3_POWER);
val &= ~PCIE_PHY_TRSV3_PD_TSV;
exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV3_POWER);
}
static void exynos_pcie_power_off_phy(struct exynos_pcie *ep)
{
u32 val;
val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_COMMON_POWER);
val |= PCIE_PHY_COMMON_PD_CMN;
exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_COMMON_POWER);
val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV0_POWER);
val |= PCIE_PHY_TRSV0_PD_TSV;
exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV0_POWER);
val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV1_POWER);
val |= PCIE_PHY_TRSV1_PD_TSV;
exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV1_POWER);
val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV2_POWER);
val |= PCIE_PHY_TRSV2_PD_TSV;
exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV2_POWER);
val = exynos_pcie_readl(ep->mem_res->phy_base, PCIE_PHY_TRSV3_POWER);
val |= PCIE_PHY_TRSV3_PD_TSV;
exynos_pcie_writel(ep->mem_res->phy_base, val, PCIE_PHY_TRSV3_POWER);
}
static void exynos_pcie_init_phy(struct exynos_pcie *ep)
{
/* DCC feedback control off */
exynos_pcie_writel(ep->mem_res->phy_base, 0x29, PCIE_PHY_DCC_FEEDBACK);
/* set TX/RX impedance */
exynos_pcie_writel(ep->mem_res->phy_base, 0xd5, PCIE_PHY_IMPEDANCE);
/* set 50Mhz PHY clock */
exynos_pcie_writel(ep->mem_res->phy_base, 0x14, PCIE_PHY_PLL_DIV_0);
exynos_pcie_writel(ep->mem_res->phy_base, 0x12, PCIE_PHY_PLL_DIV_1);
/* set TX Differential output for lane 0 */
exynos_pcie_writel(ep->mem_res->phy_base, 0x7f, PCIE_PHY_TRSV0_DRV_LVL);
/* set TX Pre-emphasis Level Control for lane 0 to minimum */
exynos_pcie_writel(ep->mem_res->phy_base, 0x0, PCIE_PHY_TRSV0_EMP_LVL);
/* set RX clock and data recovery bandwidth */
exynos_pcie_writel(ep->mem_res->phy_base, 0xe7, PCIE_PHY_PLL_BIAS);
exynos_pcie_writel(ep->mem_res->phy_base, 0x82, PCIE_PHY_TRSV0_RXCDR);
exynos_pcie_writel(ep->mem_res->phy_base, 0x82, PCIE_PHY_TRSV1_RXCDR);
exynos_pcie_writel(ep->mem_res->phy_base, 0x82, PCIE_PHY_TRSV2_RXCDR);
exynos_pcie_writel(ep->mem_res->phy_base, 0x82, PCIE_PHY_TRSV3_RXCDR);
/* change TX Pre-emphasis Level Control for lanes */
exynos_pcie_writel(ep->mem_res->phy_base, 0x39, PCIE_PHY_TRSV0_EMP_LVL);
exynos_pcie_writel(ep->mem_res->phy_base, 0x39, PCIE_PHY_TRSV1_EMP_LVL);
exynos_pcie_writel(ep->mem_res->phy_base, 0x39, PCIE_PHY_TRSV2_EMP_LVL);
exynos_pcie_writel(ep->mem_res->phy_base, 0x39, PCIE_PHY_TRSV3_EMP_LVL);
/* set LVCC */
exynos_pcie_writel(ep->mem_res->phy_base, 0x20, PCIE_PHY_TRSV0_LVCC);
exynos_pcie_writel(ep->mem_res->phy_base, 0xa0, PCIE_PHY_TRSV1_LVCC);
exynos_pcie_writel(ep->mem_res->phy_base, 0xa0, PCIE_PHY_TRSV2_LVCC);
exynos_pcie_writel(ep->mem_res->phy_base, 0xa0, PCIE_PHY_TRSV3_LVCC);
}
static void exynos_pcie_assert_reset(struct exynos_pcie *ep)
@ -401,7 +239,6 @@ static int exynos_pcie_establish_link(struct exynos_pcie *ep)
struct dw_pcie *pci = ep->pci;
struct pcie_port *pp = &pci->pp;
struct device *dev = pci->dev;
u32 val;
if (dw_pcie_link_up(pci)) {
dev_err(dev, "Link already up\n");
@ -410,32 +247,13 @@ static int exynos_pcie_establish_link(struct exynos_pcie *ep)
exynos_pcie_assert_core_reset(ep);
if (ep->using_phy) {
phy_reset(ep->phy);
phy_reset(ep->phy);
exynos_pcie_writel(ep->mem_res->elbi_base, 1,
PCIE_PWR_RESET);
exynos_pcie_writel(ep->mem_res->elbi_base, 1,
PCIE_PWR_RESET);
phy_power_on(ep->phy);
phy_init(ep->phy);
} else {
exynos_pcie_assert_phy_reset(ep);
exynos_pcie_deassert_phy_reset(ep);
exynos_pcie_power_on_phy(ep);
exynos_pcie_init_phy(ep);
/* pulse for common reset */
exynos_pcie_writel(ep->mem_res->block_base, 1,
PCIE_PHY_COMMON_RESET);
udelay(500);
exynos_pcie_writel(ep->mem_res->block_base, 0,
PCIE_PHY_COMMON_RESET);
}
/* pulse for common reset */
exynos_pcie_writel(ep->mem_res->block_base, 1, PCIE_PHY_COMMON_RESET);
udelay(500);
exynos_pcie_writel(ep->mem_res->block_base, 0, PCIE_PHY_COMMON_RESET);
phy_power_on(ep->phy);
phy_init(ep->phy);
exynos_pcie_deassert_core_reset(ep);
dw_pcie_setup_rc(pp);
@ -449,18 +267,7 @@ static int exynos_pcie_establish_link(struct exynos_pcie *ep)
if (!dw_pcie_wait_for_link(pci))
return 0;
if (ep->using_phy) {
phy_power_off(ep->phy);
return -ETIMEDOUT;
}
while (exynos_pcie_readl(ep->mem_res->phy_base,
PCIE_PHY_PLL_LOCKED) == 0) {
val = exynos_pcie_readl(ep->mem_res->block_base,
PCIE_PHY_PLL_LOCKED);
dev_info(dev, "PLL Locked: 0x%x\n", val);
}
exynos_pcie_power_off_phy(ep);
phy_power_off(ep->phy);
return -ETIMEDOUT;
}
@ -678,16 +485,13 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
ep->reset_gpio = of_get_named_gpio(np, "reset-gpio", 0);
/* Assume that controller doesn't use the PHY framework */
ep->using_phy = false;
ep->phy = devm_of_phy_get(dev, np, NULL);
if (IS_ERR(ep->phy)) {
if (PTR_ERR(ep->phy) == -EPROBE_DEFER)
return PTR_ERR(ep->phy);
dev_warn(dev, "Use the 'phy' property. Current DT of pci-exynos was deprecated!!\n");
} else
ep->using_phy = true;
ep->phy = NULL;
}
if (ep->ops && ep->ops->get_mem_resources) {
ret = ep->ops->get_mem_resources(pdev, ep);
@ -695,7 +499,8 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
return ret;
}
if (ep->ops && ep->ops->get_clk_resources) {
if (ep->ops && ep->ops->get_clk_resources &&
ep->ops->init_clk_resources) {
ret = ep->ops->get_clk_resources(ep);
if (ret)
return ret;
@ -713,8 +518,7 @@ static int __init exynos_pcie_probe(struct platform_device *pdev)
return 0;
fail_probe:
if (ep->using_phy)
phy_exit(ep->phy);
phy_exit(ep->phy);
if (ep->ops && ep->ops->deinit_clk_resources)
ep->ops->deinit_clk_resources(ep);

View File

@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/of_device.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/resource.h>
@ -26,44 +27,72 @@
#define to_artpec6_pcie(x) dev_get_drvdata((x)->dev)
enum artpec_pcie_variants {
ARTPEC6,
ARTPEC7,
};
struct artpec6_pcie {
struct dw_pcie *pci;
struct regmap *regmap; /* DT axis,syscon-pcie */
void __iomem *phy_base; /* DT phy */
enum artpec_pcie_variants variant;
enum dw_pcie_device_mode mode;
};
struct artpec_pcie_of_data {
enum artpec_pcie_variants variant;
enum dw_pcie_device_mode mode;
};
static const struct of_device_id artpec6_pcie_of_match[];
/* PCIe Port Logic registers (memory-mapped) */
#define PL_OFFSET 0x700
#define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
#define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
#define MISC_CONTROL_1_OFF (PL_OFFSET + 0x1bc)
#define DBI_RO_WR_EN 1
#define ACK_F_ASPM_CTRL_OFF (PL_OFFSET + 0xc)
#define ACK_N_FTS_MASK GENMASK(15, 8)
#define ACK_N_FTS(x) (((x) << 8) & ACK_N_FTS_MASK)
#define FAST_TRAINING_SEQ_MASK GENMASK(7, 0)
#define FAST_TRAINING_SEQ(x) (((x) << 0) & FAST_TRAINING_SEQ_MASK)
/* ARTPEC-6 specific registers */
#define PCIECFG 0x18
#define PCIECFG_DBG_OEN (1 << 24)
#define PCIECFG_CORE_RESET_REQ (1 << 21)
#define PCIECFG_LTSSM_ENABLE (1 << 20)
#define PCIECFG_CLKREQ_B (1 << 11)
#define PCIECFG_REFCLK_ENABLE (1 << 10)
#define PCIECFG_PLL_ENABLE (1 << 9)
#define PCIECFG_PCLK_ENABLE (1 << 8)
#define PCIECFG_RISRCREN (1 << 4)
#define PCIECFG_MODE_TX_DRV_EN (1 << 3)
#define PCIECFG_CISRREN (1 << 2)
#define PCIECFG_MACRO_ENABLE (1 << 0)
#define PCIECFG_DBG_OEN BIT(24)
#define PCIECFG_CORE_RESET_REQ BIT(21)
#define PCIECFG_LTSSM_ENABLE BIT(20)
#define PCIECFG_DEVICE_TYPE_MASK GENMASK(19, 16)
#define PCIECFG_CLKREQ_B BIT(11)
#define PCIECFG_REFCLK_ENABLE BIT(10)
#define PCIECFG_PLL_ENABLE BIT(9)
#define PCIECFG_PCLK_ENABLE BIT(8)
#define PCIECFG_RISRCREN BIT(4)
#define PCIECFG_MODE_TX_DRV_EN BIT(3)
#define PCIECFG_CISRREN BIT(2)
#define PCIECFG_MACRO_ENABLE BIT(0)
/* ARTPEC-7 specific fields */
#define PCIECFG_REFCLKSEL BIT(23)
#define PCIECFG_NOC_RESET BIT(3)
#define PCIESTAT 0x1c
/* ARTPEC-7 specific fields */
#define PCIESTAT_EXTREFCLK BIT(3)
#define NOCCFG 0x40
#define NOCCFG_ENABLE_CLK_PCIE (1 << 4)
#define NOCCFG_POWER_PCIE_IDLEACK (1 << 3)
#define NOCCFG_POWER_PCIE_IDLE (1 << 2)
#define NOCCFG_POWER_PCIE_IDLEREQ (1 << 1)
#define NOCCFG_ENABLE_CLK_PCIE BIT(4)
#define NOCCFG_POWER_PCIE_IDLEACK BIT(3)
#define NOCCFG_POWER_PCIE_IDLE BIT(2)
#define NOCCFG_POWER_PCIE_IDLEREQ BIT(1)
#define PHY_STATUS 0x118
#define PHY_COSPLLLOCK (1 << 0)
#define PHY_COSPLLLOCK BIT(0)
#define ARTPEC6_CPU_TO_BUS_ADDR 0x0fffffff
#define PHY_TX_ASIC_OUT 0x4040
#define PHY_TX_ASIC_OUT_TX_ACK BIT(0)
#define PHY_RX_ASIC_OUT 0x405c
#define PHY_RX_ASIC_OUT_ACK BIT(0)
static u32 artpec6_pcie_readl(struct artpec6_pcie *artpec6_pcie, u32 offset)
{
@ -78,22 +107,123 @@ static void artpec6_pcie_writel(struct artpec6_pcie *artpec6_pcie, u32 offset, u
regmap_write(artpec6_pcie->regmap, offset, val);
}
static u64 artpec6_pcie_cpu_addr_fixup(u64 pci_addr)
static u64 artpec6_pcie_cpu_addr_fixup(struct dw_pcie *pci, u64 pci_addr)
{
return pci_addr & ARTPEC6_CPU_TO_BUS_ADDR;
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
struct pcie_port *pp = &pci->pp;
struct dw_pcie_ep *ep = &pci->ep;
switch (artpec6_pcie->mode) {
case DW_PCIE_RC_TYPE:
return pci_addr - pp->cfg0_base;
case DW_PCIE_EP_TYPE:
return pci_addr - ep->phys_base;
default:
dev_err(pci->dev, "UNKNOWN device type\n");
}
return pci_addr;
}
static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
static int artpec6_pcie_establish_link(struct dw_pcie *pci)
{
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
u32 val;
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val |= PCIECFG_LTSSM_ENABLE;
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
return 0;
}
static void artpec6_pcie_stop_link(struct dw_pcie *pci)
{
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
u32 val;
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val &= ~PCIECFG_LTSSM_ENABLE;
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
}
static const struct dw_pcie_ops dw_pcie_ops = {
.cpu_addr_fixup = artpec6_pcie_cpu_addr_fixup,
.start_link = artpec6_pcie_establish_link,
.stop_link = artpec6_pcie_stop_link,
};
static void artpec6_pcie_wait_for_phy_a6(struct artpec6_pcie *artpec6_pcie)
{
struct dw_pcie *pci = artpec6_pcie->pci;
struct pcie_port *pp = &pci->pp;
struct device *dev = pci->dev;
u32 val;
unsigned int retries;
/* Hold DW core in reset */
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val |= PCIECFG_CORE_RESET_REQ;
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
retries = 50;
do {
usleep_range(1000, 2000);
val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
retries--;
} while (retries &&
(val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
if (!retries)
dev_err(dev, "PCIe clock manager did not leave idle state\n");
retries = 50;
do {
usleep_range(1000, 2000);
val = readl(artpec6_pcie->phy_base + PHY_STATUS);
retries--;
} while (retries && !(val & PHY_COSPLLLOCK));
if (!retries)
dev_err(dev, "PHY PLL did not lock\n");
}
static void artpec6_pcie_wait_for_phy_a7(struct artpec6_pcie *artpec6_pcie)
{
struct dw_pcie *pci = artpec6_pcie->pci;
struct device *dev = pci->dev;
u32 val;
u16 phy_status_tx, phy_status_rx;
unsigned int retries;
retries = 50;
do {
usleep_range(1000, 2000);
val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
retries--;
} while (retries &&
(val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
if (!retries)
dev_err(dev, "PCIe clock manager did not leave idle state\n");
retries = 50;
do {
usleep_range(1000, 2000);
phy_status_tx = readw(artpec6_pcie->phy_base + PHY_TX_ASIC_OUT);
phy_status_rx = readw(artpec6_pcie->phy_base + PHY_RX_ASIC_OUT);
retries--;
} while (retries && ((phy_status_tx & PHY_TX_ASIC_OUT_TX_ACK) ||
(phy_status_rx & PHY_RX_ASIC_OUT_ACK)));
if (!retries)
dev_err(dev, "PHY did not enter Pn state\n");
}
static void artpec6_pcie_wait_for_phy(struct artpec6_pcie *artpec6_pcie)
{
switch (artpec6_pcie->variant) {
case ARTPEC6:
artpec6_pcie_wait_for_phy_a6(artpec6_pcie);
break;
case ARTPEC7:
artpec6_pcie_wait_for_phy_a7(artpec6_pcie);
break;
}
}
static void artpec6_pcie_init_phy_a6(struct artpec6_pcie *artpec6_pcie)
{
u32 val;
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */
@ -119,45 +249,110 @@ static int artpec6_pcie_establish_link(struct artpec6_pcie *artpec6_pcie)
val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
val &= ~NOCCFG_POWER_PCIE_IDLEREQ;
artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
}
retries = 50;
do {
usleep_range(1000, 2000);
val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
retries--;
} while (retries &&
(val & (NOCCFG_POWER_PCIE_IDLEACK | NOCCFG_POWER_PCIE_IDLE)));
static void artpec6_pcie_init_phy_a7(struct artpec6_pcie *artpec6_pcie)
{
struct dw_pcie *pci = artpec6_pcie->pci;
u32 val;
bool extrefclk;
retries = 50;
do {
usleep_range(1000, 2000);
val = readl(artpec6_pcie->phy_base + PHY_STATUS);
retries--;
} while (retries && !(val & PHY_COSPLLLOCK));
/* Check if external reference clock is connected */
val = artpec6_pcie_readl(artpec6_pcie, PCIESTAT);
extrefclk = !!(val & PCIESTAT_EXTREFCLK);
dev_dbg(pci->dev, "Using reference clock: %s\n",
extrefclk ? "external" : "internal");
/* Take DW core out of reset */
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val &= ~PCIECFG_CORE_RESET_REQ;
val |= PCIECFG_RISRCREN | /* Receiver term. 50 Ohm */
PCIECFG_PCLK_ENABLE;
if (extrefclk)
val |= PCIECFG_REFCLKSEL;
else
val &= ~PCIECFG_REFCLKSEL;
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
usleep_range(10, 20);
val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
val |= NOCCFG_ENABLE_CLK_PCIE;
artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
usleep_range(20, 30);
val = artpec6_pcie_readl(artpec6_pcie, NOCCFG);
val &= ~NOCCFG_POWER_PCIE_IDLEREQ;
artpec6_pcie_writel(artpec6_pcie, NOCCFG, val);
}
static void artpec6_pcie_init_phy(struct artpec6_pcie *artpec6_pcie)
{
switch (artpec6_pcie->variant) {
case ARTPEC6:
artpec6_pcie_init_phy_a6(artpec6_pcie);
break;
case ARTPEC7:
artpec6_pcie_init_phy_a7(artpec6_pcie);
break;
}
}
static void artpec6_pcie_set_nfts(struct artpec6_pcie *artpec6_pcie)
{
struct dw_pcie *pci = artpec6_pcie->pci;
u32 val;
if (artpec6_pcie->variant != ARTPEC7)
return;
/*
* Increase the N_FTS (Number of Fast Training Sequences)
* to be transmitted when transitioning from L0s to L0.
*/
val = dw_pcie_readl_dbi(pci, ACK_F_ASPM_CTRL_OFF);
val &= ~ACK_N_FTS_MASK;
val |= ACK_N_FTS(180);
dw_pcie_writel_dbi(pci, ACK_F_ASPM_CTRL_OFF, val);
/*
* Set the Number of Fast Training Sequences that the core
* advertises as its N_FTS during Gen2 or Gen3 link training.
*/
val = dw_pcie_readl_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL);
val &= ~FAST_TRAINING_SEQ_MASK;
val |= FAST_TRAINING_SEQ(180);
dw_pcie_writel_dbi(pci, PCIE_LINK_WIDTH_SPEED_CONTROL, val);
}
static void artpec6_pcie_assert_core_reset(struct artpec6_pcie *artpec6_pcie)
{
u32 val;
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
switch (artpec6_pcie->variant) {
case ARTPEC6:
val |= PCIECFG_CORE_RESET_REQ;
break;
case ARTPEC7:
val &= ~PCIECFG_NOC_RESET;
break;
}
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
}
static void artpec6_pcie_deassert_core_reset(struct artpec6_pcie *artpec6_pcie)
{
u32 val;
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
switch (artpec6_pcie->variant) {
case ARTPEC6:
val &= ~PCIECFG_CORE_RESET_REQ;
break;
case ARTPEC7:
val |= PCIECFG_NOC_RESET;
break;
}
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
usleep_range(100, 200);
/* setup root complex */
dw_pcie_setup_rc(pp);
/* assert LTSSM enable */
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val |= PCIECFG_LTSSM_ENABLE;
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
/* check if the link is up or not */
if (!dw_pcie_wait_for_link(pci))
return 0;
dev_dbg(pci->dev, "DEBUG_R0: 0x%08x, DEBUG_R1: 0x%08x\n",
dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R0),
dw_pcie_readl_dbi(pci, PCIE_PHY_DEBUG_R1));
return -ETIMEDOUT;
}
static void artpec6_pcie_enable_interrupts(struct artpec6_pcie *artpec6_pcie)
@ -174,7 +369,14 @@ static int artpec6_pcie_host_init(struct pcie_port *pp)
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
artpec6_pcie_establish_link(artpec6_pcie);
artpec6_pcie_assert_core_reset(artpec6_pcie);
artpec6_pcie_init_phy(artpec6_pcie);
artpec6_pcie_deassert_core_reset(artpec6_pcie);
artpec6_pcie_wait_for_phy(artpec6_pcie);
artpec6_pcie_set_nfts(artpec6_pcie);
dw_pcie_setup_rc(pp);
artpec6_pcie_establish_link(pci);
dw_pcie_wait_for_link(pci);
artpec6_pcie_enable_interrupts(artpec6_pcie);
return 0;
@ -230,10 +432,78 @@ static int artpec6_add_pcie_port(struct artpec6_pcie *artpec6_pcie,
return 0;
}
static const struct dw_pcie_ops dw_pcie_ops = {
.cpu_addr_fixup = artpec6_pcie_cpu_addr_fixup,
static void artpec6_pcie_ep_init(struct dw_pcie_ep *ep)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct artpec6_pcie *artpec6_pcie = to_artpec6_pcie(pci);
enum pci_barno bar;
artpec6_pcie_assert_core_reset(artpec6_pcie);
artpec6_pcie_init_phy(artpec6_pcie);
artpec6_pcie_deassert_core_reset(artpec6_pcie);
artpec6_pcie_wait_for_phy(artpec6_pcie);
artpec6_pcie_set_nfts(artpec6_pcie);
for (bar = BAR_0; bar <= BAR_5; bar++)
dw_pcie_ep_reset_bar(pci, bar);
}
static int artpec6_pcie_raise_irq(struct dw_pcie_ep *ep, u8 func_no,
enum pci_epc_irq_type type, u8 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
switch (type) {
case PCI_EPC_IRQ_LEGACY:
dev_err(pci->dev, "EP cannot trigger legacy IRQs\n");
return -EINVAL;
case PCI_EPC_IRQ_MSI:
return dw_pcie_ep_raise_msi_irq(ep, func_no, interrupt_num);
default:
dev_err(pci->dev, "UNKNOWN IRQ type\n");
}
return 0;
}
static struct dw_pcie_ep_ops pcie_ep_ops = {
.ep_init = artpec6_pcie_ep_init,
.raise_irq = artpec6_pcie_raise_irq,
};
static int artpec6_add_pcie_ep(struct artpec6_pcie *artpec6_pcie,
struct platform_device *pdev)
{
int ret;
struct dw_pcie_ep *ep;
struct resource *res;
struct device *dev = &pdev->dev;
struct dw_pcie *pci = artpec6_pcie->pci;
ep = &pci->ep;
ep->ops = &pcie_ep_ops;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi2");
pci->dbi_base2 = devm_ioremap(dev, res->start, resource_size(res));
if (!pci->dbi_base2)
return -ENOMEM;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "addr_space");
if (!res)
return -EINVAL;
ep->phys_base = res->start;
ep->addr_size = resource_size(res);
ret = dw_pcie_ep_init(ep);
if (ret) {
dev_err(dev, "failed to initialize endpoint\n");
return ret;
}
return 0;
}
static int artpec6_pcie_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@ -242,6 +512,18 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
struct resource *dbi_base;
struct resource *phy_base;
int ret;
const struct of_device_id *match;
const struct artpec_pcie_of_data *data;
enum artpec_pcie_variants variant;
enum dw_pcie_device_mode mode;
match = of_match_device(artpec6_pcie_of_match, dev);
if (!match)
return -EINVAL;
data = (struct artpec_pcie_of_data *)match->data;
variant = (enum artpec_pcie_variants)data->variant;
mode = (enum dw_pcie_device_mode)data->mode;
artpec6_pcie = devm_kzalloc(dev, sizeof(*artpec6_pcie), GFP_KERNEL);
if (!artpec6_pcie)
@ -255,6 +537,8 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
pci->ops = &dw_pcie_ops;
artpec6_pcie->pci = pci;
artpec6_pcie->variant = variant;
artpec6_pcie->mode = mode;
dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
pci->dbi_base = devm_ioremap_resource(dev, dbi_base);
@ -274,15 +558,73 @@ static int artpec6_pcie_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, artpec6_pcie);
ret = artpec6_add_pcie_port(artpec6_pcie, pdev);
if (ret < 0)
return ret;
switch (artpec6_pcie->mode) {
case DW_PCIE_RC_TYPE:
if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_HOST))
return -ENODEV;
ret = artpec6_add_pcie_port(artpec6_pcie, pdev);
if (ret < 0)
return ret;
break;
case DW_PCIE_EP_TYPE: {
u32 val;
if (!IS_ENABLED(CONFIG_PCIE_ARTPEC6_EP))
return -ENODEV;
val = artpec6_pcie_readl(artpec6_pcie, PCIECFG);
val &= ~PCIECFG_DEVICE_TYPE_MASK;
artpec6_pcie_writel(artpec6_pcie, PCIECFG, val);
ret = artpec6_add_pcie_ep(artpec6_pcie, pdev);
if (ret < 0)
return ret;
break;
}
default:
dev_err(dev, "INVALID device type %d\n", artpec6_pcie->mode);
}
return 0;
}
static const struct artpec_pcie_of_data artpec6_pcie_rc_of_data = {
.variant = ARTPEC6,
.mode = DW_PCIE_RC_TYPE,
};
static const struct artpec_pcie_of_data artpec6_pcie_ep_of_data = {
.variant = ARTPEC6,
.mode = DW_PCIE_EP_TYPE,
};
static const struct artpec_pcie_of_data artpec7_pcie_rc_of_data = {
.variant = ARTPEC7,
.mode = DW_PCIE_RC_TYPE,
};
static const struct artpec_pcie_of_data artpec7_pcie_ep_of_data = {
.variant = ARTPEC7,
.mode = DW_PCIE_EP_TYPE,
};
static const struct of_device_id artpec6_pcie_of_match[] = {
{ .compatible = "axis,artpec6-pcie", },
{
.compatible = "axis,artpec6-pcie",
.data = &artpec6_pcie_rc_of_data,
},
{
.compatible = "axis,artpec6-pcie-ep",
.data = &artpec6_pcie_ep_of_data,
},
{
.compatible = "axis,artpec7-pcie",
.data = &artpec7_pcie_rc_of_data,
},
{
.compatible = "axis,artpec7-pcie-ep",
.data = &artpec7_pcie_ep_of_data,
},
{},
};

View File

@ -30,13 +30,15 @@ void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
pci_epc_linkup(epc);
}
static void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
u32 reg;
reg = PCI_BASE_ADDRESS_0 + (4 * bar);
dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_writel_dbi2(pci, reg, 0x0);
dw_pcie_writel_dbi(pci, reg, 0x0);
dw_pcie_dbi_ro_wr_dis(pci);
}
static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
@ -45,6 +47,7 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_writew_dbi(pci, PCI_VENDOR_ID, hdr->vendorid);
dw_pcie_writew_dbi(pci, PCI_DEVICE_ID, hdr->deviceid);
dw_pcie_writeb_dbi(pci, PCI_REVISION_ID, hdr->revid);
@ -58,6 +61,7 @@ static int dw_pcie_ep_write_header(struct pci_epc *epc, u8 func_no,
dw_pcie_writew_dbi(pci, PCI_SUBSYSTEM_ID, hdr->subsys_id);
dw_pcie_writeb_dbi(pci, PCI_INTERRUPT_PIN,
hdr->interrupt_pin);
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
@ -144,8 +148,10 @@ static int dw_pcie_ep_set_bar(struct pci_epc *epc, u8 func_no,
if (ret)
return ret;
dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_writel_dbi2(pci, reg, size - 1);
dw_pcie_writel_dbi(pci, reg, flags);
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
@ -224,8 +230,12 @@ static int dw_pcie_ep_set_msi(struct pci_epc *epc, u8 func_no, u8 encode_int)
struct dw_pcie_ep *ep = epc_get_drvdata(epc);
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
val = (encode_int << MSI_CAP_MMC_SHIFT);
val = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
val &= ~MSI_CAP_MMC_MASK;
val |= (encode_int << MSI_CAP_MMC_SHIFT) & MSI_CAP_MMC_MASK;
dw_pcie_dbi_ro_wr_en(pci);
dw_pcie_writew_dbi(pci, MSI_MESSAGE_CONTROL, val);
dw_pcie_dbi_ro_wr_dis(pci);
return 0;
}
@ -238,7 +248,7 @@ static int dw_pcie_ep_raise_irq(struct pci_epc *epc, u8 func_no,
if (!ep->ops->raise_irq)
return -EINVAL;
return ep->ops->raise_irq(ep, type, interrupt_num);
return ep->ops->raise_irq(ep, func_no, type, interrupt_num);
}
static void dw_pcie_ep_stop(struct pci_epc *epc)
@ -276,10 +286,48 @@ static const struct pci_epc_ops epc_ops = {
.stop = dw_pcie_ep_stop,
};
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num)
{
struct dw_pcie *pci = to_dw_pcie_from_ep(ep);
struct pci_epc *epc = ep->epc;
u16 msg_ctrl, msg_data;
u32 msg_addr_lower, msg_addr_upper;
u64 msg_addr;
bool has_upper;
int ret;
/* Raise MSI per the PCI Local Bus Specification Revision 3.0, 6.8.1. */
msg_ctrl = dw_pcie_readw_dbi(pci, MSI_MESSAGE_CONTROL);
has_upper = !!(msg_ctrl & PCI_MSI_FLAGS_64BIT);
msg_addr_lower = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_L32);
if (has_upper) {
msg_addr_upper = dw_pcie_readl_dbi(pci, MSI_MESSAGE_ADDR_U32);
msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_64);
} else {
msg_addr_upper = 0;
msg_data = dw_pcie_readw_dbi(pci, MSI_MESSAGE_DATA_32);
}
msg_addr = ((u64) msg_addr_upper) << 32 | msg_addr_lower;
ret = dw_pcie_ep_map_addr(epc, func_no, ep->msi_mem_phys, msg_addr,
epc->mem->page_size);
if (ret)
return ret;
writel(msg_data | (interrupt_num - 1), ep->msi_mem);
dw_pcie_ep_unmap_addr(epc, func_no, ep->msi_mem_phys);
return 0;
}
void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
{
struct pci_epc *epc = ep->epc;
pci_epc_mem_free_addr(epc, ep->msi_mem_phys, ep->msi_mem,
epc->mem->page_size);
pci_epc_mem_exit(epc);
}
@ -335,6 +383,13 @@ int dw_pcie_ep_init(struct dw_pcie_ep *ep)
return ret;
}
ep->msi_mem = pci_epc_mem_alloc_addr(epc, &ep->msi_mem_phys,
epc->mem->page_size);
if (!ep->msi_mem) {
dev_err(dev, "Failed to reserve memory for MSI\n");
return -ENOMEM;
}
ep->epc = epc;
epc_set_drvdata(epc, ep);
dw_pcie_setup(pci);

View File

@ -83,10 +83,19 @@ irqreturn_t dw_handle_msi_irq(struct pcie_port *pp)
void dw_pcie_msi_init(struct pcie_port *pp)
{
struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
struct device *dev = pci->dev;
struct page *page;
u64 msi_target;
pp->msi_data = __get_free_pages(GFP_KERNEL, 0);
msi_target = virt_to_phys((void *)pp->msi_data);
page = alloc_page(GFP_KERNEL);
pp->msi_data = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(dev, pp->msi_data)) {
dev_err(dev, "failed to map MSI data\n");
__free_page(page);
return;
}
msi_target = (u64)pp->msi_data;
/* program the msi_data */
dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4,
@ -187,7 +196,7 @@ static void dw_msi_setup_msg(struct pcie_port *pp, unsigned int irq, u32 pos)
if (pp->ops->get_msi_addr)
msi_target = pp->ops->get_msi_addr(pp);
else
msi_target = virt_to_phys((void *)pp->msi_data);
msi_target = (u64)pp->msi_data;
msg.address_lo = (u32)(msi_target & 0xffffffff);
msg.address_hi = (u32)(msi_target >> 32 & 0xffffffff);

View File

@ -149,7 +149,7 @@ void dw_pcie_prog_outbound_atu(struct dw_pcie *pci, int index, int type,
u32 retries, val;
if (pci->ops->cpu_addr_fixup)
cpu_addr = pci->ops->cpu_addr_fixup(cpu_addr);
cpu_addr = pci->ops->cpu_addr_fixup(pci, cpu_addr);
if (pci->iatu_unroll_enabled) {
dw_pcie_prog_outbound_atu_unroll(pci, index, type, cpu_addr,

View File

@ -14,6 +14,7 @@
#ifndef _PCIE_DESIGNWARE_H
#define _PCIE_DESIGNWARE_H
#include <linux/dma-mapping.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/pci.h>
@ -100,10 +101,13 @@
#define MSI_MESSAGE_CONTROL 0x52
#define MSI_CAP_MMC_SHIFT 1
#define MSI_CAP_MMC_MASK (7 << MSI_CAP_MMC_SHIFT)
#define MSI_CAP_MME_SHIFT 4
#define MSI_CAP_MME_MASK (7 << MSI_CAP_MME_SHIFT)
#define MSI_MESSAGE_ADDR_L32 0x54
#define MSI_MESSAGE_ADDR_U32 0x58
#define MSI_MESSAGE_DATA_32 0x58
#define MSI_MESSAGE_DATA_64 0x5C
/*
* Maximum number of MSI IRQs can be 256 per controller. But keep
@ -168,7 +172,7 @@ struct pcie_port {
const struct dw_pcie_host_ops *ops;
int msi_irq;
struct irq_domain *irq_domain;
unsigned long msi_data;
dma_addr_t msi_data;
DECLARE_BITMAP(msi_irq_in_use, MAX_MSI_IRQS);
};
@ -180,8 +184,8 @@ enum dw_pcie_as_type {
struct dw_pcie_ep_ops {
void (*ep_init)(struct dw_pcie_ep *ep);
int (*raise_irq)(struct dw_pcie_ep *ep, enum pci_epc_irq_type type,
u8 interrupt_num);
int (*raise_irq)(struct dw_pcie_ep *ep, u8 func_no,
enum pci_epc_irq_type type, u8 interrupt_num);
};
struct dw_pcie_ep {
@ -196,10 +200,12 @@ struct dw_pcie_ep {
unsigned long ob_window_map;
u32 num_ib_windows;
u32 num_ob_windows;
void __iomem *msi_mem;
phys_addr_t msi_mem_phys;
};
struct dw_pcie_ops {
u64 (*cpu_addr_fixup)(u64 cpu_addr);
u64 (*cpu_addr_fixup)(struct dw_pcie *pcie, u64 cpu_addr);
u32 (*read_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
size_t size);
void (*write_dbi)(struct dw_pcie *pcie, void __iomem *base, u32 reg,
@ -334,6 +340,9 @@ static inline int dw_pcie_host_init(struct pcie_port *pp)
void dw_pcie_ep_linkup(struct dw_pcie_ep *ep);
int dw_pcie_ep_init(struct dw_pcie_ep *ep);
void dw_pcie_ep_exit(struct dw_pcie_ep *ep);
int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num);
void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar);
#else
static inline void dw_pcie_ep_linkup(struct dw_pcie_ep *ep)
{
@ -347,5 +356,15 @@ static inline int dw_pcie_ep_init(struct dw_pcie_ep *ep)
static inline void dw_pcie_ep_exit(struct dw_pcie_ep *ep)
{
}
static inline int dw_pcie_ep_raise_msi_irq(struct dw_pcie_ep *ep, u8 func_no,
u8 interrupt_num)
{
return 0;
}
static inline void dw_pcie_ep_reset_bar(struct dw_pcie *pci, enum pci_barno bar)
{
}
#endif
#endif /* _PCIE_DESIGNWARE_H */

View File

@ -171,7 +171,7 @@ struct qcom_pcie {
union qcom_pcie_resources res;
struct phy *phy;
struct gpio_desc *reset;
struct qcom_pcie_ops *ops;
const struct qcom_pcie_ops *ops;
};
#define to_qcom_pcie(x) dev_get_drvdata((x)->dev)
@ -1234,7 +1234,7 @@ static int qcom_pcie_probe(struct platform_device *pdev)
pcie->pci = pci;
pcie->ops = (struct qcom_pcie_ops *)of_device_get_match_data(dev);
pcie->ops = of_device_get_match_data(dev);
pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_LOW);
if (IS_ERR(pcie->reset))

View File

@ -92,6 +92,13 @@ static int iproc_pcie_pltfm_probe(struct platform_device *pdev)
pcie->need_ob_cfg = true;
}
/*
* DT nodes are not used by all platforms that use the iProc PCIe
* core driver. For platforms that require explict inbound mapping
* configuration, "dma-ranges" would have been present in DT
*/
pcie->need_ib_cfg = of_property_read_bool(np, "dma-ranges");
/* PHY use is optional */
pcie->phy = devm_phy_get(dev, "pcie-phy");
if (IS_ERR(pcie->phy)) {

View File

@ -1378,9 +1378,11 @@ int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res)
}
}
ret = iproc_pcie_map_dma_ranges(pcie);
if (ret && ret != -ENOENT)
goto err_power_off_phy;
if (pcie->need_ib_cfg) {
ret = iproc_pcie_map_dma_ranges(pcie);
if (ret && ret != -ENOENT)
goto err_power_off_phy;
}
#ifdef CONFIG_ARM
pcie->sysdata.private_data = pcie;

View File

@ -74,6 +74,7 @@ struct iproc_msi;
* @ob: outbound mapping related parameters
* @ob_map: outbound mapping related parameters specific to the controller
*
* @need_ib_cfg: indicates SW needs to configure the inbound mapping window
* @ib: inbound mapping related parameters
* @ib_map: outbound mapping region related parameters
*
@ -101,6 +102,7 @@ struct iproc_pcie {
struct iproc_pcie_ob ob;
const struct iproc_pcie_ob_map *ob_map;
bool need_ib_cfg;
struct iproc_pcie_ib ib;
const struct iproc_pcie_ib_map *ib_map;