author:    Hou Zhiqiang <Zhiqiang.Hou@nxp.com>              2019-04-08 10:15:46 +0000
committer: Prabhakar Kushwaha <prabhakar.kushwaha@nxp.com>  2019-05-22 12:24:24 +0530
commit:    07ce19f5e9ad637caa8cb2b6db45a6a28d2d69a1 (patch)
tree:      d6cf4c547cc4b3fd1022f13ea4469c444c5b00a7 /drivers
parent:    059d942283eb79bad369dd66512a5135dff05f4d (diff)
pci: Add PCIe Gen4 controller driver for NXP Layerscape SoCs
Add PCIe Gen4 driver for the NXP Layerscape SoCs. This PCIe controller is
based on the Mobiveil IP, which is compatible with the PCI Express™ Base
Specification, Revision 4.0.

Signed-off-by: Hou Zhiqiang <Zhiqiang.Hou@nxp.com>
Signed-off-by: Bao Xiaowei <Xiaowei.Bao@nxp.com>
Reviewed-by: Bin Meng <bmeng.cn@gmail.com>
Reviewed-by: Prabhakar Kushwaha <prabhakar.kushwaha@nxp.com>
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/pci/Kconfig                |   8
-rw-r--r--  drivers/pci/Makefile               |   1
-rw-r--r--  drivers/pci/pcie_layerscape_gen4.c | 577
-rw-r--r--  drivers/pci/pcie_layerscape_gen4.h | 264
4 files changed, 850 insertions, 0 deletions
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 1521885bdeb..763bd500d40 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -105,6 +105,14 @@ config PCIE_LAYERSCAPE
PCIe controllers. The PCIe may work in RC or EP mode according to
RCW[HOST_AGT_PEX] setting.
+config PCIE_LAYERSCAPE_GEN4
+ bool "Layerscape Gen4 PCIe support"
+ depends on DM_PCI
+ help
+ Support PCIe Gen4 on NXP Layerscape SoCs, which may have one or
+ several PCIe controllers. The PCIe controller can work in RC or
+ EP mode according to RCW[HOST_AGT_PEX] setting.
+
config PCIE_INTEL_FPGA
bool "Intel FPGA PCIe support"
depends on DM_PCI
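
Nothing in this diff (which is limited to drivers/) enables the new symbol itself; a board whose SerDes protocol routes lanes to one of these controllers would typically switch it on from its defconfig, along the lines of this hypothetical fragment (actual board enablement is a separate patch):

    CONFIG_DM_PCI=y
    CONFIG_PCIE_LAYERSCAPE_GEN4=y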
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 49236418959..7f585aad550 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -32,5 +32,6 @@ obj-$(CONFIG_PCI_AARDVARK) += pci-aardvark.o
obj-$(CONFIG_PCIE_DW_MVEBU) += pcie_dw_mvebu.o
obj-$(CONFIG_PCIE_LAYERSCAPE) += pcie_layerscape.o
obj-$(CONFIG_PCIE_LAYERSCAPE) += pcie_layerscape_fixup.o
+obj-$(CONFIG_PCIE_LAYERSCAPE_GEN4) += pcie_layerscape_gen4.o
obj-$(CONFIG_PCI_XILINX) += pcie_xilinx.o
obj-$(CONFIG_PCIE_INTEL_FPGA) += pcie_intel_fpga.o
diff --git a/drivers/pci/pcie_layerscape_gen4.c b/drivers/pci/pcie_layerscape_gen4.c
new file mode 100644
index 00000000000..da77caccfd9
--- /dev/null
+++ b/drivers/pci/pcie_layerscape_gen4.c
@@ -0,0 +1,577 @@
+// SPDX-License-Identifier: GPL-2.0+ OR X11
+/*
+ * Copyright 2018-2019 NXP
+ *
+ * PCIe Gen4 driver for NXP Layerscape SoCs
+ * Author: Hou Zhiqiang <Minder.Hou@gmail.com>
+ */
+
+#include <common.h>
+#include <asm/arch/fsl_serdes.h>
+#include <pci.h>
+#include <asm/io.h>
+#include <errno.h>
+#include <malloc.h>
+#include <dm.h>
+#include <linux/sizes.h>
+
+#include "pcie_layerscape_gen4.h"
+
+DECLARE_GLOBAL_DATA_PTR;
+
+LIST_HEAD(ls_pcie_g4_list);
+
+static u64 bar_size[4] = {
+ PCIE_BAR0_SIZE,
+ PCIE_BAR1_SIZE,
+ PCIE_BAR2_SIZE,
+ PCIE_BAR4_SIZE
+};
+
+static int ls_pcie_g4_ltssm(struct ls_pcie_g4 *pcie)
+{
+ u32 state;
+
+ state = pf_ctrl_readl(pcie, PCIE_LTSSM_STA) & LTSSM_STATE_MASK;
+
+ return state;
+}
+
+static int ls_pcie_g4_link_up(struct ls_pcie_g4 *pcie)
+{
+ int ltssm;
+
+ ltssm = ls_pcie_g4_ltssm(pcie);
+ if (ltssm != LTSSM_PCIE_L0)
+ return 0;
+
+ return 1;
+}
+
+static void ls_pcie_g4_ep_enable_cfg(struct ls_pcie_g4 *pcie)
+{
+ ccsr_writel(pcie, GPEX_CFG_READY, PCIE_CONFIG_READY);
+}
+
+static void ls_pcie_g4_cfg_set_target(struct ls_pcie_g4 *pcie, u32 target)
+{
+ ccsr_writel(pcie, PAB_AXI_AMAP_PEX_WIN_L(0), target);
+ ccsr_writel(pcie, PAB_AXI_AMAP_PEX_WIN_H(0), 0);
+}
+
+static int ls_pcie_g4_outbound_win_set(struct ls_pcie_g4 *pcie, int idx,
+ int type, u64 phys, u64 bus_addr,
+ pci_size_t size)
+{
+ u32 val;
+ u32 size_h, size_l;
+
+ if (idx >= PAB_WINS_NUM)
+ return -EINVAL;
+
+ size_h = upper_32_bits(~(size - 1));
+ size_l = lower_32_bits(~(size - 1));
+
+ val = ccsr_readl(pcie, PAB_AXI_AMAP_CTRL(idx));
+ val &= ~((AXI_AMAP_CTRL_TYPE_MASK << AXI_AMAP_CTRL_TYPE_SHIFT) |
+ (AXI_AMAP_CTRL_SIZE_MASK << AXI_AMAP_CTRL_SIZE_SHIFT) |
+ AXI_AMAP_CTRL_EN);
+ val |= ((type & AXI_AMAP_CTRL_TYPE_MASK) << AXI_AMAP_CTRL_TYPE_SHIFT) |
+ ((size_l >> AXI_AMAP_CTRL_SIZE_SHIFT) <<
+ AXI_AMAP_CTRL_SIZE_SHIFT) | AXI_AMAP_CTRL_EN;
+
+ ccsr_writel(pcie, PAB_AXI_AMAP_CTRL(idx), val);
+
+ ccsr_writel(pcie, PAB_AXI_AMAP_AXI_WIN(idx), lower_32_bits(phys));
+ ccsr_writel(pcie, PAB_EXT_AXI_AMAP_AXI_WIN(idx), upper_32_bits(phys));
+ ccsr_writel(pcie, PAB_AXI_AMAP_PEX_WIN_L(idx), lower_32_bits(bus_addr));
+ ccsr_writel(pcie, PAB_AXI_AMAP_PEX_WIN_H(idx), upper_32_bits(bus_addr));
+ ccsr_writel(pcie, PAB_EXT_AXI_AMAP_SIZE(idx), size_h);
+
+ return 0;
+}
+
+static int ls_pcie_g4_rc_inbound_win_set(struct ls_pcie_g4 *pcie, int idx,
+ int type, u64 phys, u64 bus_addr,
+ pci_size_t size)
+{
+ u32 val;
+ pci_size_t win_size = ~(size - 1);
+
+ val = ccsr_readl(pcie, PAB_PEX_AMAP_CTRL(idx));
+
+ val &= ~(PEX_AMAP_CTRL_TYPE_MASK << PEX_AMAP_CTRL_TYPE_SHIFT);
+ val &= ~(PEX_AMAP_CTRL_EN_MASK << PEX_AMAP_CTRL_EN_SHIFT);
+ val = (val | (type << PEX_AMAP_CTRL_TYPE_SHIFT));
+ val = (val | (1 << PEX_AMAP_CTRL_EN_SHIFT));
+
+ ccsr_writel(pcie, PAB_PEX_AMAP_CTRL(idx),
+ val | lower_32_bits(win_size));
+
+ ccsr_writel(pcie, PAB_EXT_PEX_AMAP_SIZE(idx), upper_32_bits(win_size));
+ ccsr_writel(pcie, PAB_PEX_AMAP_AXI_WIN(idx), lower_32_bits(phys));
+ ccsr_writel(pcie, PAB_EXT_PEX_AMAP_AXI_WIN(idx), upper_32_bits(phys));
+ ccsr_writel(pcie, PAB_PEX_AMAP_PEX_WIN_L(idx), lower_32_bits(bus_addr));
+ ccsr_writel(pcie, PAB_PEX_AMAP_PEX_WIN_H(idx), upper_32_bits(bus_addr));
+
+ return 0;
+}
+
+static void ls_pcie_g4_dump_wins(struct ls_pcie_g4 *pcie, int wins)
+{
+ int i;
+
+ for (i = 0; i < wins; i++) {
+ debug("APIO Win%d:\n", i);
+ debug("\tLOWER PHYS: 0x%08x\n",
+ ccsr_readl(pcie, PAB_AXI_AMAP_AXI_WIN(i)));
+ debug("\tUPPER PHYS: 0x%08x\n",
+ ccsr_readl(pcie, PAB_EXT_AXI_AMAP_AXI_WIN(i)));
+ debug("\tLOWER BUS: 0x%08x\n",
+ ccsr_readl(pcie, PAB_AXI_AMAP_PEX_WIN_L(i)));
+ debug("\tUPPER BUS: 0x%08x\n",
+ ccsr_readl(pcie, PAB_AXI_AMAP_PEX_WIN_H(i)));
+ debug("\tSIZE: 0x%08x\n",
+ ccsr_readl(pcie, PAB_AXI_AMAP_CTRL(i)) &
+ (AXI_AMAP_CTRL_SIZE_MASK << AXI_AMAP_CTRL_SIZE_SHIFT));
+ debug("\tEXT_SIZE: 0x%08x\n",
+ ccsr_readl(pcie, PAB_EXT_AXI_AMAP_SIZE(i)));
+ debug("\tPARAM: 0x%08x\n",
+ ccsr_readl(pcie, PAB_AXI_AMAP_PCI_HDR_PARAM(i)));
+ debug("\tCTRL: 0x%08x\n",
+ ccsr_readl(pcie, PAB_AXI_AMAP_CTRL(i)));
+ }
+}
+
+static void ls_pcie_g4_setup_wins(struct ls_pcie_g4 *pcie)
+{
+ struct pci_region *io, *mem, *pref;
+ int idx = 1;
+
+ /* INBOUND WIN */
+ ls_pcie_g4_rc_inbound_win_set(pcie, 0, IB_TYPE_MEM_F, 0, 0, SIZE_1T);
+
+ /* OUTBOUND WIN 0: CFG */
+ ls_pcie_g4_outbound_win_set(pcie, 0, PAB_AXI_TYPE_CFG,
+ pcie->cfg_res.start, 0,
+ fdt_resource_size(&pcie->cfg_res));
+
+ pci_get_regions(pcie->bus, &io, &mem, &pref);
+
+ if (io)
+ /* OUTBOUND WIN: IO */
+ ls_pcie_g4_outbound_win_set(pcie, idx++, PAB_AXI_TYPE_IO,
+ io->phys_start, io->bus_start,
+ io->size);
+
+ if (mem)
+ /* OUTBOUND WIN: MEM */
+ ls_pcie_g4_outbound_win_set(pcie, idx++, PAB_AXI_TYPE_MEM,
+ mem->phys_start, mem->bus_start,
+ mem->size);
+
+ if (pref)
+ /* OUTBOUND WIN: prefetchable MEM */
+ ls_pcie_g4_outbound_win_set(pcie, idx++, PAB_AXI_TYPE_MEM,
+ pref->phys_start, pref->bus_start,
+ pref->size);
+
+ ls_pcie_g4_dump_wins(pcie, idx);
+}
+
+/* Return 0 if the address is valid, -errno if not valid */
+static int ls_pcie_g4_addr_valid(struct ls_pcie_g4 *pcie, pci_dev_t bdf)
+{
+ struct udevice *bus = pcie->bus;
+
+ if (pcie->mode == PCI_HEADER_TYPE_NORMAL)
+ return -ENODEV;
+
+ if (!pcie->enabled)
+ return -ENXIO;
+
+ if (PCI_BUS(bdf) < bus->seq)
+ return -EINVAL;
+
+ if ((PCI_BUS(bdf) > bus->seq) && (!ls_pcie_g4_link_up(pcie)))
+ return -EINVAL;
+
+ if (PCI_BUS(bdf) <= (bus->seq + 1) && (PCI_DEV(bdf) > 0))
+ return -EINVAL;
+
+ return 0;
+}
+
+void *ls_pcie_g4_conf_address(struct ls_pcie_g4 *pcie, pci_dev_t bdf,
+ int offset)
+{
+ struct udevice *bus = pcie->bus;
+ u32 target;
+
+ if (PCI_BUS(bdf) == bus->seq) {
+ if (offset < INDIRECT_ADDR_BNDRY) {
+ ccsr_set_page(pcie, 0);
+ return pcie->ccsr + offset;
+ }
+
+ ccsr_set_page(pcie, OFFSET_TO_PAGE_IDX(offset));
+ return pcie->ccsr + OFFSET_TO_PAGE_ADDR(offset);
+ }
+
+ target = PAB_TARGET_BUS(PCI_BUS(bdf) - bus->seq) |
+ PAB_TARGET_DEV(PCI_DEV(bdf)) |
+ PAB_TARGET_FUNC(PCI_FUNC(bdf));
+
+ ls_pcie_g4_cfg_set_target(pcie, target);
+
+ return pcie->cfg + offset;
+}
+
+static int ls_pcie_g4_read_config(struct udevice *bus, pci_dev_t bdf,
+ uint offset, ulong *valuep,
+ enum pci_size_t size)
+{
+ struct ls_pcie_g4 *pcie = dev_get_priv(bus);
+ void *address;
+ int ret = 0;
+
+ if (ls_pcie_g4_addr_valid(pcie, bdf)) {
+ *valuep = pci_get_ff(size);
+ return 0;
+ }
+
+ address = ls_pcie_g4_conf_address(pcie, bdf, offset);
+
+ switch (size) {
+ case PCI_SIZE_8:
+ *valuep = readb(address);
+ break;
+ case PCI_SIZE_16:
+ *valuep = readw(address);
+ break;
+ case PCI_SIZE_32:
+ *valuep = readl(address);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int ls_pcie_g4_write_config(struct udevice *bus, pci_dev_t bdf,
+ uint offset, ulong value,
+ enum pci_size_t size)
+{
+ struct ls_pcie_g4 *pcie = dev_get_priv(bus);
+ void *address;
+
+ if (ls_pcie_g4_addr_valid(pcie, bdf))
+ return 0;
+
+ address = ls_pcie_g4_conf_address(pcie, bdf, offset);
+
+ switch (size) {
+ case PCI_SIZE_8:
+ writeb(value, address);
+ return 0;
+ case PCI_SIZE_16:
+ writew(value, address);
+ return 0;
+ case PCI_SIZE_32:
+ writel(value, address);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void ls_pcie_g4_setup_ctrl(struct ls_pcie_g4 *pcie)
+{
+ u32 val;
+
+ /* Fix class code */
+ val = ccsr_readl(pcie, GPEX_CLASSCODE);
+ val &= ~(GPEX_CLASSCODE_MASK << GPEX_CLASSCODE_SHIFT);
+ val |= PCI_CLASS_BRIDGE_PCI << GPEX_CLASSCODE_SHIFT;
+ ccsr_writel(pcie, GPEX_CLASSCODE, val);
+
+ /* Enable APIO and Memory/IO/CFG Wins */
+ val = ccsr_readl(pcie, PAB_AXI_PIO_CTRL(0));
+ val |= APIO_EN | MEM_WIN_EN | IO_WIN_EN | CFG_WIN_EN;
+ ccsr_writel(pcie, PAB_AXI_PIO_CTRL(0), val);
+
+ ls_pcie_g4_setup_wins(pcie);
+
+ pcie->stream_id_cur = 0;
+}
+
+static void ls_pcie_g4_ep_inbound_win_set(struct ls_pcie_g4 *pcie, int pf,
+ int bar, u64 phys)
+{
+ u32 val;
+
+ /* PF BAR1 is for MSI-X and only needs to be enabled */
+ if (bar == 1) {
+ ccsr_writel(pcie, PAB_PEX_BAR_AMAP(pf, bar), BAR_AMAP_EN);
+ return;
+ }
+
+ val = upper_32_bits(phys);
+ ccsr_writel(pcie, PAB_EXT_PEX_BAR_AMAP(pf, bar), val);
+ val = lower_32_bits(phys) | BAR_AMAP_EN;
+ ccsr_writel(pcie, PAB_PEX_BAR_AMAP(pf, bar), val);
+}
+
+static void ls_pcie_g4_ep_setup_wins(struct ls_pcie_g4 *pcie, int pf)
+{
+ u64 phys;
+ int bar;
+ u32 val;
+
+ if ((!pcie->sriov_support && pf > LS_G4_PF0) || pf > LS_G4_PF1)
+ return;
+
+ phys = CONFIG_SYS_PCI_EP_MEMORY_BASE + PCIE_BAR_SIZE * 4 * pf;
+ for (bar = 0; bar < PF_BAR_NUM; bar++) {
+ ls_pcie_g4_ep_inbound_win_set(pcie, pf, bar, phys);
+ phys += PCIE_BAR_SIZE;
+ }
+
+ /* OUTBOUND: map MEM */
+ ls_pcie_g4_outbound_win_set(pcie, pf, PAB_AXI_TYPE_MEM,
+ pcie->cfg_res.start +
+ CONFIG_SYS_PCI_MEMORY_SIZE * pf, 0x0,
+ CONFIG_SYS_PCI_MEMORY_SIZE);
+
+ val = ccsr_readl(pcie, PAB_AXI_AMAP_PCI_HDR_PARAM(pf));
+ val &= ~FUNC_NUM_PCIE_MASK;
+ val |= pf;
+ ccsr_writel(pcie, PAB_AXI_AMAP_PCI_HDR_PARAM(pf), val);
+}
+
+static void ls_pcie_g4_ep_enable_bar(struct ls_pcie_g4 *pcie, int pf,
+ int bar, bool vf_bar, bool enable)
+{
+ u32 val;
+ u32 bar_pos = BAR_POS(bar, pf, vf_bar);
+
+ val = ccsr_readl(pcie, GPEX_BAR_ENABLE);
+ if (enable)
+ val |= 1 << bar_pos;
+ else
+ val &= ~(1 << bar_pos);
+ ccsr_writel(pcie, GPEX_BAR_ENABLE, val);
+}
+
+static void ls_pcie_g4_ep_set_bar_size(struct ls_pcie_g4 *pcie, int pf,
+ int bar, bool vf_bar, u64 size)
+{
+ u32 bar_pos = BAR_POS(bar, pf, vf_bar);
+ u32 mask_l = lower_32_bits(~(size - 1));
+ u32 mask_h = upper_32_bits(~(size - 1));
+
+ ccsr_writel(pcie, GPEX_BAR_SELECT, bar_pos);
+ ccsr_writel(pcie, GPEX_BAR_SIZE_LDW, mask_l);
+ ccsr_writel(pcie, GPEX_BAR_SIZE_UDW, mask_h);
+}
+
+static void ls_pcie_g4_ep_setup_bar(struct ls_pcie_g4 *pcie, int pf,
+ int bar, bool vf_bar, u64 size)
+{
+ bool en = size ? true : false;
+
+ ls_pcie_g4_ep_enable_bar(pcie, pf, bar, vf_bar, en);
+ ls_pcie_g4_ep_set_bar_size(pcie, pf, bar, vf_bar, size);
+}
+
+static void ls_pcie_g4_ep_setup_bars(struct ls_pcie_g4 *pcie, int pf)
+{
+ int bar;
+
+ /* Setup PF BARs */
+ for (bar = 0; bar < PF_BAR_NUM; bar++)
+ ls_pcie_g4_ep_setup_bar(pcie, pf, bar, false, bar_size[bar]);
+
+ if (!pcie->sriov_support)
+ return;
+
+ /* Setup VF BARs */
+ for (bar = 0; bar < VF_BAR_NUM; bar++)
+ ls_pcie_g4_ep_setup_bar(pcie, pf, bar, true, bar_size[bar]);
+}
+
+static void ls_pcie_g4_set_sriov(struct ls_pcie_g4 *pcie, int pf)
+{
+ unsigned int val;
+
+ val = ccsr_readl(pcie, GPEX_SRIOV_INIT_VFS_TOTAL_VF(pf));
+ val &= ~(TTL_VF_MASK << TTL_VF_SHIFT);
+ val |= PCIE_VF_NUM << TTL_VF_SHIFT;
+ val &= ~(INI_VF_MASK << INI_VF_SHIFT);
+ val |= PCIE_VF_NUM << INI_VF_SHIFT;
+ ccsr_writel(pcie, GPEX_SRIOV_INIT_VFS_TOTAL_VF(pf), val);
+
+ val = ccsr_readl(pcie, PCIE_SRIOV_VF_OFFSET_STRIDE);
+ val += PCIE_VF_NUM * pf - pf;
+ ccsr_writel(pcie, GPEX_SRIOV_VF_OFFSET_STRIDE(pf), val);
+}
+
+static void ls_pcie_g4_setup_ep(struct ls_pcie_g4 *pcie)
+{
+ u32 pf, sriov;
+ u32 val;
+ int i;
+
+ /* Enable APIO and Memory Win */
+ val = ccsr_readl(pcie, PAB_AXI_PIO_CTRL(0));
+ val |= APIO_EN | MEM_WIN_EN;
+ ccsr_writel(pcie, PAB_AXI_PIO_CTRL(0), val);
+
+ sriov = ccsr_readl(pcie, PCIE_SRIOV_CAPABILITY);
+ if (PCI_EXT_CAP_ID(sriov) == PCI_EXT_CAP_ID_SRIOV)
+ pcie->sriov_support = 1;
+
+ pf = pcie->sriov_support ? PCIE_PF_NUM : 1;
+
+ for (i = 0; i < pf; i++) {
+ ls_pcie_g4_ep_setup_bars(pcie, i);
+ ls_pcie_g4_ep_setup_wins(pcie, i);
+ if (pcie->sriov_support)
+ ls_pcie_g4_set_sriov(pcie, i);
+ }
+
+ ls_pcie_g4_ep_enable_cfg(pcie);
+ ls_pcie_g4_dump_wins(pcie, pf);
+}
+
+static int ls_pcie_g4_probe(struct udevice *dev)
+{
+ struct ls_pcie_g4 *pcie = dev_get_priv(dev);
+ const void *fdt = gd->fdt_blob;
+ int node = dev_of_offset(dev);
+ u32 link_ctrl_sta;
+ u32 val;
+ int ret;
+
+ pcie->bus = dev;
+
+ ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
+ "ccsr", &pcie->ccsr_res);
+ if (ret) {
+ printf("ls-pcie-g4: resource \"ccsr\" not found\n");
+ return ret;
+ }
+
+ pcie->idx = (pcie->ccsr_res.start - PCIE_SYS_BASE_ADDR) /
+ PCIE_CCSR_SIZE;
+
+ list_add(&pcie->list, &ls_pcie_g4_list);
+
+ pcie->enabled = is_serdes_configured(PCIE_SRDS_PRTCL(pcie->idx));
+ if (!pcie->enabled) {
+ printf("PCIe%d: %s disabled\n", pcie->idx, dev->name);
+ return 0;
+ }
+
+ pcie->ccsr = map_physmem(pcie->ccsr_res.start,
+ fdt_resource_size(&pcie->ccsr_res),
+ MAP_NOCACHE);
+
+ ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
+ "config", &pcie->cfg_res);
+ if (ret) {
+ printf("%s: resource \"config\" not found\n", dev->name);
+ return ret;
+ }
+
+ pcie->cfg = map_physmem(pcie->cfg_res.start,
+ fdt_resource_size(&pcie->cfg_res),
+ MAP_NOCACHE);
+
+ ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
+ "lut", &pcie->lut_res);
+ if (ret) {
+ printf("ls-pcie-g4: resource \"lut\" not found\n");
+ return ret;
+ }
+
+ pcie->lut = map_physmem(pcie->lut_res.start,
+ fdt_resource_size(&pcie->lut_res),
+ MAP_NOCACHE);
+
+ ret = fdt_get_named_resource(fdt, node, "reg", "reg-names",
+ "pf_ctrl", &pcie->pf_ctrl_res);
+ if (ret) {
+ printf("ls-pcie-g4: resource \"pf_ctrl\" not found\n");
+ return ret;
+ }
+
+ pcie->pf_ctrl = map_physmem(pcie->pf_ctrl_res.start,
+ fdt_resource_size(&pcie->pf_ctrl_res),
+ MAP_NOCACHE);
+
+ pcie->big_endian = fdtdec_get_bool(fdt, node, "big-endian");
+
+ debug("%s ccsr:%lx, cfg:0x%lx, big-endian:%d\n",
+ dev->name, (unsigned long)pcie->ccsr, (unsigned long)pcie->cfg,
+ pcie->big_endian);
+
+ pcie->mode = readb(pcie->ccsr + PCI_HEADER_TYPE) & 0x7f;
+
+ if (pcie->mode == PCI_HEADER_TYPE_NORMAL) {
+ printf("PCIe%u: %s %s", pcie->idx, dev->name, "Endpoint");
+ ls_pcie_g4_setup_ep(pcie);
+ } else {
+ printf("PCIe%u: %s %s", pcie->idx, dev->name, "Root Complex");
+ ls_pcie_g4_setup_ctrl(pcie);
+ }
+
+ /* Enable Amba & PEX PIO */
+ val = ccsr_readl(pcie, PAB_CTRL);
+ val |= PAB_CTRL_APIO_EN | PAB_CTRL_PPIO_EN;
+ ccsr_writel(pcie, PAB_CTRL, val);
+
+ val = ccsr_readl(pcie, PAB_PEX_PIO_CTRL(0));
+ val |= PPIO_EN;
+ ccsr_writel(pcie, PAB_PEX_PIO_CTRL(0), val);
+
+ if (!ls_pcie_g4_link_up(pcie)) {
+ /* Let the user know there's no PCIe link */
+ printf(": no link\n");
+ return 0;
+ }
+
+ /* Print the negotiated PCIe link width and speed */
+ link_ctrl_sta = ccsr_readl(pcie, PCIE_LINK_CTRL_STA);
+ printf(": x%d gen%d\n",
+ (link_ctrl_sta >> PCIE_LINK_WIDTH_SHIFT & PCIE_LINK_WIDTH_MASK),
+ (link_ctrl_sta >> PCIE_LINK_SPEED_SHIFT) & PCIE_LINK_SPEED_MASK);
+
+ return 0;
+}
+
+static const struct dm_pci_ops ls_pcie_g4_ops = {
+ .read_config = ls_pcie_g4_read_config,
+ .write_config = ls_pcie_g4_write_config,
+};
+
+static const struct udevice_id ls_pcie_g4_ids[] = {
+ { .compatible = "fsl,lx2160a-pcie" },
+ { }
+};
+
+U_BOOT_DRIVER(pcie_layerscape_gen4) = {
+ .name = "pcie_layerscape_gen4",
+ .id = UCLASS_PCI,
+ .of_match = ls_pcie_g4_ids,
+ .ops = &ls_pcie_g4_ops,
+ .probe = ls_pcie_g4_probe,
+ .priv_auto_alloc_size = sizeof(struct ls_pcie_g4),
+};
+
+/* No fixups needed so far */
+void ft_pci_setup(void *blob, bd_t *bd)
+{
+}
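
For context, a minimal editorial sketch (not part of this commit) of how the ops above are reached at runtime: once the controller probes in RC mode and the PCI uclass enumerates the bus, config cycles issued through the generic DM PCI helpers end up in ls_pcie_g4_read_config()/ls_pcie_g4_write_config() via the dm_pci_ops registered above. The function below is hypothetical and assumes 'dev' is an already-probed PCI device behind this bridge; dm_pci_read_config32() and PCI_VENDOR_ID are the standard U-Boot DM PCI helper and config register offset.

#include <common.h>
#include <dm.h>
#include <pci.h>

/* Hypothetical helper: dump the IDs of one device behind the Gen4 RC */
static void example_print_ids(struct udevice *dev)
{
	u32 id;

	/* 32-bit read at offset 0: vendor ID in [15:0], device ID in [31:16] */
	dm_pci_read_config32(dev, PCI_VENDOR_ID, &id);
	printf("%s: %04x:%04x\n", dev->name, id & 0xffff, id >> 16);
}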
diff --git a/drivers/pci/pcie_layerscape_gen4.h b/drivers/pci/pcie_layerscape_gen4.h
new file mode 100644
index 00000000000..27c2d09332c
--- /dev/null
+++ b/drivers/pci/pcie_layerscape_gen4.h
@@ -0,0 +1,264 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2018-2019 NXP
+ *
+ * PCIe Gen4 driver for NXP Layerscape SoCs
+ * Author: Hou Zhiqiang <Minder.Hou@gmail.com>
+ */
+
+#ifndef _PCIE_LAYERSCAPE_GEN4_H_
+#define _PCIE_LAYERSCAPE_GEN4_H_
+#include <pci.h>
+#include <dm.h>
+
+#ifndef CONFIG_SYS_PCI_MEMORY_SIZE
+#define CONFIG_SYS_PCI_MEMORY_SIZE (4 * 1024 * 1024 * 1024ULL)
+#endif
+
+#ifndef CONFIG_SYS_PCI_EP_MEMORY_BASE
+#define CONFIG_SYS_PCI_EP_MEMORY_BASE CONFIG_SYS_LOAD_ADDR
+#endif
+
+#define PCIE_PF_NUM 2
+#define PCIE_VF_NUM 32
+
+#define LS_G4_PF0 0
+#define LS_G4_PF1 1
+#define PF_BAR_NUM 4
+#define VF_BAR_NUM 4
+#define PCIE_BAR_SIZE (8 * 1024) /* 8K */
+#define PCIE_BAR0_SIZE PCIE_BAR_SIZE
+#define PCIE_BAR1_SIZE PCIE_BAR_SIZE
+#define PCIE_BAR2_SIZE PCIE_BAR_SIZE
+#define PCIE_BAR4_SIZE PCIE_BAR_SIZE
+#define SIZE_1T (1024 * 1024 * 1024 * 1024ULL)
+
+/* GPEX CSR */
+#define GPEX_CLASSCODE 0x474
+#define GPEX_CLASSCODE_SHIFT 16
+#define GPEX_CLASSCODE_MASK 0xffff
+
+#define GPEX_CFG_READY 0x4b0
+#define PCIE_CONFIG_READY BIT(0)
+
+#define GPEX_BAR_ENABLE 0x4d4
+#define GPEX_BAR_SIZE_LDW 0x4d8
+#define GPEX_BAR_SIZE_UDW 0x4dC
+#define GPEX_BAR_SELECT 0x4e0
+
+#define BAR_POS(bar, pf, vf_bar) \
+ ((bar) + (pf) * PF_BAR_NUM + (vf_bar) * PCIE_PF_NUM * PF_BAR_NUM)
+
+#define GPEX_SRIOV_INIT_VFS_TOTAL_VF(pf) (0x644 + (pf) * 4)
+#define TTL_VF_MASK 0xffff
+#define TTL_VF_SHIFT 16
+#define INI_VF_MASK 0xffff
+#define INI_VF_SHIFT 0
+#define GPEX_SRIOV_VF_OFFSET_STRIDE(pf) (0x704 + (pf) * 4)
+
+/* PAB CSR */
+#define PAB_CTRL 0x808
+#define PAB_CTRL_APIO_EN BIT(0)
+#define PAB_CTRL_PPIO_EN BIT(1)
+#define PAB_CTRL_MAX_BRST_LEN_SHIFT 4
+#define PAB_CTRL_MAX_BRST_LEN_MASK 0x3
+#define PAB_CTRL_PAGE_SEL_SHIFT 13
+#define PAB_CTRL_PAGE_SEL_MASK 0x3f
+#define PAB_CTRL_FUNC_SEL_SHIFT 19
+#define PAB_CTRL_FUNC_SEL_MASK 0x1ff
+
+#define PAB_RST_CTRL 0x820
+#define PAB_BR_STAT 0x80c
+
+/* AXI PIO Engines */
+#define PAB_AXI_PIO_CTRL(idx) (0x840 + 0x10 * (idx))
+#define APIO_EN BIT(0)
+#define MEM_WIN_EN BIT(1)
+#define IO_WIN_EN BIT(2)
+#define CFG_WIN_EN BIT(3)
+#define PAB_AXI_PIO_STAT(idx) (0x844 + 0x10 * (idx))
+#define PAB_AXI_PIO_SL_CMD_STAT(idx) (0x848 + 0x10 * (idx))
+#define PAB_AXI_PIO_SL_ADDR_STAT(idx) (0x84c + 0x10 * (idx))
+#define PAB_AXI_PIO_SL_EXT_ADDR_STAT(idx) (0xb8a0 + 0x4 * (idx))
+
+/* PEX PIO Engines */
+#define PAB_PEX_PIO_CTRL(idx) (0x8c0 + 0x10 * (idx))
+#define PPIO_EN BIT(0)
+#define PAB_PEX_PIO_STAT(idx) (0x8c4 + 0x10 * (idx))
+#define PAB_PEX_PIO_MT_STAT(idx) (0x8c8 + 0x10 * (idx))
+
+#define INDIRECT_ADDR_BNDRY 0xc00
+#define PAGE_IDX_SHIFT 10
+#define PAGE_ADDR_MASK 0x3ff
+
+#define OFFSET_TO_PAGE_IDX(off) \
+ (((off) >> PAGE_IDX_SHIFT) & PAB_CTRL_PAGE_SEL_MASK)
+
+#define OFFSET_TO_PAGE_ADDR(off) \
+ (((off) & PAGE_ADDR_MASK) | INDIRECT_ADDR_BNDRY)
+
+/* APIO WINs */
+#define PAB_AXI_AMAP_CTRL(idx) (0xba0 + 0x10 * (idx))
+#define PAB_EXT_AXI_AMAP_SIZE(idx) (0xbaf0 + 0x4 * (idx))
+#define PAB_AXI_AMAP_AXI_WIN(idx) (0xba4 + 0x10 * (idx))
+#define PAB_EXT_AXI_AMAP_AXI_WIN(idx) (0x80a0 + 0x4 * (idx))
+#define PAB_AXI_AMAP_PEX_WIN_L(idx) (0xba8 + 0x10 * (idx))
+#define PAB_AXI_AMAP_PEX_WIN_H(idx) (0xbac + 0x10 * (idx))
+#define PAB_AXI_AMAP_PCI_HDR_PARAM(idx) (0x5ba0 + 0x4 * (idx))
+#define FUNC_NUM_PCIE_MASK GENMASK(7, 0)
+
+#define AXI_AMAP_CTRL_EN BIT(0)
+#define AXI_AMAP_CTRL_TYPE_SHIFT 1
+#define AXI_AMAP_CTRL_TYPE_MASK 0x3
+#define AXI_AMAP_CTRL_SIZE_SHIFT 10
+#define AXI_AMAP_CTRL_SIZE_MASK 0x3fffff
+
+#define PAB_TARGET_BUS(x) (((x) & 0xff) << 24)
+#define PAB_TARGET_DEV(x) (((x) & 0x1f) << 19)
+#define PAB_TARGET_FUNC(x) (((x) & 0x7) << 16)
+
+#define PAB_AXI_TYPE_CFG 0x00
+#define PAB_AXI_TYPE_IO 0x01
+#define PAB_AXI_TYPE_MEM 0x02
+#define PAB_AXI_TYPE_ATOM 0x03
+
+#define PAB_WINS_NUM 256
+
+/* PPIO WINs RC mode */
+#define PAB_PEX_AMAP_CTRL(idx) (0x4ba0 + 0x10 * (idx))
+#define PAB_EXT_PEX_AMAP_SIZE(idx) (0xbef0 + 0x04 * (idx))
+#define PAB_PEX_AMAP_AXI_WIN(idx) (0x4ba4 + 0x10 * (idx))
+#define PAB_EXT_PEX_AMAP_AXI_WIN(idx) (0xb4a0 + 0x04 * (idx))
+#define PAB_PEX_AMAP_PEX_WIN_L(idx) (0x4ba8 + 0x10 * (idx))
+#define PAB_PEX_AMAP_PEX_WIN_H(idx) (0x4bac + 0x10 * (idx))
+
+#define IB_TYPE_MEM_F 0x2
+#define IB_TYPE_MEM_NF 0x3
+
+#define PEX_AMAP_CTRL_TYPE_SHIFT 0x1
+#define PEX_AMAP_CTRL_EN_SHIFT 0x0
+#define PEX_AMAP_CTRL_TYPE_MASK 0x3
+#define PEX_AMAP_CTRL_EN_MASK 0x1
+
+/* PPIO WINs EP mode */
+#define PAB_PEX_BAR_AMAP(pf, bar) \
+ (0x1ba0 + 0x20 * (pf) + 4 * (bar))
+#define BAR_AMAP_EN BIT(0)
+#define PAB_EXT_PEX_BAR_AMAP(pf, bar) \
+ (0x84a0 + 0x20 * (pf) + 4 * (bar))
+
+/* CCSR registers */
+#define PCIE_LINK_CTRL_STA 0x5c
+#define PCIE_LINK_SPEED_SHIFT 16
+#define PCIE_LINK_SPEED_MASK 0x0f
+#define PCIE_LINK_WIDTH_SHIFT 20
+#define PCIE_LINK_WIDTH_MASK 0x3f
+#define PCIE_SRIOV_CAPABILITY 0x2a0
+#define PCIE_SRIOV_VF_OFFSET_STRIDE 0x2b4
+
+/* LUT registers */
+#define PCIE_LUT_UDR(n) (0x800 + (n) * 8)
+#define PCIE_LUT_LDR(n) (0x804 + (n) * 8)
+#define PCIE_LUT_ENABLE BIT(31)
+#define PCIE_LUT_ENTRY_COUNT 32
+
+/* PF control registers */
+#define PCIE_LTSSM_STA 0x7fc
+#define LTSSM_STATE_MASK 0x7f
+#define LTSSM_PCIE_L0 0x2d /* L0 state */
+
+#define PCIE_SRDS_PRTCL(idx) (PCIE1 + (idx))
+#define PCIE_SYS_BASE_ADDR 0x3400000
+#define PCIE_CCSR_SIZE 0x0100000
+
+struct ls_pcie_g4 {
+ int idx;
+ struct list_head list;
+ struct udevice *bus;
+ struct fdt_resource ccsr_res;
+ struct fdt_resource cfg_res;
+ struct fdt_resource lut_res;
+ struct fdt_resource pf_ctrl_res;
+ void __iomem *ccsr;
+ void __iomem *cfg;
+ void __iomem *lut;
+ void __iomem *pf_ctrl;
+ bool big_endian;
+ bool enabled;
+ int next_lut_index;
+ struct pci_controller hose;
+ int stream_id_cur;
+ int mode;
+ int sriov_support;
+};
+
+extern struct list_head ls_pcie_g4_list;
+
+static inline void lut_writel(struct ls_pcie_g4 *pcie, unsigned int value,
+ unsigned int offset)
+{
+ if (pcie->big_endian)
+ out_be32(pcie->lut + offset, value);
+ else
+ out_le32(pcie->lut + offset, value);
+}
+
+static inline u32 lut_readl(struct ls_pcie_g4 *pcie, unsigned int offset)
+{
+ if (pcie->big_endian)
+ return in_be32(pcie->lut + offset);
+ else
+ return in_le32(pcie->lut + offset);
+}
+
+static inline void ccsr_set_page(struct ls_pcie_g4 *pcie, u8 pg_idx)
+{
+ u32 val;
+
+ val = in_le32(pcie->ccsr + PAB_CTRL);
+ val &= ~(PAB_CTRL_PAGE_SEL_MASK << PAB_CTRL_PAGE_SEL_SHIFT);
+ val |= (pg_idx & PAB_CTRL_PAGE_SEL_MASK) << PAB_CTRL_PAGE_SEL_SHIFT;
+
+ out_le32(pcie->ccsr + PAB_CTRL, val);
+}
+
+static inline unsigned int ccsr_readl(struct ls_pcie_g4 *pcie, u32 offset)
+{
+ if (offset < INDIRECT_ADDR_BNDRY) {
+ ccsr_set_page(pcie, 0);
+ return in_le32(pcie->ccsr + offset);
+ }
+
+ ccsr_set_page(pcie, OFFSET_TO_PAGE_IDX(offset));
+ return in_le32(pcie->ccsr + OFFSET_TO_PAGE_ADDR(offset));
+}
+
+static inline void ccsr_writel(struct ls_pcie_g4 *pcie, u32 offset, u32 value)
+{
+ if (offset < INDIRECT_ADDR_BNDRY) {
+ ccsr_set_page(pcie, 0);
+ out_le32(pcie->ccsr + offset, value);
+ } else {
+ ccsr_set_page(pcie, OFFSET_TO_PAGE_IDX(offset));
+ out_le32(pcie->ccsr + OFFSET_TO_PAGE_ADDR(offset), value);
+ }
+}
+
+static inline unsigned int pf_ctrl_readl(struct ls_pcie_g4 *pcie, u32 offset)
+{
+ if (pcie->big_endian)
+ return in_be32(pcie->pf_ctrl + offset);
+ else
+ return in_le32(pcie->pf_ctrl + offset);
+}
+
+static inline void pf_ctrl_writel(struct ls_pcie_g4 *pcie, u32 offset,
+ u32 value)
+{
+ if (pcie->big_endian)
+ out_be32(pcie->pf_ctrl + offset, value);
+ else
+ out_le32(pcie->pf_ctrl + offset, value);
+}
+
+#endif /* _PCIE_LAYERSCAPE_GEN4_H_ */
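
A worked example (editorial note, not part of the commit) of the paged indirect access implemented by ccsr_readl()/ccsr_writel(): offsets below INDIRECT_ADDR_BNDRY (0xc00) are accessed directly through page 0, while anything above is funnelled through the 1 KiB window starting at 0xc00 after programming the page-select field of PAB_CTRL. Taking PAB_PEX_AMAP_CTRL(0) = 0x4ba0:

	OFFSET_TO_PAGE_IDX(0x4ba0)  = (0x4ba0 >> 10) & 0x3f   = 0x12
	OFFSET_TO_PAGE_ADDR(0x4ba0) = (0x4ba0 & 0x3ff) | 0xc00 = 0xfa0

so ccsr_writel(pcie, PAB_PEX_AMAP_CTRL(0), val) first selects page 0x12 via ccsr_set_page() and then writes to pcie->ccsr + 0xfa0.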