Diffstat (limited to 'drivers/staging')
-rw-r--r--  drivers/staging/Kconfig | 3
-rw-r--r--  drivers/staging/Makefile | 4
-rw-r--r--  drivers/staging/android/Kconfig | 4
-rw-r--r--  drivers/staging/android/ion/ion_cma_heap.c | 25
-rw-r--r--  drivers/staging/fsl-dpaa2/Kconfig | 3
-rw-r--r--  drivers/staging/fsl-dpaa2/Makefile | 2
-rw-r--r--  drivers/staging/fsl-dpaa2/ethsw/ethsw.c | 50
-rw-r--r--  drivers/staging/fsl-dpaa2/ethsw/ethsw.h | 5
-rw-r--r--  drivers/staging/fsl-dpaa2/evb/Kconfig | 7
-rw-r--r--  drivers/staging/fsl-dpaa2/evb/Makefile | 10
-rw-r--r--  drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h | 279
-rw-r--r--  drivers/staging/fsl-dpaa2/evb/dpdmux.c | 1111
-rw-r--r--  drivers/staging/fsl-dpaa2/evb/dpdmux.h | 453
-rw-r--r--  drivers/staging/fsl-dpaa2/evb/evb.c | 1356
-rw-r--r--  drivers/staging/fsl-dpaa2/mac/Kconfig | 23
-rw-r--r--  drivers/staging/fsl-dpaa2/mac/Makefile | 10
-rw-r--r--  drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 196
-rw-r--r--  drivers/staging/fsl-dpaa2/mac/dpmac.c | 689
-rw-r--r--  drivers/staging/fsl-dpaa2/mac/dpmac.h | 379
-rw-r--r--  drivers/staging/fsl-dpaa2/mac/mac.c | 819
-rw-r--r--  drivers/staging/fsl_ppfe/Kconfig | 21
-rw-r--r--  drivers/staging/fsl_ppfe/Makefile | 20
-rw-r--r--  drivers/staging/fsl_ppfe/TODO | 2
-rw-r--r--  drivers/staging/fsl_ppfe/include/pfe/cbus.h | 78
-rw-r--r--  drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h | 55
-rw-r--r--  drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h | 289
-rw-r--r--  drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h | 242
-rw-r--r--  drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h | 86
-rw-r--r--  drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h | 100
-rw-r--r--  drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h | 50
-rw-r--r--  drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h | 168
-rw-r--r--  drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h | 61
-rw-r--r--  drivers/staging/fsl_ppfe/include/pfe/pfe.h | 372
-rw-r--r--  drivers/staging/fsl_ppfe/pfe_cdev.c | 258
-rw-r--r--  drivers/staging/fsl_ppfe/pfe_cdev.h | 41
-rw-r--r--  drivers/staging/fsl_ppfe/pfe_ctrl.c | 226
-rw-r--r--  drivers/staging/fsl_ppfe/pfe_ctrl.h | 100
-rw-r--r--  drivers/staging/fsl_ppfe/pfe_debugfs.c | 99
-rw-r--r--  drivers/staging/fsl_ppfe/pfe_debugfs.h | 13
-rw-r--r--  drivers/staging/fsl_ppfe/pfe_eth.c | 2569
-rw-r--r--  drivers/staging/fsl_ppfe/pfe_eth.h | 175
-rw-r--r--  drivers/staging/fsl_ppfe/pfe_firmware.c | 302
-rw-r--r--  drivers/staging/fsl_ppfe/pfe_firmware.h | 20
-rw-r--r--  drivers/staging/fsl_ppfe/pfe_hal.c | 1517
-rw-r--r--  drivers/staging/fsl_ppfe/pfe_hif.c | 1060
-rw-r--r--  drivers/staging/fsl_ppfe/pfe_hif.h | 199
-rw-r--r--  drivers/staging/fsl_ppfe/pfe_hif_lib.c | 628
-rw-r--r--  drivers/staging/fsl_ppfe/pfe_hif_lib.h | 229
-rw-r--r--  drivers/staging/fsl_ppfe/pfe_hw.c | 164
-rw-r--r--  drivers/staging/fsl_ppfe/pfe_hw.h | 15
-rw-r--r--  drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c | 383
-rw-r--r--  drivers/staging/fsl_ppfe/pfe_mod.c | 158
-rw-r--r--  drivers/staging/fsl_ppfe/pfe_mod.h | 103
-rw-r--r--  drivers/staging/fsl_ppfe/pfe_perfmon.h | 26
-rw-r--r--  drivers/staging/fsl_ppfe/pfe_sysfs.c | 806
-rw-r--r--  drivers/staging/fsl_ppfe/pfe_sysfs.h | 17
-rw-r--r--  drivers/staging/fsl_qbman/Kconfig | 228
-rw-r--r--  drivers/staging/fsl_qbman/Makefile | 32
-rw-r--r--  drivers/staging/fsl_qbman/bman_config.c | 720
-rw-r--r--  drivers/staging/fsl_qbman/bman_debugfs.c | 125
-rw-r--r--  drivers/staging/fsl_qbman/bman_driver.c | 559
-rw-r--r--  drivers/staging/fsl_qbman/bman_high.c | 1145
-rw-r--r--  drivers/staging/fsl_qbman/bman_low.h | 565
-rw-r--r--  drivers/staging/fsl_qbman/bman_private.h | 166
-rw-r--r--  drivers/staging/fsl_qbman/bman_test.c | 56
-rw-r--r--  drivers/staging/fsl_qbman/bman_test.h | 44
-rw-r--r--  drivers/staging/fsl_qbman/bman_test_high.c | 183
-rw-r--r--  drivers/staging/fsl_qbman/bman_test_thresh.c | 196
-rw-r--r--  drivers/staging/fsl_qbman/dpa_alloc.c | 706
-rw-r--r--  drivers/staging/fsl_qbman/dpa_sys.h | 258
-rw-r--r--  drivers/staging/fsl_qbman/dpa_sys_arm.h | 95
-rw-r--r--  drivers/staging/fsl_qbman/dpa_sys_arm64.h | 102
-rw-r--r--  drivers/staging/fsl_qbman/dpa_sys_ppc32.h | 70
-rw-r--r--  drivers/staging/fsl_qbman/dpa_sys_ppc64.h | 79
-rw-r--r--  drivers/staging/fsl_qbman/fsl_usdpaa.c | 2284
-rw-r--r--  drivers/staging/fsl_qbman/fsl_usdpaa_irq.c | 289
-rw-r--r--  drivers/staging/fsl_qbman/qbman_driver.c | 91
-rw-r--r--  drivers/staging/fsl_qbman/qman_config.c | 1224
-rw-r--r--  drivers/staging/fsl_qbman/qman_debugfs.c | 1597
-rw-r--r--  drivers/staging/fsl_qbman/qman_driver.c | 962
-rw-r--r--  drivers/staging/fsl_qbman/qman_high.c | 5660
-rw-r--r--  drivers/staging/fsl_qbman/qman_low.h | 1445
-rw-r--r--  drivers/staging/fsl_qbman/qman_private.h | 398
-rw-r--r--  drivers/staging/fsl_qbman/qman_test.c | 57
-rw-r--r--  drivers/staging/fsl_qbman/qman_test.h | 45
-rw-r--r--  drivers/staging/fsl_qbman/qman_test_high.c | 216
-rw-r--r--  drivers/staging/fsl_qbman/qman_test_hotpotato.c | 502
-rw-r--r--  drivers/staging/fsl_qbman/qman_utility.c | 129
-rw-r--r--  drivers/staging/media/Makefile | 2
-rw-r--r--  drivers/staging/media/imx/Kconfig | 84
-rw-r--r--  drivers/staging/media/imx/Makefile | 11
-rw-r--r--  drivers/staging/media/imx/gmsl-max9286.c | 3344
-rw-r--r--  drivers/staging/media/imx/imx8-common.h | 99
-rw-r--r--  drivers/staging/media/imx/imx8-isi-cap.c | 1795
-rw-r--r--  drivers/staging/media/imx/imx8-isi-core.c | 621
-rw-r--r--  drivers/staging/media/imx/imx8-isi-core.h | 411
-rw-r--r--  drivers/staging/media/imx/imx8-isi-hw.c | 840
-rw-r--r--  drivers/staging/media/imx/imx8-isi-hw.h | 484
-rw-r--r--  drivers/staging/media/imx/imx8-isi-m2m.c | 1201
-rw-r--r--  drivers/staging/media/imx/imx8-media-dev.c | 1079
-rw-r--r--  drivers/staging/media/imx/imx8-mipi-csi2-sam.c | 1739
-rw-r--r--  drivers/staging/media/imx/imx8-mipi-csi2.c | 1170
-rw-r--r--  drivers/staging/media/imx/imx8-parallel-csi.c | 837
103 files changed, 49992 insertions, 33 deletions
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 6f1fa4c849a1..85916a1b782a 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -125,4 +125,7 @@ source "drivers/staging/exfat/Kconfig"
source "drivers/staging/qlge/Kconfig"
+source "drivers/staging/fsl_qbman/Kconfig"
+source "drivers/staging/fsl_ppfe/Kconfig"
+
endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index a90f9b308c8d..c1f08fb939ac 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_FB_SM750) += sm750fb/
obj-$(CONFIG_USB_EMXX) += emxx_udc/
obj-$(CONFIG_SPEAKUP) += speakup/
obj-$(CONFIG_MFD_NVEC) += nvec/
-obj-$(CONFIG_ANDROID) += android/
+obj-y += android/
obj-$(CONFIG_STAGING_BOARD) += board/
obj-$(CONFIG_LTE_GDM724X) += gdm724x/
obj-$(CONFIG_FIREWIRE_SERIAL) += fwserial/
@@ -53,3 +53,5 @@ obj-$(CONFIG_UWB) += uwb/
obj-$(CONFIG_USB_WUSB) += wusbcore/
obj-$(CONFIG_EXFAT_FS) += exfat/
obj-$(CONFIG_QLGE) += qlge/
+obj-$(CONFIG_FSL_SDK_DPA) += fsl_qbman/
+obj-$(CONFIG_FSL_PPFE) += fsl_ppfe/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index d6d605d5cbde..a9c0a20c33d2 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -22,8 +22,8 @@ config ANDROID_VSOC
a 'cuttlefish' Android image inside QEmu. The driver interacts with
a QEmu ivshmem device. If built as a module, it will be called vsoc.
-source "drivers/staging/android/ion/Kconfig"
-
endif # if ANDROID
+source "drivers/staging/android/ion/Kconfig"
+
endmenu
diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c
index bf65e67ef9d8..25099ffe15b2 100644
--- a/drivers/staging/android/ion/ion_cma_heap.c
+++ b/drivers/staging/android/ion/ion_cma_heap.c
@@ -13,6 +13,10 @@
#include <linux/cma.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
+#include <asm/cacheflush.h>
+#ifdef CONFIG_ARM
+#include <asm/outercache.h>
+#endif
#include "ion.h"
@@ -46,17 +50,36 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
if (PageHighMem(pages)) {
unsigned long nr_clear_pages = nr_pages;
struct page *page = pages;
+#ifdef CONFIG_ARM
+ phys_addr_t base = __pfn_to_phys(page_to_pfn(pages));
+ phys_addr_t end = base + size;
+#endif
while (nr_clear_pages > 0) {
void *vaddr = kmap_atomic(page);
memset(vaddr, 0, PAGE_SIZE);
+#ifdef CONFIG_ARM
+ __cpuc_flush_dcache_area(vaddr, PAGE_SIZE);
+#else
+ __flush_dcache_area(vaddr, PAGE_SIZE);
+#endif
kunmap_atomic(vaddr);
page++;
nr_clear_pages--;
}
+#ifdef CONFIG_ARM
+ outer_flush_range(base, end);
+#endif
} else {
- memset(page_address(pages), 0, size);
+ void *ptr = page_address(pages);
+ memset(ptr, 0, size);
+#ifdef CONFIG_ARM
+ __cpuc_flush_dcache_area(ptr, size);
+ outer_flush_range(__pa(ptr), __pa(ptr) + size);
+#else
+ __flush_dcache_area(ptr, size);
+#endif
}
table = kmalloc(sizeof(*table), GFP_KERNEL);
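On 32-bit ARM the newly added flushes come in two halves: the inner (L1) data cache is flushed per page by virtual address while the atomic kmap is still live, and the outer cache (e.g. a PL310 L2), being physically indexed, is flushed once over the buffer's whole physical range. A minimal sketch of that sequence under the same assumptions as the hunk above (CONFIG_ARM, highmem pages; the helper name is hypothetical):

static void cma_zero_and_flush_highmem(struct page *pages,
				       unsigned long nr_pages, size_t size)
{
	phys_addr_t base = __pfn_to_phys(page_to_pfn(pages));
	struct page *page = pages;

	while (nr_pages--) {
		void *vaddr = kmap_atomic(page);

		memset(vaddr, 0, PAGE_SIZE);
		/* inner (L1) flush by virtual address, mapping still live */
		__cpuc_flush_dcache_area(vaddr, PAGE_SIZE);
		kunmap_atomic(vaddr);
		page++;
	}
	/* the outer cache is physically indexed, so one range flush
	 * over the physical extent covers every page */
	outer_flush_range(base, base + size);
}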
diff --git a/drivers/staging/fsl-dpaa2/Kconfig b/drivers/staging/fsl-dpaa2/Kconfig
index 244237bb068a..1553a125e2b2 100644
--- a/drivers/staging/fsl-dpaa2/Kconfig
+++ b/drivers/staging/fsl-dpaa2/Kconfig
@@ -17,3 +17,6 @@ config FSL_DPAA2_ETHSW
help
Driver for Freescale DPAA2 Ethernet Switch. Select
BRIDGE to have support for bridge tools.
+
+source "drivers/staging/fsl-dpaa2/mac/Kconfig"
+source "drivers/staging/fsl-dpaa2/evb/Kconfig"
diff --git a/drivers/staging/fsl-dpaa2/Makefile b/drivers/staging/fsl-dpaa2/Makefile
index 9645db7689c9..2959f3fff0f4 100644
--- a/drivers/staging/fsl-dpaa2/Makefile
+++ b/drivers/staging/fsl-dpaa2/Makefile
@@ -4,3 +4,5 @@
#
obj-$(CONFIG_FSL_DPAA2_ETHSW) += ethsw/
+obj-$(CONFIG_FSL_DPAA2_MAC) += mac/
+obj-$(CONFIG_FSL_DPAA2_EVB) += evb/
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
index 14a9eebf687e..39c0fe347188 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.c
@@ -18,8 +18,6 @@
#include "ethsw.h"
-static struct workqueue_struct *ethsw_owq;
-
/* Minimal supported DPSW version */
#define DPSW_MIN_VER_MAJOR 8
#define DPSW_MIN_VER_MINOR 1
@@ -1174,10 +1172,6 @@ static int port_netdevice_event(struct notifier_block *unused,
return notifier_from_errno(err);
}
-static struct notifier_block port_nb __read_mostly = {
- .notifier_call = port_netdevice_event,
-};
-
struct ethsw_switchdev_event_work {
struct work_struct work;
struct switchdev_notifier_fdb_info fdb_info;
@@ -1233,8 +1227,10 @@ static int port_switchdev_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ struct ethsw_port_priv *port_priv = netdev_priv(dev);
struct ethsw_switchdev_event_work *switchdev_work;
struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
if (!ethsw_port_dev_check(dev))
return NOTIFY_DONE;
@@ -1270,7 +1266,7 @@ static int port_switchdev_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
- queue_work(ethsw_owq, &switchdev_work->work);
+ queue_work(ethsw->workqueue, &switchdev_work->work);
return NOTIFY_DONE;
@@ -1318,31 +1314,27 @@ static int port_switchdev_blocking_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
-static struct notifier_block port_switchdev_nb = {
- .notifier_call = port_switchdev_event,
-};
-
-static struct notifier_block port_switchdev_blocking_nb = {
- .notifier_call = port_switchdev_blocking_event,
-};
-
static int ethsw_register_notifier(struct device *dev)
{
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
int err;
- err = register_netdevice_notifier(&port_nb);
+ ethsw->port_nb.notifier_call = port_netdevice_event;
+ err = register_netdevice_notifier(&ethsw->port_nb);
if (err) {
dev_err(dev, "Failed to register netdev notifier\n");
return err;
}
- err = register_switchdev_notifier(&port_switchdev_nb);
+ ethsw->port_switchdev_nb.notifier_call = port_switchdev_event;
+ err = register_switchdev_notifier(&ethsw->port_switchdev_nb);
if (err) {
dev_err(dev, "Failed to register switchdev notifier\n");
goto err_switchdev_nb;
}
- err = register_switchdev_blocking_notifier(&port_switchdev_blocking_nb);
+ ethsw->port_switchdevb_nb.notifier_call = port_switchdev_blocking_event;
+ err = register_switchdev_blocking_notifier(&ethsw->port_switchdevb_nb);
if (err) {
dev_err(dev, "Failed to register switchdev blocking notifier\n");
goto err_switchdev_blocking_nb;
@@ -1351,9 +1343,9 @@ static int ethsw_register_notifier(struct device *dev)
return 0;
err_switchdev_blocking_nb:
- unregister_switchdev_notifier(&port_switchdev_nb);
+ unregister_switchdev_notifier(&ethsw->port_switchdev_nb);
err_switchdev_nb:
- unregister_netdevice_notifier(&port_nb);
+ unregister_netdevice_notifier(&ethsw->port_nb);
return err;
}
@@ -1435,9 +1427,10 @@ static int ethsw_init(struct fsl_mc_device *sw_dev)
}
}
- ethsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
- "ethsw");
- if (!ethsw_owq) {
+ ethsw->workqueue = alloc_ordered_workqueue("%s_%d_ordered",
+ WQ_MEM_RECLAIM, "ethsw",
+ ethsw->sw_attr.id);
+ if (!ethsw->workqueue) {
err = -ENOMEM;
goto err_close;
}
@@ -1449,7 +1442,7 @@ static int ethsw_init(struct fsl_mc_device *sw_dev)
return 0;
err_destroy_ordered_workqueue:
- destroy_workqueue(ethsw_owq);
+ destroy_workqueue(ethsw->workqueue);
err_close:
dpsw_close(ethsw->mc_io, 0, ethsw->dpsw_handle);
@@ -1491,21 +1484,22 @@ static int ethsw_port_init(struct ethsw_port_priv *port_priv, u16 port)
static void ethsw_unregister_notifier(struct device *dev)
{
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
struct notifier_block *nb;
int err;
- nb = &port_switchdev_blocking_nb;
+ nb = &ethsw->port_switchdevb_nb;
err = unregister_switchdev_blocking_notifier(nb);
if (err)
dev_err(dev,
"Failed to unregister switchdev blocking notifier (%d)\n", err);
- err = unregister_switchdev_notifier(&port_switchdev_nb);
+ err = unregister_switchdev_notifier(&ethsw->port_switchdev_nb);
if (err)
dev_err(dev,
"Failed to unregister switchdev notifier (%d)\n", err);
- err = unregister_netdevice_notifier(&port_nb);
+ err = unregister_netdevice_notifier(&ethsw->port_nb);
if (err)
dev_err(dev,
"Failed to unregister netdev notifier (%d)\n", err);
@@ -1536,7 +1530,7 @@ static int ethsw_remove(struct fsl_mc_device *sw_dev)
ethsw_teardown_irqs(sw_dev);
- destroy_workqueue(ethsw_owq);
+ destroy_workqueue(ethsw->workqueue);
dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle);
diff --git a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
index 3ea8a0ad8c10..a0244f7d5003 100644
--- a/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
+++ b/drivers/staging/fsl-dpaa2/ethsw/ethsw.h
@@ -66,6 +66,11 @@ struct ethsw_core {
u8 vlans[VLAN_VID_MASK + 1];
bool learning;
+
+ struct notifier_block port_nb;
+ struct notifier_block port_switchdev_nb;
+ struct notifier_block port_switchdevb_nb;
+ struct workqueue_struct *workqueue;
};
#endif /* __ETHSW_H */
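The ethsw changes above convert file-scope notifier blocks and the ordered workqueue into per-instance fields of struct ethsw_core, so several DPSW objects can be probed concurrently. A minimal sketch of the underlying idiom (names hypothetical; ethsw itself recovers its instance via dev_get_drvdata() and netdev_priv() rather than container_of()):

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

struct my_core {
	struct notifier_block nb;	/* embedded, one per instance */
	/* ... other per-instance state ... */
};

static int my_event(struct notifier_block *nb, unsigned long event, void *ptr)
{
	/* recover the owning instance from the embedded notifier_block */
	struct my_core *core = container_of(nb, struct my_core, nb);

	pr_debug("instance %p: event %lu\n", core, event);
	return NOTIFY_DONE;
}

static int my_register(struct my_core *core)
{
	core->nb.notifier_call = my_event;
	return register_netdevice_notifier(&core->nb);
}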
diff --git a/drivers/staging/fsl-dpaa2/evb/Kconfig b/drivers/staging/fsl-dpaa2/evb/Kconfig
new file mode 100644
index 000000000000..3534f6975053
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/evb/Kconfig
@@ -0,0 +1,7 @@
+config FSL_DPAA2_EVB
+ tristate "DPAA2 Edge Virtual Bridge"
+ depends on FSL_MC_BUS && FSL_DPAA2
+ select VLAN_8021Q
+ default y
+ ---help---
+ Prototype driver for DPAA2 Edge Virtual Bridge.
diff --git a/drivers/staging/fsl-dpaa2/evb/Makefile b/drivers/staging/fsl-dpaa2/evb/Makefile
new file mode 100644
index 000000000000..ecc529d74434
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/evb/Makefile
@@ -0,0 +1,10 @@
+
+obj-$(CONFIG_FSL_DPAA2_EVB) += dpaa2-evb.o
+
+dpaa2-evb-objs := evb.o dpdmux.o
+
+all:
+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
+
+clean:
+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
diff --git a/drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h b/drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h
new file mode 100644
index 000000000000..66306804eb3a
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/evb/dpdmux-cmd.h
@@ -0,0 +1,279 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _FSL_DPDMUX_CMD_H
+#define _FSL_DPDMUX_CMD_H
+
+/* DPDMUX Version */
+#define DPDMUX_VER_MAJOR 6
+#define DPDMUX_VER_MINOR 1
+
+#define DPDMUX_CMD_BASE_VER 1
+#define DPDMUX_CMD_ID_OFFSET 4
+
+#define DPDMUX_CMD(id) (((id) << DPDMUX_CMD_ID_OFFSET) | DPDMUX_CMD_BASE_VER)
+
+/* Command IDs */
+#define DPDMUX_CMDID_CLOSE DPDMUX_CMD(0x800)
+#define DPDMUX_CMDID_OPEN DPDMUX_CMD(0x806)
+#define DPDMUX_CMDID_CREATE DPDMUX_CMD(0x906)
+#define DPDMUX_CMDID_DESTROY DPDMUX_CMD(0x986)
+#define DPDMUX_CMDID_GET_API_VERSION DPDMUX_CMD(0xa06)
+
+#define DPDMUX_CMDID_ENABLE DPDMUX_CMD(0x002)
+#define DPDMUX_CMDID_DISABLE DPDMUX_CMD(0x003)
+#define DPDMUX_CMDID_GET_ATTR DPDMUX_CMD(0x004)
+#define DPDMUX_CMDID_RESET DPDMUX_CMD(0x005)
+#define DPDMUX_CMDID_IS_ENABLED DPDMUX_CMD(0x006)
+
+#define DPDMUX_CMDID_SET_IRQ_ENABLE DPDMUX_CMD(0x012)
+#define DPDMUX_CMDID_GET_IRQ_ENABLE DPDMUX_CMD(0x013)
+#define DPDMUX_CMDID_SET_IRQ_MASK DPDMUX_CMD(0x014)
+#define DPDMUX_CMDID_GET_IRQ_MASK DPDMUX_CMD(0x015)
+#define DPDMUX_CMDID_GET_IRQ_STATUS DPDMUX_CMD(0x016)
+#define DPDMUX_CMDID_CLEAR_IRQ_STATUS DPDMUX_CMD(0x017)
+
+#define DPDMUX_CMDID_SET_MAX_FRAME_LENGTH DPDMUX_CMD(0x0a1)
+
+#define DPDMUX_CMDID_UL_RESET_COUNTERS DPDMUX_CMD(0x0a3)
+
+#define DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES DPDMUX_CMD(0x0a7)
+#define DPDMUX_CMDID_IF_GET_ATTR DPDMUX_CMD(0x0a8)
+#define DPDMUX_CMDID_IF_ENABLE DPDMUX_CMD(0x0a9)
+#define DPDMUX_CMDID_IF_DISABLE DPDMUX_CMD(0x0aa)
+
+#define DPDMUX_CMDID_IF_ADD_L2_RULE DPDMUX_CMD(0x0b0)
+#define DPDMUX_CMDID_IF_REMOVE_L2_RULE DPDMUX_CMD(0x0b1)
+#define DPDMUX_CMDID_IF_GET_COUNTER DPDMUX_CMD(0x0b2)
+#define DPDMUX_CMDID_IF_SET_LINK_CFG DPDMUX_CMD(0x0b3)
+#define DPDMUX_CMDID_IF_GET_LINK_STATE DPDMUX_CMD(0x0b4)
+
+#define DPDMUX_CMDID_SET_CUSTOM_KEY DPDMUX_CMD(0x0b5)
+#define DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY DPDMUX_CMD(0x0b6)
+#define DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY DPDMUX_CMD(0x0b7)
+
+#define DPDMUX_MASK(field) \
+ GENMASK(DPDMUX_##field##_SHIFT + DPDMUX_##field##_SIZE - 1, \
+ DPDMUX_##field##_SHIFT)
+#define dpdmux_set_field(var, field, val) \
+ ((var) |= (((val) << DPDMUX_##field##_SHIFT) & DPDMUX_MASK(field)))
+#define dpdmux_get_field(var, field) \
+ (((var) & DPDMUX_MASK(field)) >> DPDMUX_##field##_SHIFT)
+
+struct dpdmux_cmd_open {
+ u32 dpdmux_id;
+};
+
+struct dpdmux_cmd_create {
+ u8 method;
+ u8 manip;
+ u16 num_ifs;
+ u32 pad;
+
+ u16 adv_max_dmat_entries;
+ u16 adv_max_mc_groups;
+ u16 adv_max_vlan_ids;
+ u16 pad1;
+
+ u64 options;
+};
+
+struct dpdmux_cmd_destroy {
+ u32 dpdmux_id;
+};
+
+#define DPDMUX_ENABLE_SHIFT 0
+#define DPDMUX_ENABLE_SIZE 1
+
+struct dpdmux_rsp_is_enabled {
+ u8 en;
+};
+
+struct dpdmux_cmd_set_irq_enable {
+ u8 enable;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dpdmux_cmd_get_irq_enable {
+ u32 pad;
+ u8 irq_index;
+};
+
+struct dpdmux_rsp_get_irq_enable {
+ u8 enable;
+};
+
+struct dpdmux_cmd_set_irq_mask {
+ u32 mask;
+ u8 irq_index;
+};
+
+struct dpdmux_cmd_get_irq_mask {
+ u32 pad;
+ u8 irq_index;
+};
+
+struct dpdmux_rsp_get_irq_mask {
+ u32 mask;
+};
+
+struct dpdmux_cmd_get_irq_status {
+ u32 status;
+ u8 irq_index;
+};
+
+struct dpdmux_rsp_get_irq_status {
+ u32 status;
+};
+
+struct dpdmux_cmd_clear_irq_status {
+ u32 status;
+ u8 irq_index;
+};
+
+struct dpdmux_rsp_get_attr {
+ u8 method;
+ u8 manip;
+ u16 num_ifs;
+ u16 mem_size;
+ u16 pad;
+
+ u64 pad1;
+
+ u32 id;
+ u32 pad2;
+
+ u64 options;
+};
+
+struct dpdmux_cmd_set_max_frame_length {
+ u16 max_frame_length;
+};
+
+#define DPDMUX_ACCEPTED_FRAMES_TYPE_SHIFT 0
+#define DPDMUX_ACCEPTED_FRAMES_TYPE_SIZE 4
+#define DPDMUX_UNACCEPTED_FRAMES_ACTION_SHIFT 4
+#define DPDMUX_UNACCEPTED_FRAMES_ACTION_SIZE 4
+
+struct dpdmux_cmd_if_set_accepted_frames {
+ u16 if_id;
+ u8 frames_options;
+};
+
+struct dpdmux_cmd_if {
+ u16 if_id;
+};
+
+struct dpdmux_rsp_if_get_attr {
+ u8 pad[3];
+ u8 enabled;
+ u8 pad1[3];
+ u8 accepted_frames_type;
+ u32 rate;
+};
+
+struct dpdmux_cmd_if_l2_rule {
+ u16 if_id;
+ u8 mac_addr5;
+ u8 mac_addr4;
+ u8 mac_addr3;
+ u8 mac_addr2;
+ u8 mac_addr1;
+ u8 mac_addr0;
+
+ u32 pad;
+ u16 vlan_id;
+};
+
+struct dpdmux_cmd_if_get_counter {
+ u16 if_id;
+ u8 counter_type;
+};
+
+struct dpdmux_rsp_if_get_counter {
+ u64 pad;
+ u64 counter;
+};
+
+struct dpdmux_cmd_if_set_link_cfg {
+ u16 if_id;
+ u16 pad[3];
+
+ u32 rate;
+ u32 pad1;
+
+ u64 options;
+};
+
+struct dpdmux_cmd_if_get_link_state {
+ u16 if_id;
+};
+
+struct dpdmux_rsp_if_get_link_state {
+ u32 pad;
+ u8 up;
+ u8 pad1[3];
+
+ u32 rate;
+ u32 pad2;
+
+ u64 options;
+};
+
+struct dpdmux_rsp_get_api_version {
+ u16 major;
+ u16 minor;
+};
+
+struct dpdmux_set_custom_key {
+ u64 pad[6];
+ u64 key_cfg_iova;
+};
+
+struct dpdmux_cmd_add_custom_cls_entry {
+ u8 pad[3];
+ u8 key_size;
+ u16 pad1;
+ u16 dest_if;
+ u64 key_iova;
+ u64 mask_iova;
+};
+
+struct dpdmux_cmd_remove_custom_cls_entry {
+ u8 pad[3];
+ u8 key_size;
+ u32 pad1;
+ u64 key_iova;
+ u64 mask_iova;
+};
+
+#endif /* _FSL_DPDMUX_CMD_H */
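For reference, DPDMUX_CMD() packs the command ID into bits [15:4] and the base version into bits [3:0] of the 16-bit command field. A standalone userspace illustration of the arithmetic (hypothetical, for exposition only):

#include <stdio.h>

#define DPDMUX_CMD_BASE_VER	1
#define DPDMUX_CMD_ID_OFFSET	4
#define DPDMUX_CMD(id) (((id) << DPDMUX_CMD_ID_OFFSET) | DPDMUX_CMD_BASE_VER)

int main(void)
{
	printf("OPEN   = 0x%04x\n", DPDMUX_CMD(0x806));	/* 0x8061 */
	printf("CREATE = 0x%04x\n", DPDMUX_CMD(0x906));	/* 0x9061 */
	return 0;
}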
diff --git a/drivers/staging/fsl-dpaa2/evb/dpdmux.c b/drivers/staging/fsl-dpaa2/evb/dpdmux.c
new file mode 100644
index 000000000000..4c4a059e726c
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.c
@@ -0,0 +1,1111 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/fsl/mc.h>
+#include "dpdmux.h"
+#include "dpdmux-cmd.h"
+
+/**
+ * dpdmux_open() - Open a control session for the specified object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpdmux_id: DPDMUX unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpdmux_create() function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpdmux_id,
+ u16 *token)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_open *cmd_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpdmux_cmd_open *)cmd.params;
+ cmd_params->dpdmux_id = cpu_to_le32(dpdmux_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpdmux_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLOSE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
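A hedged usage sketch of the session lifecycle these two calls bracket: open an existing object, issue commands against the returned token, then close it. The helper name is hypothetical and assumes a valid MC portal plus an object id from the DPL (dpdmux_get_attributes() is defined later in this file):

static int example_query_dpdmux(struct fsl_mc_io *mc_io, int dpdmux_id)
{
	struct dpdmux_attr attr;
	u16 token;
	int err;

	err = dpdmux_open(mc_io, 0, dpdmux_id, &token);
	if (err)
		return err;

	err = dpdmux_get_attributes(mc_io, 0, token, &attr);

	/* close unconditionally; the token is no longer valid afterwards */
	dpdmux_close(mc_io, 0, token);
	return err;
}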
+/**
+ * dpdmux_create() - Create the DPDMUX object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: returned object id
+ *
+ * Create the DPDMUX object, allocate required resources and
+ * perform required initialization.
+ *
+ * The object can be created either by declaring it in the
+ * DPL file, or by calling this function.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_create(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
+ u32 cmd_flags,
+ const struct dpdmux_cfg *cfg,
+ u32 *obj_id)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_create *cmd_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpdmux_cmd_create *)cmd.params;
+ cmd_params->method = cfg->method;
+ cmd_params->manip = cfg->manip;
+ cmd_params->num_ifs = cpu_to_le16(cfg->num_ifs);
+ cmd_params->adv_max_dmat_entries =
+ cpu_to_le16(cfg->adv.max_dmat_entries);
+ cmd_params->adv_max_mc_groups = cpu_to_le16(cfg->adv.max_mc_groups);
+ cmd_params->adv_max_vlan_ids = cpu_to_le16(cfg->adv.max_vlan_ids);
+ cmd_params->options = cpu_to_le64(cfg->adv.options);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *obj_id = mc_cmd_hdr_read_token(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpdmux_destroy() - Destroy the DPDMUX object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object;
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens to the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpdmux_destroy(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
+ u32 cmd_flags,
+ u32 object_id)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_destroy *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpdmux_cmd_destroy *)cmd.params;
+ cmd_params->dpdmux_id = cpu_to_le32(object_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_enable() - Enable DPDMUX functionality
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ENABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_disable() - Disable DPDMUX functionality
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_DISABLE,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_is_enabled() - Check if the DPDMUX is enabled.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @en: Returns '1' if object is enabled; '0' otherwise
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_rsp_is_enabled *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IS_ENABLED,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmux_rsp_is_enabled *)cmd.params;
+ *en = dpdmux_get_field(rsp_params->en, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpdmux_reset() - Reset the DPDMUX, returns the object to initial state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_RESET,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @irq_index: The interrupt index to configure
+ * @en: Interrupt state - enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable controls the
+ * overall interrupt state: if the interrupt is disabled, no cause can
+ * assert it.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_set_irq_enable *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmux_cmd_set_irq_enable *)cmd.params;
+ cmd_params->enable = en;
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_get_irq_enable() - Get overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @irq_index: The interrupt index to configure
+ * @en: Returned interrupt state - enable = 1, disable = 0
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 *en)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_get_irq_enable *cmd_params;
+ struct dpdmux_rsp_get_irq_enable *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmux_cmd_get_irq_enable *)cmd.params;
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmux_rsp_get_irq_enable *)cmd.params;
+ *en = rsp_params->enable;
+
+ return 0;
+}
+
+/**
+ * dpdmux_set_irq_mask() - Set interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @irq_index: The interrupt index to configure
+ * @mask: event mask to trigger interrupt;
+ * each bit:
+ * 0 = ignore event
+ * 1 = consider event for asserting IRQ
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_set_irq_mask *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmux_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_get_irq_mask() - Get interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @irq_index: The interrupt index to configure
+ * @mask: Returned event mask to trigger interrupt
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *mask)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_get_irq_mask *cmd_params;
+ struct dpdmux_rsp_get_irq_mask *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmux_cmd_get_irq_mask *)cmd.params;
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmux_rsp_get_irq_mask *)cmd.params;
+ *mask = le32_to_cpu(rsp_params->mask);
+
+ return 0;
+}
+
+/**
+ * dpdmux_get_irq_status() - Get the current status of any pending interrupts.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @irq_index: The interrupt index to configure
+ * @status: Returned interrupts status - one bit per cause:
+ * 0 = no interrupt pending
+ * 1 = interrupt pending
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_get_irq_status *cmd_params;
+ struct dpdmux_rsp_get_irq_status *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmux_cmd_get_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmux_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
+
+ return 0;
+}
+
+/**
+ * dpdmux_clear_irq_status() - Clear a pending interrupt's status
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @irq_index: The interrupt index to configure
+ * @status: bits to clear (W1C) - one bit per cause:
+ * 0 = don't change
+ * 1 = clear status bit
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_clear_irq_status *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_CLEAR_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmux_cmd_clear_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
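The IRQ helpers above compose into the usual query/handle/write-1-to-clear flow. A hypothetical handler sketch; note that dpdmux_get_irq_status() also sends *status to the MC as an input, so seeding it with the causes of interest narrows the query:

static int example_handle_irq(struct fsl_mc_io *mc_io, u16 token, u8 irq_index)
{
	u32 status = DPDMUX_IRQ_EVENT_LINK_CHANGED;	/* causes of interest */
	int err;

	err = dpdmux_get_irq_status(mc_io, 0, token, irq_index, &status);
	if (err)
		return err;

	if (status & DPDMUX_IRQ_EVENT_LINK_CHANGED) {
		/* ... re-read the link state and update the carrier ... */
	}

	/* write-1-to-clear: only the handled cause bits are cleared */
	return dpdmux_clear_irq_status(mc_io, 0, token, irq_index, status);
}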
+/**
+ * dpdmux_get_attributes() - Retrieve DPDMUX attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpdmux_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_rsp_get_attr *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmux_rsp_get_attr *)cmd.params;
+ attr->id = le32_to_cpu(rsp_params->id);
+ attr->options = le64_to_cpu(rsp_params->options);
+ attr->method = rsp_params->method;
+ attr->manip = rsp_params->manip;
+ attr->num_ifs = le16_to_cpu(rsp_params->num_ifs);
+ attr->mem_size = le16_to_cpu(rsp_params->mem_size);
+
+ return 0;
+}
+
+/**
+ * dpdmux_if_enable() - Enable Interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @if_id: Interface Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpdmux_if_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id)
+{
+ struct dpdmux_cmd_if *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmux_cmd_if *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_if_disable() - Disable Interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @if_id: Interface Identifier
+ *
+ * Return: Completion status. '0' on Success; Error code otherwise.
+ */
+int dpdmux_if_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id)
+{
+ struct dpdmux_cmd_if *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_DISABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmux_cmd_if *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_set_max_frame_length() - Set the maximum frame length in DPDMUX
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @max_frame_length: The required maximum frame length
+ *
+ * Update the maximum frame length on all DMUX interfaces.
+ * In VEPA mode, the maximum frame length on all DMUX interfaces is
+ * updated to the minimum of the connected DPNIs' maximum frame lengths
+ * and the DMUX's own maximum frame length.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 max_frame_length)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_set_max_frame_length *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_MAX_FRAME_LENGTH,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmux_cmd_set_max_frame_length *)cmd.params;
+ cmd_params->max_frame_length = cpu_to_le16(max_frame_length);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_ul_reset_counters() - Reset the uplink interface counters
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_UL_RESET_COUNTERS,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_if_set_accepted_frames() - Set the accepted frame types
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @if_id: Interface ID (0 for uplink, or 1-num_ifs);
+ * @cfg: Frame types configuration
+ *
+ * If 'DPDMUX_ADMIT_ONLY_VLAN_TAGGED' is set - untagged frames or
+ * priority-tagged frames are discarded.
+ * If 'DPDMUX_ADMIT_ONLY_UNTAGGED' is set - only untagged frames or
+ * priority-tagged frames are accepted.
+ * If 'DPDMUX_ADMIT_ALL' is set (default mode) - all VLAN-tagged,
+ * untagged and priority-tagged frames are accepted.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ const struct dpdmux_accepted_frames *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if_set_accepted_frames *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_ACCEPTED_FRAMES,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmux_cmd_if_set_accepted_frames *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ dpdmux_set_field(cmd_params->frames_options, ACCEPTED_FRAMES_TYPE,
+ cfg->type);
+ dpdmux_set_field(cmd_params->frames_options, UNACCEPTED_FRAMES_ACTION,
+ cfg->unaccept_act);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
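A short sketch of configuring an interface to admit only VLAN-tagged traffic. The struct fields match this call's usage above; the drop action value is an assumption about the action enum in dpdmux.h, not quoted from it:

static int example_admit_vlan_only(struct fsl_mc_io *mc_io, u16 token,
				   u16 if_id)
{
	struct dpdmux_accepted_frames cfg = {
		.type = DPDMUX_ADMIT_ONLY_VLAN_TAGGED,
		.unaccept_act = DPDMUX_ACTION_DROP,	/* assumed name */
	};

	return dpdmux_if_set_accepted_frames(mc_io, 0, token, if_id, &cfg);
}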
+/**
+ * dpdmux_if_get_attributes() - Obtain DPDMUX interface attributes
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @if_id: Interface ID (0 for uplink, or 1-num_ifs);
+ * @attr: Interface attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ struct dpdmux_if_attr *attr)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if *cmd_params;
+ struct dpdmux_rsp_if_get_attr *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_ATTR,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmux_cmd_if *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmux_rsp_if_get_attr *)cmd.params;
+ attr->rate = le32_to_cpu(rsp_params->rate);
+ attr->enabled = dpdmux_get_field(rsp_params->enabled, ENABLE);
+ attr->accept_frame_type =
+ dpdmux_get_field(rsp_params->accepted_frames_type,
+ ACCEPTED_FRAMES_TYPE);
+
+ return 0;
+}
+
+/**
+ * dpdmux_if_remove_l2_rule() - Remove L2 rule from DPDMUX table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @if_id: Destination interface ID
+ * @rule: L2 rule
+ *
+ * Function removes an L2 rule from the DPDMUX table
+ * or removes an interface from an existing multicast address
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ const struct dpdmux_l2_rule *rule)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if_l2_rule *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_REMOVE_L2_RULE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ cmd_params->vlan_id = cpu_to_le16(rule->vlan_id);
+ cmd_params->mac_addr5 = rule->mac_addr[5];
+ cmd_params->mac_addr4 = rule->mac_addr[4];
+ cmd_params->mac_addr3 = rule->mac_addr[3];
+ cmd_params->mac_addr2 = rule->mac_addr[2];
+ cmd_params->mac_addr1 = rule->mac_addr[1];
+ cmd_params->mac_addr0 = rule->mac_addr[0];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_if_add_l2_rule() - Add L2 rule into DPDMUX table
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @if_id: Destination interface ID
+ * @rule: L2 rule
+ *
+ * Function adds an L2 rule into the DPDMUX table
+ * or adds an interface to an existing multicast address
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ const struct dpdmux_l2_rule *rule)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if_l2_rule *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_ADD_L2_RULE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmux_cmd_if_l2_rule *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ cmd_params->vlan_id = cpu_to_le16(rule->vlan_id);
+ cmd_params->mac_addr5 = rule->mac_addr[5];
+ cmd_params->mac_addr4 = rule->mac_addr[4];
+ cmd_params->mac_addr3 = rule->mac_addr[3];
+ cmd_params->mac_addr2 = rule->mac_addr[2];
+ cmd_params->mac_addr1 = rule->mac_addr[1];
+ cmd_params->mac_addr0 = rule->mac_addr[0];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_if_get_counter() - Obtain a specific counter of an interface
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @if_id: Interface Id
+ * @counter_type: counter type
+ * @counter: Returned specific counter information
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ enum dpdmux_counter_type counter_type,
+ u64 *counter)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if_get_counter *cmd_params;
+ struct dpdmux_rsp_if_get_counter *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_COUNTER,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmux_cmd_if_get_counter *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ cmd_params->counter_type = counter_type;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmux_rsp_if_get_counter *)cmd.params;
+ *counter = le64_to_cpu(rsp_params->counter);
+
+ return 0;
+}
+
+/**
+ * dpdmux_if_set_link_cfg() - set the link configuration.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @if_id: interface id
+ * @cfg: Link configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ struct dpdmux_link_cfg *cfg)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if_set_link_cfg *cmd_params;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_SET_LINK_CFG,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmux_cmd_if_set_link_cfg *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+ cmd_params->rate = cpu_to_le32(cfg->rate);
+ cmd_params->options = cpu_to_le64(cfg->options);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_if_get_link_state() - Return the link state
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @if_id: interface id
+ * @state: link state
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ struct dpdmux_link_state *state)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_cmd_if_get_link_state *cmd_params;
+ struct dpdmux_rsp_if_get_link_state *rsp_params;
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_IF_GET_LINK_STATE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmux_cmd_if_get_link_state *)cmd.params;
+ cmd_params->if_id = cpu_to_le16(if_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpdmux_rsp_if_get_link_state *)cmd.params;
+ state->rate = le32_to_cpu(rsp_params->rate);
+ state->options = le64_to_cpu(rsp_params->options);
+ state->up = dpdmux_get_field(rsp_params->up, ENABLE);
+
+ return 0;
+}
+
+/**
+ * dpdmux_set_custom_key() - Set a custom classification key.
+ *
+ * This API is only available for DPDMUX instance created with
+ * DPDMUX_METHOD_CUSTOM. This API must be called before populating the
+ * classification table using dpdmux_add_custom_cls_entry.
+ *
+ * Calls to dpdmux_set_custom_key remove all existing classification entries
+ * that may have been added previously using dpdmux_add_custom_cls_entry.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @key_cfg_iova: DMA address of a configuration structure set up using
+ * dpkg_prepare_key_cfg. Maximum key size is 24 bytes.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_set_custom_key(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u64 key_cfg_iova)
+{
+ struct dpdmux_set_custom_key *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_SET_CUSTOM_KEY,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmux_set_custom_key *)cmd.params;
+ cmd_params->key_cfg_iova = cpu_to_le64(key_cfg_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_add_custom_cls_entry() - Add a custom classification entry.
+ *
+ * This API is only available for DPDMUX instances created with
+ * DPDMUX_METHOD_CUSTOM. Before calling this function a classification key
+ * composition rule must be set up using dpdmux_set_custom_key.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @rule: Classification rule to insert. Rules cannot be duplicated, if a
+ * matching rule already exists, the action will be replaced.
+ * @action: Action to perform for matching traffic.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpdmux_rule_cfg *rule,
+ struct dpdmux_cls_action *action)
+{
+ struct dpdmux_cmd_add_custom_cls_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_ADD_CUSTOM_CLS_ENTRY,
+ cmd_flags,
+ token);
+
+ cmd_params = (struct dpdmux_cmd_add_custom_cls_entry *)cmd.params;
+ cmd_params->key_size = rule->key_size;
+ cmd_params->dest_if = cpu_to_le16(action->dest_if);
+ cmd_params->key_iova = cpu_to_le64(rule->key_iova);
+ cmd_params->mask_iova = cpu_to_le64(rule->mask_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
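As the two comment blocks above stress, ordering matters: the custom key geometry must be installed before any entry references it, and re-installing the key flushes previously added entries. A hedged flow sketch (rule/action population elided; iovas assumed already DMA-mapped):

static int example_install_custom_rule(struct fsl_mc_io *mc_io, u16 token,
				       u64 key_cfg_iova,
				       struct dpdmux_rule_cfg *rule,
				       struct dpdmux_cls_action *action)
{
	int err;

	/* 1) set the key composition; this also removes any old entries */
	err = dpdmux_set_custom_key(mc_io, 0, token, key_cfg_iova);
	if (err)
		return err;

	/* 2) only now may entries be added against that key layout */
	return dpdmux_add_custom_cls_entry(mc_io, 0, token, rule, action);
}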
+/**
+ * dpdmux_remove_custom_cls_entry() - Remove a custom classification entry.
+ *
+ * This API is only available for DPDMUX instances created with
+ * DPDMUX_METHOD_CUSTOM. The API can be used to remove classification
+ * entries previously inserted using dpdmux_add_custom_cls_entry.
+ *
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPDMUX object
+ * @rule: Classification rule to remove
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpdmux_rule_cfg *rule)
+{
+ struct dpdmux_cmd_remove_custom_cls_entry *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_REMOVE_CUSTOM_CLS_ENTRY,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpdmux_cmd_remove_custom_cls_entry *)cmd.params;
+ cmd_params->key_size = rule->key_size;
+ cmd_params->key_iova = cpu_to_le64(rule->key_iova);
+ cmd_params->mask_iova = cpu_to_le64(rule->mask_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpdmux_get_api_version() - Get Data Path Demux API version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path demux API
+ * @minor_ver: Minor version of data path demux API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpdmux_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct fsl_mc_command cmd = { 0 };
+ struct dpdmux_rsp_get_api_version *rsp_params;
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPDMUX_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpdmux_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
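A sketch of the probe-time gate this call enables against the DPDMUX_VER_MAJOR/MINOR constants from dpdmux-cmd.h; the helper name and error code are assumptions, not the driver's exact code:

static int example_check_api_version(struct fsl_mc_io *mc_io)
{
	u16 major, minor;
	int err;

	err = dpdmux_get_api_version(mc_io, 0, &major, &minor);
	if (err)
		return err;

	if (major < DPDMUX_VER_MAJOR ||
	    (major == DPDMUX_VER_MAJOR && minor < DPDMUX_VER_MINOR))
		return -ENOTSUPP;	/* MC firmware older than this driver */

	return 0;
}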
diff --git a/drivers/staging/fsl-dpaa2/evb/dpdmux.h b/drivers/staging/fsl-dpaa2/evb/dpdmux.h
new file mode 100644
index 000000000000..a6ccc7efa014
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/evb/dpdmux.h
@@ -0,0 +1,453 @@
+/* Copyright 2013-2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPDMUX_H
+#define __FSL_DPDMUX_H
+
+struct fsl_mc_io;
+
+/* Data Path Demux API
+ * Contains API for handling DPDMUX topology and functionality
+ */
+
+int dpdmux_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpdmux_id,
+ u16 *token);
+
+int dpdmux_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * DPDMUX general options
+ */
+
+/**
+ * Enable bridging between internal interfaces
+ */
+#define DPDMUX_OPT_BRIDGE_EN 0x0000000000000002ULL
+
+/**
+ * Mask support for classification
+ */
+#define DPDMUX_OPT_CLS_MASK_SUPPORT 0x0000000000000020ULL
+
+#define DPDMUX_IRQ_INDEX_IF 0x0000
+#define DPDMUX_IRQ_INDEX 0x0001
+
+/**
+ * IRQ event - Indicates that the link state changed
+ */
+#define DPDMUX_IRQ_EVENT_LINK_CHANGED 0x0001
+
+/**
+ * enum dpdmux_manip - DPDMUX manipulation operations
+ * @DPDMUX_MANIP_NONE: No manipulation on frames
+ * @DPDMUX_MANIP_ADD_REMOVE_S_VLAN: Add S-VLAN on egress, remove it on ingress
+ */
+enum dpdmux_manip {
+ DPDMUX_MANIP_NONE = 0x0,
+ DPDMUX_MANIP_ADD_REMOVE_S_VLAN = 0x1
+};
+
+/**
+ * enum dpdmux_method - DPDMUX method options
+ * @DPDMUX_METHOD_NONE: no DPDMUX method
+ * @DPDMUX_METHOD_C_VLAN_MAC: DPDMUX based on C-VLAN and MAC address
+ * @DPDMUX_METHOD_MAC: DPDMUX based on MAC address
+ * @DPDMUX_METHOD_C_VLAN: DPDMUX based on C-VLAN
+ * @DPDMUX_METHOD_S_VLAN: DPDMUX based on S-VLAN
+ * @DPDMUX_METHOD_CUSTOM: DPDMUX based on a custom classification key
+ */
+enum dpdmux_method {
+ DPDMUX_METHOD_NONE = 0x0,
+ DPDMUX_METHOD_C_VLAN_MAC = 0x1,
+ DPDMUX_METHOD_MAC = 0x2,
+ DPDMUX_METHOD_C_VLAN = 0x3,
+ DPDMUX_METHOD_S_VLAN = 0x4,
+ DPDMUX_METHOD_CUSTOM = 0x5
+};
+
+/**
+ * struct dpdmux_cfg - DPDMUX configuration parameters
+ * @method: Defines the operation method for the DPDMUX address table
+ * @manip: Required manipulation operation
+ * @num_ifs: Number of interfaces (excluding the uplink interface)
+ * @adv: Advanced parameters; default is all zeros;
+ * use this structure to change default settings
+ */
+struct dpdmux_cfg {
+ enum dpdmux_method method;
+ enum dpdmux_manip manip;
+ u16 num_ifs;
+ /**
+ * struct adv - Advanced parameters
+ * @options: DPDMUX options - combination of 'DPDMUX_OPT_<X>' flags
+ * @max_dmat_entries: Maximum entries in DPDMUX address table
+ * 0 - indicates default: 64 entries per interface.
+ * @max_mc_groups: Number of multicast groups in DPDMUX table
+ * 0 - indicates default: 32 multicast groups
+ * @max_vlan_ids: Maximum number of VLAN IDs allowed in the system -
+ * relevant only when working in the mac+vlan method.
+ * 0 - indicates default: 16 VLAN IDs.
+ */
+ struct {
+ u64 options;
+ u16 max_dmat_entries;
+ u16 max_mc_groups;
+ u16 max_vlan_ids;
+ } adv;
+};
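+
+/*
+ * Illustrative sketch (example values only; surrounding declarations and
+ * error handling omitted): a DPDMUX demuxing on C-VLAN + MAC across four
+ * downlinks, with an all-zero @adv selecting the default table sizes:
+ *
+ *	struct dpdmux_cfg cfg = {
+ *		.method = DPDMUX_METHOD_C_VLAN_MAC,
+ *		.manip = DPDMUX_MANIP_NONE,
+ *		.num_ifs = 4,
+ *	};
+ *
+ *	dpdmux_create(mc_io, dprc_token, 0, &cfg, &obj_id);
+ */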
+
+int dpdmux_create(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
+ u32 cmd_flags,
+ const struct dpdmux_cfg *cfg,
+ u32 *obj_id);
+
+int dpdmux_destroy(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
+ u32 cmd_flags,
+ u32 object_id);
+
+int dpdmux_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpdmux_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpdmux_is_enabled(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ int *en);
+
+int dpdmux_reset(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+int dpdmux_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
+int dpdmux_get_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 *en);
+
+int dpdmux_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
+
+int dpdmux_get_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *mask);
+
+int dpdmux_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status);
+
+int dpdmux_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status);
+
+/**
+ * struct dpdmux_attr - Structure representing DPDMUX attributes
+ * @id: DPDMUX object ID
+ * @options: Configuration options (bitmap)
+ * @method: DPDMUX address table method
+ * @manip: DPDMUX manipulation type
+ * @num_ifs: Number of interfaces (excluding the uplink interface)
+ * @mem_size: DPDMUX frame storage memory size
+ */
+struct dpdmux_attr {
+ int id;
+ u64 options;
+ enum dpdmux_method method;
+ enum dpdmux_manip manip;
+ u16 num_ifs;
+ u16 mem_size;
+};
+
+int dpdmux_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpdmux_attr *attr);
+
+int dpdmux_set_max_frame_length(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 max_frame_length);
+
+/**
+ * enum dpdmux_counter_type - Counter types
+ * @DPDMUX_CNT_ING_FRAME: Counts ingress frames
+ * @DPDMUX_CNT_ING_BYTE: Counts ingress bytes
+ * @DPDMUX_CNT_ING_FLTR_FRAME: Counts filtered ingress frames
+ * @DPDMUX_CNT_ING_FRAME_DISCARD: Counts discarded ingress frames
+ * @DPDMUX_CNT_ING_MCAST_FRAME: Counts ingress multicast frames
+ * @DPDMUX_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes
+ * @DPDMUX_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames
+ * @DPDMUX_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes
+ * @DPDMUX_CNT_EGR_FRAME: Counts egress frames
+ * @DPDMUX_CNT_EGR_BYTE: Counts egress bytes
+ * @DPDMUX_CNT_EGR_FRAME_DISCARD: Counts discarded egress frames
+ */
+enum dpdmux_counter_type {
+ DPDMUX_CNT_ING_FRAME = 0x0,
+ DPDMUX_CNT_ING_BYTE = 0x1,
+ DPDMUX_CNT_ING_FLTR_FRAME = 0x2,
+ DPDMUX_CNT_ING_FRAME_DISCARD = 0x3,
+ DPDMUX_CNT_ING_MCAST_FRAME = 0x4,
+ DPDMUX_CNT_ING_MCAST_BYTE = 0x5,
+ DPDMUX_CNT_ING_BCAST_FRAME = 0x6,
+ DPDMUX_CNT_ING_BCAST_BYTES = 0x7,
+ DPDMUX_CNT_EGR_FRAME = 0x8,
+ DPDMUX_CNT_EGR_BYTE = 0x9,
+ DPDMUX_CNT_EGR_FRAME_DISCARD = 0xa
+};
+
+/**
+ * enum dpdmux_accepted_frames_type - DPDMUX frame types
+ * @DPDMUX_ADMIT_ALL: The device accepts VLAN tagged, untagged and
+ * priority-tagged frames
+ * @DPDMUX_ADMIT_ONLY_VLAN_TAGGED: The device discards untagged frames or
+ * priority-tagged frames that are received on this
+ * interface
+ * @DPDMUX_ADMIT_ONLY_UNTAGGED: Untagged frames or priority-tagged frames
+ * received on this interface are accepted
+ */
+enum dpdmux_accepted_frames_type {
+ DPDMUX_ADMIT_ALL = 0,
+ DPDMUX_ADMIT_ONLY_VLAN_TAGGED = 1,
+ DPDMUX_ADMIT_ONLY_UNTAGGED = 2
+};
+
+/**
+ * enum dpdmux_action - DPDMUX action for un-accepted frames
+ * @DPDMUX_ACTION_DROP: Drop un-accepted frames
+ * @DPDMUX_ACTION_REDIRECT_TO_CTRL: Redirect un-accepted frames to the
+ * control interface
+ */
+enum dpdmux_action {
+ DPDMUX_ACTION_DROP = 0,
+ DPDMUX_ACTION_REDIRECT_TO_CTRL = 1
+};
+
+/**
+ * struct dpdmux_accepted_frames - Frame types configuration
+ * @type: Defines ingress accepted frames
+ * @unaccept_act: Defines action on frames not accepted
+ */
+struct dpdmux_accepted_frames {
+ enum dpdmux_accepted_frames_type type;
+ enum dpdmux_action unaccept_act;
+};
+
+int dpdmux_if_set_accepted_frames(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ const struct dpdmux_accepted_frames *cfg);
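+
+/*
+ * Illustrative sketch (error handling omitted): admit only VLAN-tagged
+ * frames on interface @if_id and drop everything else:
+ *
+ *	struct dpdmux_accepted_frames cfg = {
+ *		.type = DPDMUX_ADMIT_ONLY_VLAN_TAGGED,
+ *		.unaccept_act = DPDMUX_ACTION_DROP,
+ *	};
+ *
+ *	dpdmux_if_set_accepted_frames(mc_io, 0, token, if_id, &cfg);
+ */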
+
+/**
+ * struct dpdmux_if_attr - Structure representing frame types configuration
+ * @rate: Configured interface rate (in bits per second)
+ * @enabled: Indicates if interface is enabled
+ * @accept_frame_type: Indicates type of accepted frames for the interface
+ */
+struct dpdmux_if_attr {
+ u32 rate;
+ int enabled;
+ enum dpdmux_accepted_frames_type accept_frame_type;
+};
+
+int dpdmux_if_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ struct dpdmux_if_attr *attr);
+
+int dpdmux_if_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id);
+
+int dpdmux_if_disable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id);
+
+/**
+ * struct dpdmux_l2_rule - Structure representing L2 rule
+ * @mac_addr: MAC address
+ * @vlan_id: VLAN ID
+ */
+struct dpdmux_l2_rule {
+ u8 mac_addr[6];
+ u16 vlan_id;
+};
+
+int dpdmux_if_remove_l2_rule(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ const struct dpdmux_l2_rule *rule);
+
+int dpdmux_if_add_l2_rule(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ const struct dpdmux_l2_rule *rule);
+
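+/*
+ * Illustrative sketch (example values only): steering unicast MAC
+ * 00:04:9f:00:00:01 on VLAN 100 to interface @if_id. The vlan_id field
+ * is meaningful only for the C-VLAN + MAC method:
+ *
+ *	struct dpdmux_l2_rule rule = {
+ *		.mac_addr = { 0x00, 0x04, 0x9f, 0x00, 0x00, 0x01 },
+ *		.vlan_id = 100,
+ *	};
+ *
+ *	dpdmux_if_add_l2_rule(mc_io, 0, token, if_id, &rule);
+ */
+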
+int dpdmux_if_get_counter(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ enum dpdmux_counter_type counter_type,
+ u64 *counter);
+
+int dpdmux_ul_reset_counters(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * Enable auto-negotiation
+ */
+#define DPDMUX_LINK_OPT_AUTONEG 0x0000000000000001ULL
+/**
+ * Enable half-duplex mode
+ */
+#define DPDMUX_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
+/**
+ * Enable pause frames
+ */
+#define DPDMUX_LINK_OPT_PAUSE 0x0000000000000004ULL
+/**
+ * Enable asymmetric pause frames
+ */
+#define DPDMUX_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
+
+/**
+ * struct dpdmux_link_cfg - Structure representing DPDMUX link configuration
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPDMUX_LINK_OPT_<X>' values
+ */
+struct dpdmux_link_cfg {
+ u32 rate;
+ u64 options;
+};
+
+int dpdmux_if_set_link_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ struct dpdmux_link_cfg *cfg);
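+
+/*
+ * Illustrative sketch (values are examples; the rate unit is assumed to
+ * be Mbps here, mirroring how the EVB driver maps it to ethtool speed):
+ * force 1 Gbps with autonegotiation advertised:
+ *
+ *	struct dpdmux_link_cfg cfg = {
+ *		.rate = 1000,
+ *		.options = DPDMUX_LINK_OPT_AUTONEG,
+ *	};
+ *
+ *	dpdmux_if_set_link_cfg(mc_io, 0, token, if_id, &cfg);
+ */
+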
+/**
+ * struct dpdmux_link_state - Structure representing DPDMUX link state
+ * @rate: Rate
+ * @options: Mask of available options; use 'DPDMUX_LINK_OPT_<X>' values
+ * @up: 0 - down, 1 - up
+ */
+struct dpdmux_link_state {
+ u32 rate;
+ u64 options;
+ int up;
+};
+
+int dpdmux_if_get_link_state(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u16 if_id,
+ struct dpdmux_link_state *state);
+
+int dpdmux_set_custom_key(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u64 key_cfg_iova);
+
+/**
+ * struct dpdmux_rule_cfg - Custom classification rule.
+ *
+ * @key_iova: DMA address of buffer storing the look-up value
+ * @mask_iova: DMA address of the mask used for TCAM classification
+ * @key_size: size, in bytes, of the look-up value. This must match the size
+ * of the look-up key defined using dpdmux_set_custom_key, otherwise the
+ * entry will never be hit
+ */
+struct dpdmux_rule_cfg {
+ u64 key_iova;
+ u64 mask_iova;
+ u8 key_size;
+};
+
+/**
+ * struct dpdmux_cls_action - Action to execute for frames matching the
+ * classification entry
+ *
+ * @dest_if: Interface to forward the frames to. Port numbering is similar to
+ * the one used to connect interfaces:
+ * - 0 is the uplink port,
+ * - all others are downlink ports.
+ */
+struct dpdmux_cls_action {
+ u16 dest_if;
+};
+
+int dpdmux_add_custom_cls_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpdmux_rule_cfg *rule,
+ struct dpdmux_cls_action *action);
+
+int dpdmux_remove_custom_cls_entry(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpdmux_rule_cfg *rule);
+
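+/*
+ * Illustrative flow for DPDMUX_METHOD_CUSTOM (error handling omitted).
+ * key_cfg_iova, key_iova and mask_iova are assumed to be DMA-mapped
+ * buffers prepared by the caller; key_size must match the size of the
+ * key defined via dpdmux_set_custom_key, and dest_if 1 is the first
+ * downlink:
+ *
+ *	dpdmux_set_custom_key(mc_io, 0, token, key_cfg_iova);
+ *
+ *	struct dpdmux_rule_cfg rule = {
+ *		.key_iova = key_iova,
+ *		.mask_iova = mask_iova,
+ *		.key_size = 4,
+ *	};
+ *	struct dpdmux_cls_action action = { .dest_if = 1 };
+ *
+ *	dpdmux_add_custom_cls_entry(mc_io, 0, token, &rule, &action);
+ */
+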
+int dpdmux_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver);
+
+#endif /* __FSL_DPDMUX_H */
diff --git a/drivers/staging/fsl-dpaa2/evb/evb.c b/drivers/staging/fsl-dpaa2/evb/evb.c
new file mode 100644
index 000000000000..958f8a54efcc
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/evb/evb.c
@@ -0,0 +1,1356 @@
+/* Copyright 2015 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+
+#include <uapi/linux/if_bridge.h>
+#include <net/netlink.h>
+
+#include <linux/fsl/mc.h>
+
+#include "dpdmux.h"
+#include "dpdmux-cmd.h"
+
+static const char evb_drv_version[] = "0.1";
+
+/* Minimal supported DPDMUX version */
+#define DPDMUX_MIN_VER_MAJOR 6
+#define DPDMUX_MIN_VER_MINOR 0
+
+/* IRQ index */
+#define DPDMUX_MAX_IRQ_NUM 2
+
+/* MAX FRAME LENGTH (currently 10k) */
+#define EVB_MAX_FRAME_LENGTH (10 * 1024)
+#define EVB_MAX_MTU (EVB_MAX_FRAME_LENGTH - VLAN_ETH_HLEN)
+#define EVB_MIN_MTU 68
+
+struct evb_port_priv {
+ struct net_device *netdev;
+ struct list_head list;
+ u16 port_index;
+ struct evb_priv *evb_priv;
+ u8 vlans[VLAN_VID_MASK + 1];
+};
+
+struct evb_priv {
+ /* keep first */
+ struct evb_port_priv uplink;
+
+ struct fsl_mc_io *mc_io;
+ struct list_head port_list;
+ struct dpdmux_attr attr;
+ u16 mux_handle;
+ int dev_id;
+};
+
+static int _evb_port_carrier_state_sync(struct net_device *netdev)
+{
+ struct evb_port_priv *port_priv = netdev_priv(netdev);
+ struct dpdmux_link_state state;
+ int err;
+
+ err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
+ port_priv->evb_priv->mux_handle,
+ port_priv->port_index, &state);
+ if (unlikely(err)) {
+ netdev_err(netdev, "dpdmux_if_get_link_state() err %d\n", err);
+ return err;
+ }
+
+ WARN_ONCE(state.up > 1, "Garbage read into link_state");
+
+ if (state.up)
+ netif_carrier_on(port_priv->netdev);
+ else
+ netif_carrier_off(port_priv->netdev);
+
+ return 0;
+}
+
+static int evb_port_open(struct net_device *netdev)
+{
+ int err;
+
+ /* FIXME: enable port when support added */
+
+ err = _evb_port_carrier_state_sync(netdev);
+ if (err) {
+		netdev_err(netdev, "_evb_port_carrier_state_sync err %d\n",
+			   err);
+ return err;
+ }
+
+ return 0;
+}
+
+static netdev_tx_t evb_dropframe(struct sk_buff *skb, struct net_device *dev)
+{
+ /* we don't support I/O for now, drop the frame */
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static int evb_links_state_update(struct evb_priv *priv)
+{
+ struct evb_port_priv *port_priv;
+ struct list_head *pos;
+ int err;
+
+ list_for_each(pos, &priv->port_list) {
+ port_priv = list_entry(pos, struct evb_port_priv, list);
+
+ err = _evb_port_carrier_state_sync(port_priv->netdev);
+ if (err)
+ netdev_err(port_priv->netdev,
+ "_evb_port_carrier_state_sync err %d\n",
+ err);
+ }
+
+ return 0;
+}
+
+static irqreturn_t evb_irq0_handler(int irq_num, void *arg)
+{
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t _evb_irq0_handler_thread(int irq_num, void *arg)
+{
+ struct device *dev = (struct device *)arg;
+ struct fsl_mc_device *evb_dev = to_fsl_mc_device(dev);
+ struct net_device *netdev = dev_get_drvdata(dev);
+ struct evb_priv *priv = netdev_priv(netdev);
+ struct fsl_mc_io *io = priv->mc_io;
+ u16 token = priv->mux_handle;
+ int irq_index = DPDMUX_IRQ_INDEX_IF;
+
+ /* Mask the events and the if_id reserved bits to be cleared on read */
+ u32 status = DPDMUX_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000;
+ int err;
+
+ /* Sanity check */
+ if (WARN_ON(!evb_dev || !evb_dev->irqs || !evb_dev->irqs[irq_index]))
+ goto out;
+ if (WARN_ON(evb_dev->irqs[irq_index]->msi_desc->irq != (u32)irq_num))
+ goto out;
+
+ err = dpdmux_get_irq_status(io, 0, token, irq_index, &status);
+ if (unlikely(err)) {
+ netdev_err(netdev, "Can't get irq status (err %d)", err);
+ err = dpdmux_clear_irq_status(io, 0, token, irq_index,
+ 0xFFFFFFFF);
+ if (unlikely(err))
+ netdev_err(netdev, "Can't clear irq status (err %d)",
+ err);
+ goto out;
+ }
+
+ if (status & DPDMUX_IRQ_EVENT_LINK_CHANGED) {
+ err = evb_links_state_update(priv);
+ if (unlikely(err))
+ goto out;
+ }
+
+out:
+ return IRQ_HANDLED;
+}
+
+static int evb_setup_irqs(struct fsl_mc_device *evb_dev)
+{
+ struct device *dev = &evb_dev->dev;
+ struct net_device *netdev = dev_get_drvdata(dev);
+ struct evb_priv *priv = netdev_priv(netdev);
+ int err = 0;
+ struct fsl_mc_device_irq *irq;
+ const int irq_index = DPDMUX_IRQ_INDEX_IF;
+ u32 mask = DPDMUX_IRQ_EVENT_LINK_CHANGED;
+
+ err = fsl_mc_allocate_irqs(evb_dev);
+ if (unlikely(err)) {
+ dev_err(dev, "MC irqs allocation failed\n");
+ return err;
+ }
+
+ if (WARN_ON(evb_dev->obj_desc.irq_count != DPDMUX_MAX_IRQ_NUM)) {
+ err = -EINVAL;
+ goto free_irq;
+ }
+
+ err = dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
+ irq_index, 0);
+ if (unlikely(err)) {
+ dev_err(dev, "dpdmux_set_irq_enable err %d\n", err);
+ goto free_irq;
+ }
+
+ irq = evb_dev->irqs[irq_index];
+
+ err = devm_request_threaded_irq(dev, irq->msi_desc->irq,
+ evb_irq0_handler,
+ _evb_irq0_handler_thread,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(dev), dev);
+ if (unlikely(err)) {
+ dev_err(dev, "devm_request_threaded_irq(): %d", err);
+ goto free_irq;
+ }
+
+ err = dpdmux_set_irq_mask(priv->mc_io, 0, priv->mux_handle,
+ irq_index, mask);
+ if (unlikely(err)) {
+ dev_err(dev, "dpdmux_set_irq_mask(): %d", err);
+ goto free_devm_irq;
+ }
+
+ err = dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
+ irq_index, 1);
+ if (unlikely(err)) {
+ dev_err(dev, "dpdmux_set_irq_enable(): %d", err);
+ goto free_devm_irq;
+ }
+
+ return 0;
+
+free_devm_irq:
+ devm_free_irq(dev, irq->msi_desc->irq, dev);
+free_irq:
+ fsl_mc_free_irqs(evb_dev);
+ return err;
+}
+
+static void evb_teardown_irqs(struct fsl_mc_device *evb_dev)
+{
+ struct device *dev = &evb_dev->dev;
+ struct net_device *netdev = dev_get_drvdata(dev);
+ struct evb_priv *priv = netdev_priv(netdev);
+
+ dpdmux_set_irq_enable(priv->mc_io, 0, priv->mux_handle,
+ DPDMUX_IRQ_INDEX_IF, 0);
+
+ devm_free_irq(dev,
+ evb_dev->irqs[DPDMUX_IRQ_INDEX_IF]->msi_desc->irq,
+ dev);
+ fsl_mc_free_irqs(evb_dev);
+}
+
+static int evb_port_add_rule(struct net_device *netdev,
+ const unsigned char *addr, u16 vid)
+{
+ struct evb_port_priv *port_priv = netdev_priv(netdev);
+ struct dpdmux_l2_rule rule = { .vlan_id = vid };
+ int err;
+
+ if (addr)
+ ether_addr_copy(rule.mac_addr, addr);
+
+ err = dpdmux_if_add_l2_rule(port_priv->evb_priv->mc_io,
+ 0,
+ port_priv->evb_priv->mux_handle,
+ port_priv->port_index, &rule);
+ if (unlikely(err))
+ netdev_err(netdev, "dpdmux_if_add_l2_rule err %d\n", err);
+ return err;
+}
+
+static int evb_port_del_rule(struct net_device *netdev,
+ const unsigned char *addr, u16 vid)
+{
+ struct evb_port_priv *port_priv = netdev_priv(netdev);
+ struct dpdmux_l2_rule rule = { .vlan_id = vid };
+ int err;
+
+ if (addr)
+ ether_addr_copy(rule.mac_addr, addr);
+
+ err = dpdmux_if_remove_l2_rule(port_priv->evb_priv->mc_io,
+ 0,
+ port_priv->evb_priv->mux_handle,
+ port_priv->port_index, &rule);
+ if (unlikely(err))
+ netdev_err(netdev, "dpdmux_if_remove_l2_rule err %d\n", err);
+ return err;
+}
+
+static bool _lookup_address(struct net_device *netdev,
+ const unsigned char *addr)
+{
+ struct netdev_hw_addr *ha;
+ struct netdev_hw_addr_list *list = (is_unicast_ether_addr(addr)) ?
+ &netdev->uc : &netdev->mc;
+
+ netif_addr_lock_bh(netdev);
+ list_for_each_entry(ha, &list->list, list) {
+ if (ether_addr_equal(ha->addr, addr)) {
+ netif_addr_unlock_bh(netdev);
+ return true;
+ }
+ }
+ netif_addr_unlock_bh(netdev);
+ return false;
+}
+
+static inline int evb_port_fdb_prep(struct nlattr *tb[],
+ struct net_device *netdev,
+ const unsigned char *addr, u16 *vid,
+ bool del)
+{
+ struct evb_port_priv *port_priv = netdev_priv(netdev);
+ struct evb_priv *evb_priv = port_priv->evb_priv;
+
+ *vid = 0;
+
+ if (evb_priv->attr.method != DPDMUX_METHOD_MAC &&
+ evb_priv->attr.method != DPDMUX_METHOD_C_VLAN_MAC) {
+ netdev_err(netdev,
+ "EVB mode does not support MAC classification\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* check if the address is configured on this port */
+ if (_lookup_address(netdev, addr)) {
+ if (!del)
+ return -EEXIST;
+ } else {
+ if (del)
+ return -ENOENT;
+ }
+
+ if (tb[NDA_VLAN] && evb_priv->attr.method == DPDMUX_METHOD_C_VLAN_MAC) {
+ if (nla_len(tb[NDA_VLAN]) != sizeof(unsigned short)) {
+ netdev_err(netdev, "invalid vlan size %d\n",
+ nla_len(tb[NDA_VLAN]));
+ return -EINVAL;
+ }
+
+ *vid = nla_get_u16(tb[NDA_VLAN]);
+
+ if (!*vid || *vid >= VLAN_VID_MASK) {
+ netdev_err(netdev, "invalid vid value 0x%04x\n", *vid);
+ return -EINVAL;
+ }
+ } else if (evb_priv->attr.method == DPDMUX_METHOD_C_VLAN_MAC) {
+ netdev_err(netdev,
+ "EVB mode requires explicit VLAN configuration\n");
+ return -EINVAL;
+ } else if (tb[NDA_VLAN]) {
+ netdev_warn(netdev, "VLAN not supported, argument ignored\n");
+ }
+
+ return 0;
+}
+
+static int evb_port_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *netdev,
+ const unsigned char *addr, u16 vid, u16 flags,
+ struct netlink_ext_ack *extack)
+{
+ u16 _vid;
+ int err;
+
+ /* TODO: add replace support when added to iproute bridge */
+ if (!(flags & NLM_F_REQUEST)) {
+ netdev_err(netdev,
+ "evb_port_fdb_add unexpected flags value %08x\n",
+ flags);
+ return -EINVAL;
+ }
+
+ err = evb_port_fdb_prep(tb, netdev, addr, &_vid, 0);
+ if (unlikely(err))
+ return err;
+
+ err = evb_port_add_rule(netdev, addr, _vid);
+ if (unlikely(err))
+ return err;
+
+ if (is_unicast_ether_addr(addr)) {
+ err = dev_uc_add(netdev, addr);
+ if (unlikely(err)) {
+ netdev_err(netdev, "dev_uc_add err %d\n", err);
+ return err;
+ }
+ } else {
+ err = dev_mc_add(netdev, addr);
+ if (unlikely(err)) {
+ netdev_err(netdev, "dev_mc_add err %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int evb_port_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *netdev,
+ const unsigned char *addr, u16 vid)
+{
+ u16 _vid;
+ int err;
+
+ err = evb_port_fdb_prep(tb, netdev, addr, &_vid, 1);
+ if (unlikely(err))
+ return err;
+
+ err = evb_port_del_rule(netdev, addr, _vid);
+ if (unlikely(err))
+ return err;
+
+ if (is_unicast_ether_addr(addr)) {
+ err = dev_uc_del(netdev, addr);
+ if (unlikely(err)) {
+ netdev_err(netdev, "dev_uc_del err %d\n", err);
+ return err;
+ }
+ } else {
+ err = dev_mc_del(netdev, addr);
+ if (unlikely(err)) {
+ netdev_err(netdev, "dev_mc_del err %d\n", err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int evb_change_mtu(struct net_device *netdev,
+ int mtu)
+{
+ struct evb_port_priv *port_priv = netdev_priv(netdev);
+ struct evb_priv *evb_priv = port_priv->evb_priv;
+ struct list_head *pos;
+ int err = 0;
+
+ /* This operation is not permitted on downlinks */
+ if (port_priv->port_index > 0)
+ return -EPERM;
+
+ err = dpdmux_set_max_frame_length(evb_priv->mc_io,
+ 0,
+ evb_priv->mux_handle,
+					  (u16)(mtu + VLAN_ETH_HLEN));
+
+ if (unlikely(err)) {
+		netdev_err(netdev, "dpdmux_set_max_frame_length err %d\n",
+			   err);
+ return err;
+ }
+
+ /* Update the max frame length for downlinks */
+ list_for_each(pos, &evb_priv->port_list) {
+ port_priv = list_entry(pos, struct evb_port_priv, list);
+ port_priv->netdev->mtu = mtu;
+ }
+
+ netdev->mtu = mtu;
+ return 0;
+}
+
+static const struct nla_policy ifla_br_policy[IFLA_MAX + 1] = {
+ [IFLA_BRIDGE_FLAGS] = { .type = NLA_U16 },
+ [IFLA_BRIDGE_MODE] = { .type = NLA_U16 },
+ [IFLA_BRIDGE_VLAN_INFO] = { .type = NLA_BINARY,
+ .len = sizeof(struct bridge_vlan_info), },
+};
+
+static int evb_setlink_af_spec(struct net_device *netdev,
+ struct nlattr **tb)
+{
+ struct bridge_vlan_info *vinfo;
+ struct evb_port_priv *port_priv = netdev_priv(netdev);
+ int err = 0;
+
+ if (!tb[IFLA_BRIDGE_VLAN_INFO]) {
+ netdev_err(netdev, "no VLAN INFO in nlmsg\n");
+ return -EOPNOTSUPP;
+ }
+
+ vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
+
+ if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
+ return -EINVAL;
+
+ err = evb_port_add_rule(netdev, NULL, vinfo->vid);
+ if (unlikely(err))
+ return err;
+
+ port_priv->vlans[vinfo->vid] = 1;
+
+ return 0;
+}
+
+static int evb_setlink(struct net_device *netdev,
+ struct nlmsghdr *nlh,
+ u16 flags,
+ struct netlink_ext_ack *extack)
+{
+ struct evb_port_priv *port_priv = netdev_priv(netdev);
+ struct evb_priv *evb_priv = port_priv->evb_priv;
+ struct nlattr *attr;
+	struct nlattr *tb[((IFLA_BRIDGE_MAX > IFLA_BRPORT_MAX) ?
+			IFLA_BRIDGE_MAX : IFLA_BRPORT_MAX) + 1];
+ int err = 0;
+
+ if (evb_priv->attr.method != DPDMUX_METHOD_C_VLAN &&
+ evb_priv->attr.method != DPDMUX_METHOD_S_VLAN) {
+ netdev_err(netdev,
+ "EVB mode does not support VLAN only classification\n");
+ return -EOPNOTSUPP;
+ }
+
+ attr = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (attr) {
+ err = nla_parse_nested_deprecated(tb, IFLA_BRIDGE_MAX, attr,
+ ifla_br_policy, NULL);
+ if (unlikely(err)) {
+ netdev_err(netdev,
+ "nla_parse_nested for br_policy err %d\n",
+ err);
+ return err;
+ }
+
+ err = evb_setlink_af_spec(netdev, tb);
+ return err;
+ }
+
+ netdev_err(netdev, "nlmsg_find_attr found no AF_SPEC\n");
+ return -EOPNOTSUPP;
+}
+
+static int __nla_put_netdev(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct evb_port_priv *port_priv = netdev_priv(netdev);
+ struct evb_priv *evb_priv = port_priv->evb_priv;
+ u8 operstate = netif_running(netdev) ?
+ netdev->operstate : IF_OPER_DOWN;
+ int iflink;
+ int err;
+
+ err = nla_put_string(skb, IFLA_IFNAME, netdev->name);
+ if (unlikely(err))
+ goto nla_put_err;
+ err = nla_put_u32(skb, IFLA_MASTER, evb_priv->uplink.netdev->ifindex);
+ if (unlikely(err))
+ goto nla_put_err;
+ err = nla_put_u32(skb, IFLA_MTU, netdev->mtu);
+ if (unlikely(err))
+ goto nla_put_err;
+ err = nla_put_u8(skb, IFLA_OPERSTATE, operstate);
+ if (unlikely(err))
+ goto nla_put_err;
+ if (netdev->addr_len) {
+ err = nla_put(skb, IFLA_ADDRESS, netdev->addr_len,
+ netdev->dev_addr);
+ if (unlikely(err))
+ goto nla_put_err;
+ }
+
+ iflink = dev_get_iflink(netdev);
+ if (netdev->ifindex != iflink) {
+ err = nla_put_u32(skb, IFLA_LINK, iflink);
+ if (unlikely(err))
+ goto nla_put_err;
+ }
+
+ return 0;
+
+nla_put_err:
+ netdev_err(netdev, "nla_put_ err %d\n", err);
+ return err;
+}
+
+static int __nla_put_port(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct nlattr *nest;
+ int err;
+
+ nest = nla_nest_start_noflag(skb, IFLA_PROTINFO | NLA_F_NESTED);
+ if (!nest) {
+ netdev_err(netdev, "nla_nest_start failed\n");
+ return -ENOMEM;
+ }
+
+ err = nla_put_u8(skb, IFLA_BRPORT_STATE, BR_STATE_FORWARDING);
+ if (unlikely(err))
+ goto nla_put_err;
+ err = nla_put_u16(skb, IFLA_BRPORT_PRIORITY, 0);
+ if (unlikely(err))
+ goto nla_put_err;
+ err = nla_put_u32(skb, IFLA_BRPORT_COST, 0);
+ if (unlikely(err))
+ goto nla_put_err;
+ err = nla_put_u8(skb, IFLA_BRPORT_MODE, 0);
+ if (unlikely(err))
+ goto nla_put_err;
+ err = nla_put_u8(skb, IFLA_BRPORT_GUARD, 0);
+ if (unlikely(err))
+ goto nla_put_err;
+ err = nla_put_u8(skb, IFLA_BRPORT_PROTECT, 0);
+ if (unlikely(err))
+ goto nla_put_err;
+ err = nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE, 0);
+ if (unlikely(err))
+ goto nla_put_err;
+ err = nla_put_u8(skb, IFLA_BRPORT_LEARNING, 0);
+ if (unlikely(err))
+ goto nla_put_err;
+ err = nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD, 1);
+ if (unlikely(err))
+ goto nla_put_err;
+ nla_nest_end(skb, nest);
+
+ return 0;
+
+nla_put_err:
+ netdev_err(netdev, "nla_put_ err %d\n", err);
+ nla_nest_cancel(skb, nest);
+ return err;
+}
+
+static int __nla_put_vlan(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct evb_port_priv *port_priv = netdev_priv(netdev);
+ struct nlattr *nest;
+ struct bridge_vlan_info vinfo;
+ const u8 *vlans = port_priv->vlans;
+ u16 i;
+ int err;
+
+ nest = nla_nest_start_noflag(skb, IFLA_AF_SPEC);
+ if (!nest) {
+ netdev_err(netdev, "nla_nest_start failed");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < VLAN_VID_MASK + 1; i++) {
+ if (!vlans[i])
+ continue;
+
+ vinfo.flags = 0;
+ vinfo.vid = i;
+
+ err = nla_put(skb, IFLA_BRIDGE_VLAN_INFO,
+ sizeof(vinfo), &vinfo);
+ if (unlikely(err))
+ goto nla_put_err;
+ }
+
+ nla_nest_end(skb, nest);
+
+ return 0;
+
+nla_put_err:
+ netdev_err(netdev, "nla_put_ err %d\n", err);
+ nla_nest_cancel(skb, nest);
+ return err;
+}
+
+static int evb_getlink(struct sk_buff *skb, u32 pid, u32 seq,
+ struct net_device *netdev, u32 filter_mask, int nlflags)
+{
+ struct evb_port_priv *port_priv = netdev_priv(netdev);
+ struct evb_priv *evb_priv = port_priv->evb_priv;
+ struct ifinfomsg *hdr;
+ struct nlmsghdr *nlh;
+ int err;
+
+ if (evb_priv->attr.method != DPDMUX_METHOD_C_VLAN &&
+ evb_priv->attr.method != DPDMUX_METHOD_S_VLAN) {
+ return 0;
+ }
+
+ nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*hdr), NLM_F_MULTI);
+ if (!nlh)
+ return -EMSGSIZE;
+
+ hdr = nlmsg_data(nlh);
+ memset(hdr, 0, sizeof(*hdr));
+ hdr->ifi_family = AF_BRIDGE;
+ hdr->ifi_type = netdev->type;
+ hdr->ifi_index = netdev->ifindex;
+ hdr->ifi_flags = dev_get_flags(netdev);
+
+ err = __nla_put_netdev(skb, netdev);
+ if (unlikely(err))
+ goto nla_put_err;
+
+ err = __nla_put_port(skb, netdev);
+ if (unlikely(err))
+ goto nla_put_err;
+
+ /* Check if the VID information is requested */
+ if (filter_mask & RTEXT_FILTER_BRVLAN) {
+ err = __nla_put_vlan(skb, netdev);
+ if (unlikely(err))
+ goto nla_put_err;
+ }
+
+ nlmsg_end(skb, nlh);
+ return skb->len;
+
+nla_put_err:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+static int evb_dellink(struct net_device *netdev,
+ struct nlmsghdr *nlh,
+ u16 flags)
+{
+ struct nlattr *tb[IFLA_BRIDGE_MAX + 1];
+ struct nlattr *spec;
+ struct bridge_vlan_info *vinfo;
+ struct evb_port_priv *port_priv = netdev_priv(netdev);
+ int err = 0;
+
+ spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
+ if (!spec)
+ return 0;
+
+ err = nla_parse_nested_deprecated(tb, IFLA_BRIDGE_MAX, spec,
+ ifla_br_policy, NULL);
+ if (unlikely(err))
+ return err;
+
+ if (!tb[IFLA_BRIDGE_VLAN_INFO])
+ return -EOPNOTSUPP;
+
+ vinfo = nla_data(tb[IFLA_BRIDGE_VLAN_INFO]);
+
+ if (!vinfo->vid || vinfo->vid > VLAN_VID_MASK)
+ return -EINVAL;
+
+ err = evb_port_del_rule(netdev, NULL, vinfo->vid);
+ if (unlikely(err)) {
+ netdev_err(netdev, "evb_port_del_rule err %d\n", err);
+ return err;
+ }
+ port_priv->vlans[vinfo->vid] = 0;
+
+ return 0;
+}
+
+static void evb_port_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct evb_port_priv *port_priv = netdev_priv(netdev);
+ u64 tmp;
+ int err;
+
+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
+ 0,
+ port_priv->evb_priv->mux_handle,
+ port_priv->port_index,
+ DPDMUX_CNT_ING_FRAME, &storage->rx_packets);
+ if (unlikely(err))
+ goto error;
+
+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
+ 0,
+ port_priv->evb_priv->mux_handle,
+ port_priv->port_index,
+ DPDMUX_CNT_ING_BYTE, &storage->rx_bytes);
+ if (unlikely(err))
+ goto error;
+
+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
+ 0,
+ port_priv->evb_priv->mux_handle,
+ port_priv->port_index,
+ DPDMUX_CNT_ING_FLTR_FRAME, &tmp);
+ if (unlikely(err))
+ goto error;
+
+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
+ 0,
+ port_priv->evb_priv->mux_handle,
+ port_priv->port_index,
+ DPDMUX_CNT_ING_FRAME_DISCARD,
+ &storage->rx_dropped);
+ if (unlikely(err)) {
+ storage->rx_dropped = tmp;
+ goto error;
+ }
+ storage->rx_dropped += tmp;
+
+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
+ 0,
+ port_priv->evb_priv->mux_handle,
+ port_priv->port_index,
+ DPDMUX_CNT_ING_MCAST_FRAME,
+ &storage->multicast);
+ if (unlikely(err))
+ goto error;
+
+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
+ 0,
+ port_priv->evb_priv->mux_handle,
+ port_priv->port_index,
+ DPDMUX_CNT_EGR_FRAME, &storage->tx_packets);
+ if (unlikely(err))
+ goto error;
+
+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
+ 0,
+ port_priv->evb_priv->mux_handle,
+ port_priv->port_index,
+ DPDMUX_CNT_EGR_BYTE, &storage->tx_bytes);
+ if (unlikely(err))
+ goto error;
+
+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
+ 0,
+ port_priv->evb_priv->mux_handle,
+ port_priv->port_index,
+ DPDMUX_CNT_EGR_FRAME_DISCARD,
+ &storage->tx_dropped);
+ if (unlikely(err))
+ goto error;
+
+ return;
+
+error:
+ netdev_err(netdev, "dpdmux_if_get_counter err %d\n", err);
+}
+
+static const struct net_device_ops evb_port_ops = {
+ .ndo_open = &evb_port_open,
+
+ .ndo_start_xmit = &evb_dropframe,
+
+ .ndo_fdb_add = &evb_port_fdb_add,
+ .ndo_fdb_del = &evb_port_fdb_del,
+
+ .ndo_get_stats64 = &evb_port_get_stats,
+ .ndo_change_mtu = &evb_change_mtu,
+};
+
+static void evb_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct evb_port_priv *port_priv = netdev_priv(netdev);
+ u16 version_major, version_minor;
+ int err;
+
+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, evb_drv_version, sizeof(drvinfo->version));
+
+ err = dpdmux_get_api_version(port_priv->evb_priv->mc_io, 0,
+ &version_major,
+ &version_minor);
+ if (err)
+ strlcpy(drvinfo->fw_version, "N/A",
+ sizeof(drvinfo->fw_version));
+ else
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%u.%u", version_major, version_minor);
+
+ strlcpy(drvinfo->bus_info, dev_name(netdev->dev.parent->parent),
+ sizeof(drvinfo->bus_info));
+}
+
+static int evb_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *link_settings)
+{
+ struct evb_port_priv *port_priv = netdev_priv(netdev);
+ struct dpdmux_link_state state = {0};
+ int err = 0;
+
+ err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
+ port_priv->evb_priv->mux_handle,
+ port_priv->port_index,
+ &state);
+ if (err) {
+ netdev_err(netdev, "ERROR %d getting link state", err);
+ goto out;
+ }
+
+	/* At the moment, we have no way of interrogating the DPMAC
+	 * from the DPDMUX side, and there may not be a DPMAC at all.
+	 * Report only autoneg state, duplex and speed.
+	 */
+ if (state.options & DPDMUX_LINK_OPT_AUTONEG)
+ link_settings->base.autoneg = AUTONEG_ENABLE;
+ if (!(state.options & DPDMUX_LINK_OPT_HALF_DUPLEX))
+ link_settings->base.duplex = DUPLEX_FULL;
+ link_settings->base.speed = state.rate;
+
+out:
+ return err;
+}
+
+static int evb_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *link_settings)
+{
+ struct evb_port_priv *port_priv = netdev_priv(netdev);
+ struct dpdmux_link_state state = {0};
+ struct dpdmux_link_cfg cfg = {0};
+ int err = 0;
+
+ netdev_dbg(netdev, "Setting link parameters...");
+
+ err = dpdmux_if_get_link_state(port_priv->evb_priv->mc_io, 0,
+ port_priv->evb_priv->mux_handle,
+ port_priv->port_index,
+ &state);
+ if (err) {
+ netdev_err(netdev, "ERROR %d getting link state", err);
+ goto out;
+ }
+
+ /* Due to a temporary MC limitation, the DPDMUX port must be down
+ * in order to be able to change link settings. Taking steps to let
+ * the user know that.
+ */
+ if (netif_running(netdev)) {
+ netdev_info(netdev,
+ "Sorry, interface must be brought down first.\n");
+ return -EACCES;
+ }
+
+ cfg.options = state.options;
+ cfg.rate = link_settings->base.speed;
+ if (link_settings->base.autoneg == AUTONEG_ENABLE)
+ cfg.options |= DPDMUX_LINK_OPT_AUTONEG;
+ else
+ cfg.options &= ~DPDMUX_LINK_OPT_AUTONEG;
+ if (link_settings->base.duplex == DUPLEX_HALF)
+ cfg.options |= DPDMUX_LINK_OPT_HALF_DUPLEX;
+ else
+ cfg.options &= ~DPDMUX_LINK_OPT_HALF_DUPLEX;
+
+ err = dpdmux_if_set_link_cfg(port_priv->evb_priv->mc_io, 0,
+ port_priv->evb_priv->mux_handle,
+ port_priv->port_index,
+ &cfg);
+ if (err)
+ /* ethtool will be loud enough if we return an error; no point
+ * in putting our own error message on the console by default
+ */
+ netdev_dbg(netdev, "ERROR %d setting link cfg", err);
+
+out:
+ return err;
+}
+
+static struct {
+ enum dpdmux_counter_type id;
+ char name[ETH_GSTRING_LEN];
+} evb_ethtool_counters[] = {
+ {DPDMUX_CNT_ING_FRAME, "rx frames"},
+ {DPDMUX_CNT_ING_BYTE, "rx bytes"},
+ {DPDMUX_CNT_ING_FLTR_FRAME, "rx filtered frames"},
+ {DPDMUX_CNT_ING_FRAME_DISCARD, "rx discarded frames"},
+ {DPDMUX_CNT_ING_BCAST_FRAME, "rx b-cast frames"},
+ {DPDMUX_CNT_ING_BCAST_BYTES, "rx b-cast bytes"},
+ {DPDMUX_CNT_ING_MCAST_FRAME, "rx m-cast frames"},
+ {DPDMUX_CNT_ING_MCAST_BYTE, "rx m-cast bytes"},
+ {DPDMUX_CNT_EGR_FRAME, "tx frames"},
+ {DPDMUX_CNT_EGR_BYTE, "tx bytes"},
+ {DPDMUX_CNT_EGR_FRAME_DISCARD, "tx discarded frames"},
+};
+
+static int evb_ethtool_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(evb_ethtool_counters);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void evb_ethtool_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ u32 i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(evb_ethtool_counters); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ evb_ethtool_counters[i].name, ETH_GSTRING_LEN);
+ break;
+ }
+}
+
+static void evb_ethtool_get_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct evb_port_priv *port_priv = netdev_priv(netdev);
+ u32 i;
+ int err;
+
+ for (i = 0; i < ARRAY_SIZE(evb_ethtool_counters); i++) {
+ err = dpdmux_if_get_counter(port_priv->evb_priv->mc_io,
+ 0,
+ port_priv->evb_priv->mux_handle,
+ port_priv->port_index,
+ evb_ethtool_counters[i].id,
+ &data[i]);
+ if (err)
+ netdev_err(netdev, "dpdmux_if_get_counter[%s] err %d\n",
+ evb_ethtool_counters[i].name, err);
+ }
+}
+
+static const struct ethtool_ops evb_port_ethtool_ops = {
+ .get_drvinfo = &evb_get_drvinfo,
+ .get_link = &ethtool_op_get_link,
+ .get_link_ksettings = &evb_get_link_ksettings,
+ .set_link_ksettings = &evb_set_link_ksettings,
+ .get_strings = &evb_ethtool_get_strings,
+ .get_ethtool_stats = &evb_ethtool_get_stats,
+ .get_sset_count = &evb_ethtool_get_sset_count,
+};
+
+static int evb_open(struct net_device *netdev)
+{
+ struct evb_priv *priv = netdev_priv(netdev);
+ int err = 0;
+
+ err = dpdmux_enable(priv->mc_io, 0, priv->mux_handle);
+ if (unlikely(err))
+ netdev_err(netdev, "dpdmux_enable err %d\n", err);
+
+ return err;
+}
+
+static int evb_close(struct net_device *netdev)
+{
+ struct evb_priv *priv = netdev_priv(netdev);
+ int err = 0;
+
+ err = dpdmux_disable(priv->mc_io, 0, priv->mux_handle);
+ if (unlikely(err))
+ netdev_err(netdev, "dpdmux_disable err %d\n", err);
+
+ return err;
+}
+
+static const struct net_device_ops evb_ops = {
+ .ndo_start_xmit = &evb_dropframe,
+ .ndo_open = &evb_open,
+ .ndo_stop = &evb_close,
+
+ .ndo_bridge_setlink = &evb_setlink,
+ .ndo_bridge_getlink = &evb_getlink,
+ .ndo_bridge_dellink = &evb_dellink,
+
+ .ndo_get_stats64 = &evb_port_get_stats,
+ .ndo_change_mtu = &evb_change_mtu,
+};
+
+static int evb_takedown(struct fsl_mc_device *evb_dev)
+{
+ struct device *dev = &evb_dev->dev;
+ struct net_device *netdev = dev_get_drvdata(dev);
+ struct evb_priv *priv = netdev_priv(netdev);
+ int err;
+
+ err = dpdmux_close(priv->mc_io, 0, priv->mux_handle);
+ if (unlikely(err))
+ dev_warn(dev, "dpdmux_close err %d\n", err);
+
+ return 0;
+}
+
+static int evb_init(struct fsl_mc_device *evb_dev)
+{
+ struct device *dev = &evb_dev->dev;
+ struct net_device *netdev = dev_get_drvdata(dev);
+ struct evb_priv *priv = netdev_priv(netdev);
+ u16 version_major;
+ u16 version_minor;
+ int err = 0;
+
+ priv->dev_id = evb_dev->obj_desc.id;
+
+ err = dpdmux_open(priv->mc_io, 0, priv->dev_id, &priv->mux_handle);
+ if (unlikely(err)) {
+ dev_err(dev, "dpdmux_open err %d\n", err);
+ goto err_exit;
+ }
+ if (!priv->mux_handle) {
+ dev_err(dev, "dpdmux_open returned null handle but no error\n");
+ err = -EFAULT;
+ goto err_exit;
+ }
+
+ err = dpdmux_get_attributes(priv->mc_io, 0, priv->mux_handle,
+ &priv->attr);
+ if (unlikely(err)) {
+ dev_err(dev, "dpdmux_get_attributes err %d\n", err);
+ goto err_close;
+ }
+
+ err = dpdmux_get_api_version(priv->mc_io, 0,
+ &version_major,
+ &version_minor);
+ if (unlikely(err)) {
+ dev_err(dev, "dpdmux_get_api_version err %d\n", err);
+ goto err_close;
+ }
+
+ /* Minimum supported DPDMUX version check */
+ if (version_major < DPDMUX_MIN_VER_MAJOR ||
+ (version_major == DPDMUX_MIN_VER_MAJOR &&
+ version_minor < DPDMUX_MIN_VER_MINOR)) {
+		dev_err(dev, "DPDMUX version %d.%d not supported. Use %d.%d or greater.\n",
+			version_major, version_minor,
+			DPDMUX_MIN_VER_MAJOR, DPDMUX_MIN_VER_MINOR);
+ err = -ENOTSUPP;
+ goto err_close;
+ }
+
+ err = dpdmux_reset(priv->mc_io, 0, priv->mux_handle);
+ if (unlikely(err)) {
+ dev_err(dev, "dpdmux_reset err %d\n", err);
+ goto err_close;
+ }
+
+ return 0;
+
+err_close:
+ dpdmux_close(priv->mc_io, 0, priv->mux_handle);
+err_exit:
+ return err;
+}
+
+static int evb_remove(struct fsl_mc_device *evb_dev)
+{
+ struct device *dev = &evb_dev->dev;
+ struct net_device *netdev = dev_get_drvdata(dev);
+ struct evb_priv *priv = netdev_priv(netdev);
+ struct evb_port_priv *port_priv;
+ struct list_head *pos;
+
+ list_for_each(pos, &priv->port_list) {
+ port_priv = list_entry(pos, struct evb_port_priv, list);
+
+ rtnl_lock();
+ netdev_upper_dev_unlink(port_priv->netdev, netdev);
+ rtnl_unlock();
+
+ unregister_netdev(port_priv->netdev);
+ free_netdev(port_priv->netdev);
+ }
+
+ evb_teardown_irqs(evb_dev);
+
+ unregister_netdev(netdev);
+
+ evb_takedown(evb_dev);
+ fsl_mc_portal_free(priv->mc_io);
+
+ dev_set_drvdata(dev, NULL);
+ free_netdev(netdev);
+
+ return 0;
+}
+
+static int evb_probe(struct fsl_mc_device *evb_dev)
+{
+ struct device *dev;
+ struct evb_priv *priv = NULL;
+ struct net_device *netdev = NULL;
+ char port_name[IFNAMSIZ];
+ int i;
+ int err = 0;
+
+ dev = &evb_dev->dev;
+
+ /* register switch device, it's for management only - no I/O */
+ netdev = alloc_etherdev(sizeof(*priv));
+ if (!netdev) {
+ dev_err(dev, "alloc_etherdev error\n");
+ return -ENOMEM;
+ }
+ netdev->netdev_ops = &evb_ops;
+
+ dev_set_drvdata(dev, netdev);
+
+ priv = netdev_priv(netdev);
+
+ err = fsl_mc_portal_allocate(evb_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
+ &priv->mc_io);
+ if (err) {
+ if (err == -ENXIO)
+ err = -EPROBE_DEFER;
+ else
+ dev_err(dev, "fsl_mc_portal_allocate err %d\n", err);
+ goto err_free_netdev;
+ }
+
+ if (!priv->mc_io) {
+ dev_err(dev, "fsl_mc_portal_allocate returned null handle but no error\n");
+ err = -EFAULT;
+ goto err_free_netdev;
+ }
+
+ err = evb_init(evb_dev);
+ if (unlikely(err)) {
+ dev_err(dev, "evb init err %d\n", err);
+ goto err_free_cmdport;
+ }
+
+ INIT_LIST_HEAD(&priv->port_list);
+ netdev->flags |= IFF_PROMISC | IFF_MASTER;
+
+ dev_alloc_name(netdev, "evb%d");
+
+ /* register switch ports */
+ snprintf(port_name, IFNAMSIZ, "%sp%%d", netdev->name);
+
+ /* only register downlinks? */
+ for (i = 0; i < priv->attr.num_ifs + 1; i++) {
+ struct net_device *port_netdev;
+ struct evb_port_priv *port_priv;
+
+ if (i) {
+ port_netdev =
+ alloc_etherdev(sizeof(struct evb_port_priv));
+ if (!port_netdev) {
+ dev_err(dev, "alloc_etherdev error\n");
+ goto err_takedown;
+ }
+
+ port_priv = netdev_priv(port_netdev);
+
+ port_netdev->flags |= IFF_PROMISC | IFF_SLAVE;
+
+ dev_alloc_name(port_netdev, port_name);
+ } else {
+ port_netdev = netdev;
+ port_priv = &priv->uplink;
+ }
+
+ port_priv->netdev = port_netdev;
+ port_priv->evb_priv = priv;
+ port_priv->port_index = i;
+
+ SET_NETDEV_DEV(port_netdev, dev);
+
+ if (i) {
+ port_netdev->netdev_ops = &evb_port_ops;
+
+ err = register_netdev(port_netdev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev err %d\n", err);
+ free_netdev(port_netdev);
+ goto err_takedown;
+ }
+
+ rtnl_lock();
+ err = netdev_master_upper_dev_link(port_netdev, netdev,
+ NULL, NULL, NULL);
+ if (unlikely(err)) {
+ dev_err(dev, "netdev_master_upper_dev_link err %d\n",
+ err);
+				unregister_netdevice(port_netdev);
+				rtnl_unlock();
+				free_netdev(port_netdev);
+ goto err_takedown;
+ }
+ rtmsg_ifinfo(RTM_NEWLINK, port_netdev,
+ IFF_SLAVE, GFP_KERNEL);
+ rtnl_unlock();
+
+ list_add(&port_priv->list, &priv->port_list);
+ } else {
+ /* Set MTU limits only on uplink */
+ port_netdev->min_mtu = EVB_MIN_MTU;
+ port_netdev->max_mtu = EVB_MAX_MTU;
+
+ err = register_netdev(netdev);
+
+ if (err < 0) {
+ dev_err(dev, "register_netdev error %d\n", err);
+ goto err_takedown;
+ }
+ }
+
+ port_netdev->ethtool_ops = &evb_port_ethtool_ops;
+
+ /* ports are up from init */
+ rtnl_lock();
+ err = dev_open(port_netdev, NULL);
+ rtnl_unlock();
+ if (unlikely(err))
+ dev_warn(dev, "dev_open err %d\n", err);
+ }
+
+ /* setup irqs */
+ err = evb_setup_irqs(evb_dev);
+ if (unlikely(err)) {
+ dev_warn(dev, "evb_setup_irqs err %d\n", err);
+ goto err_takedown;
+ }
+
+ dev_info(dev, "probed evb device with %d ports\n",
+ priv->attr.num_ifs);
+ return 0;
+
+err_takedown:
+	evb_remove(evb_dev);
+	/* evb_remove() already frees the MC portal and the netdevs */
+	return err;
+err_free_cmdport:
+ fsl_mc_portal_free(priv->mc_io);
+err_free_netdev:
+ return err;
+}
+
+static const struct fsl_mc_device_id evb_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpdmux",
+ },
+ {}
+};
+
+static struct fsl_mc_driver evb_drv = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = evb_probe,
+ .remove = evb_remove,
+ .match_id_table = evb_match_id_table,
+};
+
+module_fsl_mc_driver(evb_drv);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Layerscape DPAA2 Edge Virtual Bridge driver (prototype)");
diff --git a/drivers/staging/fsl-dpaa2/mac/Kconfig b/drivers/staging/fsl-dpaa2/mac/Kconfig
new file mode 100644
index 000000000000..c94f7c1b10d5
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/mac/Kconfig
@@ -0,0 +1,23 @@
+config FSL_DPAA2_MAC
+ tristate "DPAA2 MAC / PHY interface"
+ depends on FSL_MC_BUS && FSL_DPAA2
+ select MDIO_BUS_MUX_MMIOREG
+ select FSL_XGMAC_MDIO
+ select FIXED_PHY
+ ---help---
+ Prototype driver for DPAA2 MAC / PHY interface object.
+	  This driver acts as a proxy between phylib (including PHY drivers)
+	  and the MC firmware. It forwards link state updates from phylib to
+	  the MC, and receives an interrupt from the MC whenever a request
+	  is made to change the link state.
+
+config FSL_DPAA2_MAC_NETDEVS
+ bool "Expose net interfaces for PHYs"
+ default n
+ depends on FSL_DPAA2_MAC
+ ---help---
+ Exposes macX net interfaces which allow direct control over MACs and
+ PHYs.
+
+ Leave disabled if unsure.
diff --git a/drivers/staging/fsl-dpaa2/mac/Makefile b/drivers/staging/fsl-dpaa2/mac/Makefile
new file mode 100644
index 000000000000..bda94101b7c1
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/mac/Makefile
@@ -0,0 +1,10 @@
+
+obj-$(CONFIG_FSL_DPAA2_MAC) += dpaa2-mac.o
+
+dpaa2-mac-objs := mac.o dpmac.o
+
+all:
+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
+
+clean:
+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
diff --git a/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
new file mode 100644
index 000000000000..eff4bc1c0bd4
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h
@@ -0,0 +1,196 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _FSL_DPMAC_CMD_H
+#define _FSL_DPMAC_CMD_H
+
+/* DPMAC Version */
+#define DPMAC_VER_MAJOR 4
+#define DPMAC_VER_MINOR 2
+#define DPMAC_CMD_BASE_VERSION 1
+#define DPMAC_CMD_2ND_VERSION 2
+#define DPMAC_CMD_ID_OFFSET 4
+
+#define DPMAC_CMD(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_BASE_VERSION)
+#define DPMAC_CMD_V2(id) (((id) << DPMAC_CMD_ID_OFFSET) | DPMAC_CMD_2ND_VERSION)
+
+/* Command IDs */
+#define DPMAC_CMDID_CLOSE DPMAC_CMD(0x800)
+#define DPMAC_CMDID_OPEN DPMAC_CMD(0x80c)
+#define DPMAC_CMDID_CREATE DPMAC_CMD(0x90c)
+#define DPMAC_CMDID_DESTROY DPMAC_CMD(0x98c)
+#define DPMAC_CMDID_GET_API_VERSION DPMAC_CMD(0xa0c)
+
+#define DPMAC_CMDID_GET_ATTR DPMAC_CMD(0x004)
+#define DPMAC_CMDID_RESET DPMAC_CMD(0x005)
+
+#define DPMAC_CMDID_SET_IRQ_ENABLE DPMAC_CMD(0x012)
+#define DPMAC_CMDID_GET_IRQ_ENABLE DPMAC_CMD(0x013)
+#define DPMAC_CMDID_SET_IRQ_MASK DPMAC_CMD(0x014)
+#define DPMAC_CMDID_GET_IRQ_MASK DPMAC_CMD(0x015)
+#define DPMAC_CMDID_GET_IRQ_STATUS DPMAC_CMD(0x016)
+#define DPMAC_CMDID_CLEAR_IRQ_STATUS DPMAC_CMD(0x017)
+
+#define DPMAC_CMDID_GET_LINK_CFG DPMAC_CMD(0x0c2)
+#define DPMAC_CMDID_GET_LINK_CFG_V2 DPMAC_CMD_V2(0x0c2)
+#define DPMAC_CMDID_SET_LINK_STATE DPMAC_CMD(0x0c3)
+#define DPMAC_CMDID_SET_LINK_STATE_V2 DPMAC_CMD_V2(0x0c3)
+#define DPMAC_CMDID_GET_COUNTER DPMAC_CMD(0x0c4)
+
+#define DPMAC_CMDID_SET_PORT_MAC_ADDR DPMAC_CMD(0x0c5)
+
+/* Macros for accessing command fields smaller than 1 byte */
+#define DPMAC_MASK(field) \
+ GENMASK(DPMAC_##field##_SHIFT + DPMAC_##field##_SIZE - 1, \
+ DPMAC_##field##_SHIFT)
+#define dpmac_set_field(var, field, val) \
+ ((var) |= (((val) << DPMAC_##field##_SHIFT) & DPMAC_MASK(field)))
+#define dpmac_get_field(var, field) \
+ (((var) & DPMAC_MASK(field)) >> DPMAC_##field##_SHIFT)
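+
+/*
+ * Illustrative sketch: packing and unpacking the link-state byte used by
+ * struct dpmac_cmd_set_link_state_v2 further below (up is a caller value):
+ *
+ *	u8 state = 0;
+ *
+ *	dpmac_set_field(state, STATE, !!up);
+ *	dpmac_set_field(state, STATE_VALID, 1);
+ *	up = dpmac_get_field(state, STATE);
+ */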
+
+struct dpmac_cmd_open {
+ u32 dpmac_id;
+};
+
+struct dpmac_cmd_create {
+ u32 mac_id;
+};
+
+struct dpmac_cmd_destroy {
+ u32 dpmac_id;
+};
+
+struct dpmac_cmd_set_irq_enable {
+ u8 enable;
+ u8 pad[3];
+ u8 irq_index;
+};
+
+struct dpmac_cmd_get_irq_enable {
+ u32 pad;
+ u8 irq_index;
+};
+
+struct dpmac_rsp_get_irq_enable {
+ u8 enabled;
+};
+
+struct dpmac_cmd_set_irq_mask {
+ u32 mask;
+ u8 irq_index;
+};
+
+struct dpmac_cmd_get_irq_mask {
+ u32 pad;
+ u8 irq_index;
+};
+
+struct dpmac_rsp_get_irq_mask {
+ u32 mask;
+};
+
+struct dpmac_cmd_get_irq_status {
+ u32 status;
+ u8 irq_index;
+};
+
+struct dpmac_rsp_get_irq_status {
+ u32 status;
+};
+
+struct dpmac_cmd_clear_irq_status {
+ u32 status;
+ u8 irq_index;
+};
+
+struct dpmac_rsp_get_attributes {
+ u8 eth_if;
+ u8 link_type;
+ u16 id;
+ u32 max_rate;
+};
+
+struct dpmac_rsp_get_link_cfg {
+ u64 options;
+ u32 rate;
+};
+
+struct dpmac_rsp_get_link_cfg_v2 {
+ u64 options;
+ u32 rate;
+ u32 pad;
+ u64 advertising;
+};
+
+#define DPMAC_STATE_SIZE 1
+#define DPMAC_STATE_SHIFT 0
+#define DPMAC_STATE_VALID_SIZE 1
+#define DPMAC_STATE_VALID_SHIFT 1
+
+struct dpmac_cmd_set_link_state {
+ u64 options;
+ u32 rate;
+ u32 pad;
+ /* only least significant bit is valid */
+ u8 up;
+};
+
+struct dpmac_cmd_set_link_state_v2 {
+ u64 options;
+ u32 rate;
+ u32 pad0;
+ /* from lsb: up:1, state_valid:1 */
+ u8 state;
+ u8 pad1[7];
+ u64 supported;
+ u64 advertising;
+};
+
+struct dpmac_cmd_get_counter {
+ u8 type;
+};
+
+struct dpmac_rsp_get_counter {
+ u64 pad;
+ u64 counter;
+};
+
+struct dpmac_rsp_get_api_version {
+ u16 major;
+ u16 minor;
+};
+
+struct dpmac_cmd_set_port_mac_addr {
+ u8 pad[2];
+ u8 addr[6];
+};
+
+#endif /* _FSL_DPMAC_CMD_H */
diff --git a/drivers/staging/fsl-dpaa2/mac/dpmac.c b/drivers/staging/fsl-dpaa2/mac/dpmac.c
new file mode 100644
index 000000000000..c37cbc13b57b
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/mac/dpmac.c
@@ -0,0 +1,689 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/fsl/mc.h>
+#include "dpmac.h"
+#include "dpmac-cmd.h"
+
+/**
+ * dpmac_open() - Open a control session for the specified object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @dpmac_id: DPMAC unique ID
+ * @token: Returned token; use in subsequent API calls
+ *
+ * This function can be used to open a control session for an
+ * already created object; an object may have been declared in
+ * the DPL or by calling the dpmac_create function.
+ * This function returns a unique authentication token,
+ * associated with the specific object ID and the specific MC
+ * portal; this token must be used in all subsequent commands for
+ * this specific object
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpmac_id,
+ u16 *token)
+{
+ struct dpmac_cmd_open *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN,
+ cmd_flags,
+ 0);
+ cmd_params = (struct dpmac_cmd_open *)cmd.params;
+ cmd_params->dpmac_id = cpu_to_le32(dpmac_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *token = mc_cmd_hdr_read_token(&cmd);
+
+ return err;
+}
+
+/**
+ * dpmac_close() - Close the control session of the object
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ *
+ * After this function is called, no further operations are
+ * allowed on the object without opening a new control session.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token)
+{
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags,
+ token);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
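The open/close pair above implements a session model: every runtime command authenticates with the token returned by open. A minimal caller sketch (hypothetical; error handling trimmed, 'mc_io' and 'dpmac_id' supplied by the caller):

	u16 token;
	int err;

	err = dpmac_open(mc_io, 0, dpmac_id, &token);
	if (err)
		return err;
	/* ... issue runtime DPMAC commands using 'token' ... */
	dpmac_close(mc_io, 0, token);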
+
+/**
+ * dpmac_create() - Create the DPMAC object.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @cfg: Configuration structure
+ * @obj_id: Returned object id
+ *
+ * Create the DPMAC object, allocate required resources and
+ * perform required initialization.
+ *
+ * The function accepts an authentication token of a parent
+ * container that this object should be assigned to. The token
+ * can be '0' so the object will be assigned to the default container.
+ * The newly created object can be opened with the returned
+ * object id and using the container's associated tokens and MC portals.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_create(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
+ u32 cmd_flags,
+ const struct dpmac_cfg *cfg,
+ u32 *obj_id)
+{
+ struct dpmac_cmd_create *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CREATE,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpmac_cmd_create *)cmd.params;
+ cmd_params->mac_id = cpu_to_le32(cfg->mac_id);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ *obj_id = mc_cmd_read_object_id(&cmd);
+
+ return 0;
+}
+
+/**
+ * dpmac_destroy() - Destroy the DPMAC object and release all its resources.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @dprc_token: Parent container token; '0' for default container
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @object_id: The object id; it must be a valid id within the container that
+ * created this object;
+ *
+ * The function accepts the authentication token of the parent container that
+ * created the object (not the one that currently owns the object). The object
+ * is searched within parent using the provided 'object_id'.
+ * All tokens of the object must be closed before calling destroy.
+ *
+ * Return: '0' on Success; error code otherwise.
+ */
+int dpmac_destroy(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
+ u32 cmd_flags,
+ u32 object_id)
+{
+ struct dpmac_cmd_destroy *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_DESTROY,
+ cmd_flags,
+ dprc_token);
+ cmd_params = (struct dpmac_cmd_destroy *)cmd.params;
+ cmd_params->dpmac_id = cpu_to_le32(object_id);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpmac_set_irq_enable() - Set overall interrupt state.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @irq_index: The interrupt index to configure
+ * @en: Interrupt state - enable = 1, disable = 0
+ *
+ * Allows GPP software to control when interrupts are generated.
+ * Each interrupt can have up to 32 causes. The enable/disable controls the
+ * overall interrupt state: if the interrupt is disabled, no cause will raise
+ * an interrupt.
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en)
+{
+ struct dpmac_cmd_set_irq_enable *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpmac_cmd_set_irq_enable *)cmd.params;
+ cmd_params->irq_index = irq_index;
+ cmd_params->enable = en;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpmac_get_irq_enable() - Get overall interrupt state
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @irq_index: The interrupt index to configure
+ * @en: Returned interrupt state - enable = 1, disable = 0
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_get_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 *en)
+{
+ struct dpmac_cmd_get_irq_enable *cmd_params;
+ struct dpmac_rsp_get_irq_enable *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_ENABLE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpmac_cmd_get_irq_enable *)cmd.params;
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpmac_rsp_get_irq_enable *)cmd.params;
+ *en = rsp_params->enabled;
+
+ return 0;
+}
+
+/**
+ * dpmac_set_irq_mask() - Set interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @irq_index: The interrupt index to configure
+ * @mask: Event mask to trigger interrupt;
+ * each bit:
+ * 0 = ignore event
+ * 1 = consider event for asserting IRQ
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask)
+{
+ struct dpmac_cmd_set_irq_mask *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpmac_cmd_set_irq_mask *)cmd.params;
+ cmd_params->mask = cpu_to_le32(mask);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpmac_get_irq_mask() - Get interrupt mask.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @irq_index: The interrupt index to configure
+ * @mask: Returned event mask to trigger interrupt
+ *
+ * Every interrupt can have up to 32 causes and the interrupt model supports
+ * masking/unmasking each cause independently
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_get_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *mask)
+{
+ struct dpmac_cmd_get_irq_mask *cmd_params;
+ struct dpmac_rsp_get_irq_mask *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_MASK,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpmac_cmd_get_irq_mask *)cmd.params;
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpmac_rsp_get_irq_mask *)cmd.params;
+ *mask = le32_to_cpu(rsp_params->mask);
+
+ return 0;
+}
+
+/**
+ * dpmac_get_irq_status() - Get the current status of any pending interrupts.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @irq_index: The interrupt index to configure
+ * @status: Returned interrupts status - one bit per cause:
+ * 0 = no interrupt pending
+ * 1 = interrupt pending
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status)
+{
+ struct dpmac_cmd_get_irq_status *cmd_params;
+ struct dpmac_rsp_get_irq_status *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpmac_cmd_get_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(*status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpmac_rsp_get_irq_status *)cmd.params;
+ *status = le32_to_cpu(rsp_params->status);
+
+ return 0;
+}
+
+/**
+ * dpmac_clear_irq_status() - Clear a pending interrupt's status
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @irq_index: The interrupt index to configure
+ * @status: Bits to clear (W1C) - one bit per cause:
+ * 0 = don't change
+ * 1 = clear status bit
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status)
+{
+ struct dpmac_cmd_clear_irq_status *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLEAR_IRQ_STATUS,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpmac_cmd_clear_irq_status *)cmd.params;
+ cmd_params->status = cpu_to_le32(status);
+ cmd_params->irq_index = irq_index;
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
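Taken together, the IRQ helpers are used in mask-then-enable order, which is also what the mac driver's setup_irqs() does later in this patch. A condensed sketch, assuming an open token and the DPMAC_IRQ_* constants from dpmac.h below:

	err = dpmac_set_irq_mask(mc_io, 0, token, DPMAC_IRQ_INDEX,
				 DPMAC_IRQ_EVENT_LINK_CFG_REQ);
	if (!err)
		err = dpmac_set_irq_enable(mc_io, 0, token,
					   DPMAC_IRQ_INDEX, 1);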
+
+/**
+ * dpmac_get_attributes() - Retrieve DPMAC attributes.
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @attr: Returned object's attributes
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_attr *attr)
+{
+ struct dpmac_rsp_get_attributes *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ rsp_params = (struct dpmac_rsp_get_attributes *)cmd.params;
+ attr->eth_if = rsp_params->eth_if;
+ attr->link_type = rsp_params->link_type;
+ attr->id = le16_to_cpu(rsp_params->id);
+ attr->max_rate = le32_to_cpu(rsp_params->max_rate);
+
+ return 0;
+}
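As a usage note, the attributes are typically the first thing a caller reads after open, for example to pick a PHY interface mode. A sketch (identifiers assumed in scope):

	struct dpmac_attr attr;

	err = dpmac_get_attributes(mc_io, 0, token, &attr);
	if (!err)
		pr_info("dpmac.%u: max rate %u Mbps\n",
			attr.id, attr.max_rate);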
+
+/**
+ * dpmac_get_link_cfg() - Get Ethernet link configuration
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @cfg: Returned structure with the link configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_link_cfg *cfg)
+{
+ struct dpmac_rsp_get_link_cfg *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err = 0;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_LINK_CFG,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpmac_rsp_get_link_cfg *)cmd.params;
+ cfg->options = le64_to_cpu(rsp_params->options);
+ cfg->rate = le32_to_cpu(rsp_params->rate);
+
+ return 0;
+}
+
+/**
+ * dpmac_get_link_cfg_v2() - Get Ethernet link configuration
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @cfg: Returned structure with the link configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_get_link_cfg_v2(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_link_cfg *cfg)
+{
+ struct dpmac_rsp_get_link_cfg_v2 *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err = 0;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_LINK_CFG_V2,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpmac_rsp_get_link_cfg_v2 *)cmd.params;
+ cfg->options = le64_to_cpu(rsp_params->options);
+ cfg->rate = le32_to_cpu(rsp_params->rate);
+ cfg->advertising = le64_to_cpu(rsp_params->advertising);
+
+ return 0;
+}
+
+/**
+ * dpmac_set_link_state() - Set the Ethernet link status
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @link_state: Link state configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_set_link_state(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_link_state *link_state)
+{
+ struct dpmac_cmd_set_link_state *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpmac_cmd_set_link_state *)cmd.params;
+ cmd_params->options = cpu_to_le64(link_state->options);
+ cmd_params->rate = cpu_to_le32(link_state->rate);
+ dpmac_set_field(cmd_params->up, STATE, link_state->up);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpmac_set_link_state_v2() - Set the Ethernet link status
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @link_state: Link state configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_set_link_state_v2(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_link_state *link_state)
+{
+ struct dpmac_cmd_set_link_state_v2 *cmd_params;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE_V2,
+ cmd_flags,
+ token);
+ cmd_params = (struct dpmac_cmd_set_link_state_v2 *)cmd.params;
+ cmd_params->options = cpu_to_le64(link_state->options);
+ cmd_params->rate = cpu_to_le32(link_state->rate);
+ dpmac_set_field(cmd_params->state, STATE, link_state->up);
+ dpmac_set_field(cmd_params->state, STATE_VALID,
+ link_state->state_valid);
+ cmd_params->supported = cpu_to_le64(link_state->supported);
+ cmd_params->advertising = cpu_to_le64(link_state->advertising);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
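A hypothetical caller of the v2 variant fills in the extra supported/advertising bitmaps and marks the state valid, roughly:

	struct dpmac_link_state state = {
		.rate = 1000,				/* Mbps */
		.options = DPMAC_LINK_OPT_AUTONEG,
		.up = 1,
		.state_valid = 1,
		.supported = DPMAC_ADVERTISED_1000BASET_FULL,
		.advertising = DPMAC_ADVERTISED_1000BASET_FULL,
	};

	err = dpmac_set_link_state_v2(mc_io, 0, token, &state);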
+
+/**
+ * dpmac_get_counter() - Read a specific DPMAC counter
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @type: The requested counter
+ * @counter: Returned counter value
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_get_counter(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpmac_counter type,
+ u64 *counter)
+{
+ struct dpmac_cmd_get_counter *dpmac_cmd;
+ struct dpmac_rsp_get_counter *dpmac_rsp;
+ struct fsl_mc_command cmd = { 0 };
+ int err = 0;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER,
+ cmd_flags,
+ token);
+ dpmac_cmd = (struct dpmac_cmd_get_counter *)cmd.params;
+ dpmac_cmd->type = type;
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ dpmac_rsp = (struct dpmac_rsp_get_counter *)cmd.params;
+ *counter = le64_to_cpu(dpmac_rsp->counter);
+
+ return 0;
+}
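Reading a counter is a one-shot query; the value is returned through the pointer argument. A sketch:

	u64 rx_bytes;

	err = dpmac_get_counter(mc_io, 0, token,
				DPMAC_CNT_ING_BYTE, &rx_bytes);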
+
+/* untested */
+int dpmac_set_port_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const u8 addr[6])
+{
+ struct dpmac_cmd_set_port_mac_addr *dpmac_cmd;
+ struct fsl_mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_PORT_MAC_ADDR,
+ cmd_flags,
+ token);
+ dpmac_cmd = (struct dpmac_cmd_set_port_mac_addr *)cmd.params;
+ dpmac_cmd->addr[0] = addr[5];
+ dpmac_cmd->addr[1] = addr[4];
+ dpmac_cmd->addr[2] = addr[3];
+ dpmac_cmd->addr[3] = addr[2];
+ dpmac_cmd->addr[4] = addr[1];
+ dpmac_cmd->addr[5] = addr[0];
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
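Note the byte reversal above: judging from the assignments, the MC expects the address with the least significant octet first, so a conventionally ordered u8[6] is stored back-to-front. A hypothetical call site:

	/* 00:04:9f:00:00:01, in the usual transmission order */
	static const u8 mac[6] = { 0x00, 0x04, 0x9f, 0x00, 0x00, 0x01 };

	err = dpmac_set_port_mac_addr(mc_io, 0, token, mac);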
+
+/**
+ * dpmac_get_api_version() - Get Data Path MAC version
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @major_ver: Major version of data path mac API
+ * @minor_ver: Minor version of data path mac API
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver)
+{
+ struct dpmac_rsp_get_api_version *rsp_params;
+ struct fsl_mc_command cmd = { 0 };
+ int err;
+
+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_API_VERSION,
+ cmd_flags,
+ 0);
+
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ rsp_params = (struct dpmac_rsp_get_api_version *)cmd.params;
+ *major_ver = le16_to_cpu(rsp_params->major);
+ *minor_ver = le16_to_cpu(rsp_params->minor);
+
+ return 0;
+}
diff --git a/drivers/staging/fsl-dpaa2/mac/dpmac.h b/drivers/staging/fsl-dpaa2/mac/dpmac.h
new file mode 100644
index 000000000000..98ada210f047
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/mac/dpmac.h
@@ -0,0 +1,379 @@
+/* Copyright 2013-2016 Freescale Semiconductor Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __FSL_DPMAC_H
+#define __FSL_DPMAC_H
+
+/* Data Path MAC API
+ * Contains initialization APIs and runtime control APIs for DPMAC
+ */
+
+struct fsl_mc_io;
+
+int dpmac_open(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ int dpmac_id,
+ u16 *token);
+
+int dpmac_close(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token);
+
+/**
+ * enum dpmac_link_type - DPMAC link type
+ * @DPMAC_LINK_TYPE_NONE: No link
+ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type
+ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID
+ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type
+ */
+enum dpmac_link_type {
+ DPMAC_LINK_TYPE_NONE,
+ DPMAC_LINK_TYPE_FIXED,
+ DPMAC_LINK_TYPE_PHY,
+ DPMAC_LINK_TYPE_BACKPLANE
+};
+
+/**
+ * enum dpmac_eth_if - DPMAC Ethernet interface
+ * @DPMAC_ETH_IF_MII: MII interface
+ * @DPMAC_ETH_IF_RMII: RMII interface
+ * @DPMAC_ETH_IF_SMII: SMII interface
+ * @DPMAC_ETH_IF_GMII: GMII interface
+ * @DPMAC_ETH_IF_RGMII: RGMII interface
+ * @DPMAC_ETH_IF_SGMII: SGMII interface
+ * @DPMAC_ETH_IF_QSGMII: QSGMII interface
+ * @DPMAC_ETH_IF_XAUI: XAUI interface
+ * @DPMAC_ETH_IF_XFI: XFI interface
+ */
+enum dpmac_eth_if {
+ DPMAC_ETH_IF_MII,
+ DPMAC_ETH_IF_RMII,
+ DPMAC_ETH_IF_SMII,
+ DPMAC_ETH_IF_GMII,
+ DPMAC_ETH_IF_RGMII,
+ DPMAC_ETH_IF_SGMII,
+ DPMAC_ETH_IF_QSGMII,
+ DPMAC_ETH_IF_XAUI,
+ DPMAC_ETH_IF_XFI
+};
+
+/**
+ * struct dpmac_cfg - Structure representing DPMAC configuration
+ * @mac_id: Represents the Hardware MAC ID; in case of multiple WRIOPs,
+ *	the MAC IDs are contiguous.
+ * For example: 2 WRIOPs, 16 MACs in each:
+ * MAC IDs for the 1st WRIOP: 1-16,
+ * MAC IDs for the 2nd WRIOP: 17-32.
+ */
+struct dpmac_cfg {
+ u16 mac_id;
+};
+
+int dpmac_create(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
+ u32 cmd_flags,
+ const struct dpmac_cfg *cfg,
+ u32 *obj_id);
+
+int dpmac_destroy(struct fsl_mc_io *mc_io,
+ u16 dprc_token,
+ u32 cmd_flags,
+ u32 object_id);
+
+/**
+ * DPMAC IRQ Index and Events
+ */
+
+/**
+ * IRQ index
+ */
+#define DPMAC_IRQ_INDEX 0
+/**
+ * IRQ event - indicates a request to reconfigure the link
+ */
+#define DPMAC_IRQ_EVENT_LINK_CFG_REQ 0x00000001
+/**
+ * IRQ event - Indicates that the link state changed
+ */
+#define DPMAC_IRQ_EVENT_LINK_CHANGED 0x00000002
+/**
+ * IRQ events - indicate whether the PHY needs to suspend or resume
+ */
+#define DPMAC_IRQ_EVENT_LINK_UP_REQ 0x00000004
+#define DPMAC_IRQ_EVENT_LINK_DOWN_REQ 0x00000008
+
+int dpmac_set_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 en);
+
+int dpmac_get_irq_enable(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u8 *en);
+
+int dpmac_set_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 mask);
+
+int dpmac_get_irq_mask(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *mask);
+
+int dpmac_get_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 *status);
+
+int dpmac_clear_irq_status(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ u8 irq_index,
+ u32 status);
+
+/**
+ * struct dpmac_attr - Structure representing DPMAC attributes
+ * @id: DPMAC object ID
+ * @max_rate: Maximum supported rate - in Mbps
+ * @eth_if: Ethernet interface
+ * @link_type: link type
+ */
+struct dpmac_attr {
+ u16 id;
+ u32 max_rate;
+ enum dpmac_eth_if eth_if;
+ enum dpmac_link_type link_type;
+};
+
+int dpmac_get_attributes(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_attr *attr);
+
+/**
+ * DPMAC link configuration/state options
+ */
+
+/**
+ * Enable auto-negotiation
+ */
+#define DPMAC_LINK_OPT_AUTONEG 0x0000000000000001ULL
+/**
+ * Enable half-duplex mode
+ */
+#define DPMAC_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL
+/**
+ * Enable pause frames
+ */
+#define DPMAC_LINK_OPT_PAUSE 0x0000000000000004ULL
+/**
+ * Enable asymmetric pause frames
+ */
+#define DPMAC_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
+
+/**
+ * Advertised link speeds
+ */
+#define DPMAC_ADVERTISED_10BASET_FULL 0x0000000000000001ULL
+#define DPMAC_ADVERTISED_100BASET_FULL 0x0000000000000002ULL
+#define DPMAC_ADVERTISED_1000BASET_FULL 0x0000000000000004ULL
+#define DPMAC_ADVERTISED_10000BASET_FULL 0x0000000000000010ULL
+#define DPMAC_ADVERTISED_2500BASEX_FULL 0x0000000000000020ULL
+
+/**
+ * Advertise auto-negotiation enable
+ */
+#define DPMAC_ADVERTISED_AUTONEG 0x0000000000000008ULL
+
+/**
+ * struct dpmac_link_cfg - Structure representing DPMAC link configuration
+ * @rate: Link's rate - in Mbps
+ * @options: Enable/Disable DPMAC link cfg features (bitmap)
+ * @advertising: Speeds that are advertised for autoneg (bitmap)
+ */
+struct dpmac_link_cfg {
+ u32 rate;
+ u64 options;
+ u64 advertising;
+};
+
+int dpmac_get_link_cfg(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_link_cfg *cfg);
+
+int dpmac_get_link_cfg_v2(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_link_cfg *cfg);
+
+/**
+ * struct dpmac_link_state - DPMAC link configuration request
+ * @rate: Rate in Mbps
+ * @options: Enable/Disable DPMAC link cfg features (bitmap)
+ * @up: Link state
+ * @state_valid: Ignore/Update the state of the link
+ * @supported: Speeds capability of the phy (bitmap)
+ * @advertising: Speeds that are advertised for autoneg (bitmap)
+ */
+struct dpmac_link_state {
+ u32 rate;
+ u64 options;
+ int up;
+ int state_valid;
+ u64 supported;
+ u64 advertising;
+};
+
+int dpmac_set_link_state(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_link_state *link_state);
+
+int dpmac_set_link_state_v2(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpmac_link_state *link_state);
+
+/**
+ * enum dpmac_counter - DPMAC counter types
+ * @DPMAC_CNT_ING_FRAME_64: counts 64-byte frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-byte frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-byte frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-byte frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-byte frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-byte frames, good or bad.
+ * @DPMAC_CNT_ING_FRAME_1519_MAX: counts frames of 1519 bytes and larger
+ *				  (up to the maximum frame length specified),
+ *				  good or bad.
+ * @DPMAC_CNT_ING_FRAG: counts received frames shorter than 64 bytes
+ *			with a bad CRC.
+ * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length
+ *			  specified, with a bad frame check sequence.
+ * @DPMAC_CNT_ING_FRAME_DISCARD: counts frames dropped due to internal errors.
+ *				 Occurs when a receive FIFO overflows.
+ *				 Also includes frames truncated as a result of
+ *				 the receive FIFO overflow.
+ * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error
+ *			     (optionally used for a wrong SFD).
+ * @DPMAC_CNT_EGR_UNDERSIZED: counts transmitted frames that were less than
+ *			      64 bytes long with a good CRC.
+ * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length
+ * specified, with a good frame check sequence.
+ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames (regular and PFC)
+ * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted
+ * (regular and PFC).
+ * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid
+ * frames and valid pause frames.
+ * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames.
+ * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames.
+ * @DPMAC_CNT_ING_ALL_FRAME: counts all frames received, good or bad.
+ * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames.
+ * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error
+ *			      (except for undersized/fragment frames).
+ * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid
+ * frames and valid pause frames transmitted.
+ * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames.
+ * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames.
+ * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames.
+ * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error.
+ * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including
+ * pause frames.
+ * @DPMAC_CNT_ENG_GOOD_FRAME: counts frames transmitted without error, including
+ * pause frames.
+ */
+enum dpmac_counter {
+ DPMAC_CNT_ING_FRAME_64,
+ DPMAC_CNT_ING_FRAME_127,
+ DPMAC_CNT_ING_FRAME_255,
+ DPMAC_CNT_ING_FRAME_511,
+ DPMAC_CNT_ING_FRAME_1023,
+ DPMAC_CNT_ING_FRAME_1518,
+ DPMAC_CNT_ING_FRAME_1519_MAX,
+ DPMAC_CNT_ING_FRAG,
+ DPMAC_CNT_ING_JABBER,
+ DPMAC_CNT_ING_FRAME_DISCARD,
+ DPMAC_CNT_ING_ALIGN_ERR,
+ DPMAC_CNT_EGR_UNDERSIZED,
+ DPMAC_CNT_ING_OVERSIZED,
+ DPMAC_CNT_ING_VALID_PAUSE_FRAME,
+ DPMAC_CNT_EGR_VALID_PAUSE_FRAME,
+ DPMAC_CNT_ING_BYTE,
+ DPMAC_CNT_ING_MCAST_FRAME,
+ DPMAC_CNT_ING_BCAST_FRAME,
+ DPMAC_CNT_ING_ALL_FRAME,
+ DPMAC_CNT_ING_UCAST_FRAME,
+ DPMAC_CNT_ING_ERR_FRAME,
+ DPMAC_CNT_EGR_BYTE,
+ DPMAC_CNT_EGR_MCAST_FRAME,
+ DPMAC_CNT_EGR_BCAST_FRAME,
+ DPMAC_CNT_EGR_UCAST_FRAME,
+ DPMAC_CNT_EGR_ERR_FRAME,
+ DPMAC_CNT_ING_GOOD_FRAME,
+ DPMAC_CNT_ENG_GOOD_FRAME
+};
+
+int dpmac_get_counter(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ enum dpmac_counter type,
+ u64 *counter);
+
+/**
+ * dpmac_set_port_mac_addr() - Set a MAC address associated with the physical
+ *		port. The address is not used for filtering; the MAC is always
+ *		in promiscuous mode. It is passed to DPNIs through the DPNI API
+ *		for application use.
+ * @mc_io: Pointer to opaque I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPMAC object
+ * @addr: MAC address to set
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpmac_set_port_mac_addr(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ const u8 addr[6]);
+
+int dpmac_get_api_version(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 *major_ver,
+ u16 *minor_ver);
+
+#endif /* __FSL_DPMAC_H */
diff --git a/drivers/staging/fsl-dpaa2/mac/mac.c b/drivers/staging/fsl-dpaa2/mac/mac.c
new file mode 100644
index 000000000000..65cbdbe7f313
--- /dev/null
+++ b/drivers/staging/fsl-dpaa2/mac/mac.c
@@ -0,0 +1,819 @@
+/* Copyright 2015 Freescale Semiconductor Inc.
+ * Copyright 2018-2019 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/msi.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_vlan.h>
+
+#include <uapi/linux/if_bridge.h>
+#include <net/netlink.h>
+
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+
+#include <linux/fsl/mc.h>
+
+#include "dpmac.h"
+#include "dpmac-cmd.h"
+
+struct dpaa2_mac_priv {
+ struct net_device *netdev;
+ struct fsl_mc_device *mc_dev;
+ struct dpmac_attr attr;
+ struct dpmac_link_state old_state;
+ u16 dpmac_ver_major;
+ u16 dpmac_ver_minor;
+};
+
+/* TODO: fix the 10G modes, mapping can't be right:
+ * XGMII is parallel
+ * XAUI is serial, using 8b/10b encoding
+ * XFI is also serial but using 64b/66b encoding
+ * they can't all map to XGMII...
+ *
+ * This must be kept in sync with enum dpmac_eth_if.
+ */
+static phy_interface_t dpaa2_mac_iface_mode[] = {
+ PHY_INTERFACE_MODE_MII, /* DPMAC_ETH_IF_MII */
+ PHY_INTERFACE_MODE_RMII, /* DPMAC_ETH_IF_RMII */
+ PHY_INTERFACE_MODE_SMII, /* DPMAC_ETH_IF_SMII */
+ PHY_INTERFACE_MODE_GMII, /* DPMAC_ETH_IF_GMII */
+ PHY_INTERFACE_MODE_RGMII, /* DPMAC_ETH_IF_RGMII */
+ PHY_INTERFACE_MODE_SGMII, /* DPMAC_ETH_IF_SGMII */
+ PHY_INTERFACE_MODE_QSGMII, /* DPMAC_ETH_IF_QSGMII */
+ PHY_INTERFACE_MODE_XAUI, /* DPMAC_ETH_IF_XAUI */
+ PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_XFI */
+ PHY_INTERFACE_MODE_XGMII, /* DPMAC_ETH_IF_CAUI */
+ PHY_INTERFACE_MODE_1000BASEX, /* DPMAC_ETH_IF_1000BASEX */
+ PHY_INTERFACE_MODE_USXGMII, /* DPMAC_ETH_IF_USXGMII */
+};
+
+static int cmp_dpmac_ver(struct dpaa2_mac_priv *priv,
+ u16 ver_major, u16 ver_minor)
+{
+ if (priv->dpmac_ver_major == ver_major)
+ return priv->dpmac_ver_minor - ver_minor;
+ return priv->dpmac_ver_major - ver_major;
+}
+
+#define DPMAC_LINK_AUTONEG_VER_MAJOR 4
+#define DPMAC_LINK_AUTONEG_VER_MINOR 3
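cmp_dpmac_ver() returns a negative value when the probed firmware's API version is older than the major/minor passed in; the defines above name the first version with autoneg support (4.3). A condensed sketch of the gate used later in this file (identifiers assumed in scope):

	/* Firmware older than 4.3 only understands the original command. */
	if (cmp_dpmac_ver(priv, DPMAC_LINK_AUTONEG_VER_MAJOR,
			  DPMAC_LINK_AUTONEG_VER_MINOR) < 0)
		err = dpmac_set_link_state(mc_io, 0, token, &state);
	else
		err = dpmac_set_link_state_v2(mc_io, 0, token, &state);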
+
+struct dpaa2_mac_link_mode_map {
+ u64 dpmac_lm;
+ u64 ethtool_lm;
+};
+
+static const struct dpaa2_mac_link_mode_map dpaa2_mac_lm_map[] = {
+ {DPMAC_ADVERTISED_10BASET_FULL, ETHTOOL_LINK_MODE_10baseT_Full_BIT},
+ {DPMAC_ADVERTISED_100BASET_FULL, ETHTOOL_LINK_MODE_100baseT_Full_BIT},
+ {DPMAC_ADVERTISED_1000BASET_FULL, ETHTOOL_LINK_MODE_1000baseT_Full_BIT},
+ {DPMAC_ADVERTISED_10000BASET_FULL, ETHTOOL_LINK_MODE_10000baseT_Full_BIT},
+ {DPMAC_ADVERTISED_2500BASEX_FULL, ETHTOOL_LINK_MODE_2500baseT_Full_BIT},
+ {DPMAC_ADVERTISED_AUTONEG, ETHTOOL_LINK_MODE_Autoneg_BIT},
+};
+
+static void link_mode_dpmac2phydev(u64 dpmac_lm, unsigned long *phydev_lm)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_lm_map); i++) {
+ if (dpmac_lm & dpaa2_mac_lm_map[i].dpmac_lm)
+ linkmode_set_bit(dpaa2_mac_lm_map[i].ethtool_lm, phydev_lm);
+ }
+}
+
+static void link_mode_phydev2dpmac(unsigned long *phydev_lm, u64 *dpni_lm)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_lm_map); i++) {
+ if (linkmode_test_bit(dpaa2_mac_lm_map[i].ethtool_lm, phydev_lm))
+ *dpni_lm |= dpaa2_mac_lm_map[i].dpmac_lm;
+ }
+}
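These two helpers translate between the DPMAC advertising bitmaps and phylib's linkmode bitmaps. A sketch of both directions ('phydev' and 'cfg' assumed in scope):

	u64 dpmac_adv = 0;

	/* phylib -> DPMAC, as done before dpmac_set_link_state_v2() */
	link_mode_phydev2dpmac(phydev->advertising, &dpmac_adv);

	/* DPMAC -> phylib, as done when applying a link cfg request */
	link_mode_dpmac2phydev(cfg->advertising, phydev->advertising);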
+
+static void dpaa2_mac_link_changed(struct net_device *netdev)
+{
+ struct phy_device *phydev;
+ struct dpmac_link_state state = { 0 };
+ struct dpaa2_mac_priv *priv = netdev_priv(netdev);
+ int err;
+
+ /* the PHY just notified us of link state change */
+ phydev = netdev->phydev;
+
+ state.up = !!phydev->link;
+ if (phydev->link) {
+ state.rate = phydev->speed;
+
+ if (!phydev->duplex)
+ state.options |= DPMAC_LINK_OPT_HALF_DUPLEX;
+ if (phydev->autoneg)
+ state.options |= DPMAC_LINK_OPT_AUTONEG;
+
+ if (phydev->pause && linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising))
+ state.options |= DPMAC_LINK_OPT_PAUSE;
+ if (phydev->pause && linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising))
+ state.options |= DPMAC_LINK_OPT_ASYM_PAUSE;
+
+ netif_carrier_on(netdev);
+ } else {
+ netif_carrier_off(netdev);
+ }
+
+ /* Call the dpmac_set_link_state() only if there is a change in the
+ * link configuration
+ */
+ if (priv->old_state.up == state.up &&
+ priv->old_state.rate == state.rate &&
+ priv->old_state.options == state.options)
+ return;
+
+ priv->old_state = state;
+ phy_print_status(phydev);
+
+ if (cmp_dpmac_ver(priv, DPMAC_LINK_AUTONEG_VER_MAJOR,
+ DPMAC_LINK_AUTONEG_VER_MINOR) < 0) {
+ err = dpmac_set_link_state(priv->mc_dev->mc_io, 0,
+ priv->mc_dev->mc_handle, &state);
+ } else {
+ link_mode_phydev2dpmac(phydev->supported, &state.supported);
+ link_mode_phydev2dpmac(phydev->advertising, &state.advertising);
+ state.state_valid = 1;
+
+ err = dpmac_set_link_state_v2(priv->mc_dev->mc_io, 0,
+ priv->mc_dev->mc_handle, &state);
+ }
+ if (unlikely(err))
+ dev_err(&priv->mc_dev->dev, "dpmac_set_link_state: %d\n", err);
+}
+
+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
+static int dpaa2_mac_open(struct net_device *netdev)
+{
+ /* start PHY state machine */
+ phy_start(netdev->phydev);
+
+ return 0;
+}
+#endif
+
+static int dpaa2_mac_stop(struct net_device *netdev)
+{
+ if (!netdev->phydev)
+ goto done;
+
+ /* stop PHY state machine */
+ phy_stop(netdev->phydev);
+
+ /* signal link down to firmware */
+ netdev->phydev->link = 0;
+ dpaa2_mac_link_changed(netdev);
+
+done:
+ return 0;
+}
+
+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
+static netdev_tx_t dpaa2_mac_drop_frame(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ /* we don't support I/O for now, drop the frame */
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static void dpaa2_mac_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct dpaa2_mac_priv *priv = netdev_priv(net_dev);
+
+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%u.%u", priv->dpmac_ver_major, priv->dpmac_ver_minor);
+ strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent),
+ sizeof(drvinfo->bus_info));
+}
+
+static int dpaa2_mac_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *ks)
+{
+ phy_ethtool_ksettings_get(netdev->phydev, ks);
+
+ return 0;
+}
+
+static int dpaa2_mac_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *ks)
+{
+ return phy_ethtool_ksettings_set(netdev->phydev, ks);
+}
+
+static void dpaa2_mac_get_stats(struct net_device *netdev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct dpaa2_mac_priv *priv = netdev_priv(netdev);
+ u64 tmp;
+ int err;
+
+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
+ DPMAC_CNT_EGR_MCAST_FRAME,
+ &storage->tx_packets);
+ if (err)
+ goto error;
+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
+ DPMAC_CNT_EGR_BCAST_FRAME, &tmp);
+ if (err)
+ goto error;
+ storage->tx_packets += tmp;
+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
+ DPMAC_CNT_EGR_UCAST_FRAME, &tmp);
+ if (err)
+ goto error;
+ storage->tx_packets += tmp;
+
+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
+ DPMAC_CNT_EGR_UNDERSIZED, &storage->tx_dropped);
+ if (err)
+ goto error;
+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
+ DPMAC_CNT_EGR_BYTE, &storage->tx_bytes);
+ if (err)
+ goto error;
+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
+ DPMAC_CNT_EGR_ERR_FRAME, &storage->tx_errors);
+ if (err)
+ goto error;
+
+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
+ DPMAC_CNT_ING_ALL_FRAME, &storage->rx_packets);
+ if (err)
+ goto error;
+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
+ DPMAC_CNT_ING_MCAST_FRAME, &storage->multicast);
+ if (err)
+ goto error;
+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
+ DPMAC_CNT_ING_FRAME_DISCARD,
+ &storage->rx_dropped);
+ if (err)
+ goto error;
+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
+ DPMAC_CNT_ING_ALIGN_ERR, &storage->rx_errors);
+ if (err)
+ goto error;
+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
+ DPMAC_CNT_ING_OVERSIZED, &tmp);
+ if (err)
+ goto error;
+ storage->rx_errors += tmp;
+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle,
+ DPMAC_CNT_ING_BYTE, &storage->rx_bytes);
+ if (err)
+ goto error;
+
+ return;
+error:
+ netdev_err(netdev, "dpmac_get_counter err %d\n", err);
+}
+
+static struct {
+ enum dpmac_counter id;
+ char name[ETH_GSTRING_LEN];
+} dpaa2_mac_counters[] = {
+ {DPMAC_CNT_ING_ALL_FRAME, "rx all frames"},
+ {DPMAC_CNT_ING_GOOD_FRAME, "rx frames ok"},
+ {DPMAC_CNT_ING_ERR_FRAME, "rx frame errors"},
+ {DPMAC_CNT_ING_FRAME_DISCARD, "rx frame discards"},
+ {DPMAC_CNT_ING_UCAST_FRAME, "rx u-cast"},
+ {DPMAC_CNT_ING_BCAST_FRAME, "rx b-cast"},
+ {DPMAC_CNT_ING_MCAST_FRAME, "rx m-cast"},
+ {DPMAC_CNT_ING_FRAME_64, "rx 64 bytes"},
+ {DPMAC_CNT_ING_FRAME_127, "rx 65-127 bytes"},
+ {DPMAC_CNT_ING_FRAME_255, "rx 128-255 bytes"},
+ {DPMAC_CNT_ING_FRAME_511, "rx 256-511 bytes"},
+ {DPMAC_CNT_ING_FRAME_1023, "rx 512-1023 bytes"},
+ {DPMAC_CNT_ING_FRAME_1518, "rx 1024-1518 bytes"},
+ {DPMAC_CNT_ING_FRAME_1519_MAX, "rx 1519-max bytes"},
+ {DPMAC_CNT_ING_FRAG, "rx frags"},
+ {DPMAC_CNT_ING_JABBER, "rx jabber"},
+ {DPMAC_CNT_ING_ALIGN_ERR, "rx align errors"},
+ {DPMAC_CNT_ING_OVERSIZED, "rx oversized"},
+ {DPMAC_CNT_ING_VALID_PAUSE_FRAME, "rx pause"},
+ {DPMAC_CNT_ING_BYTE, "rx bytes"},
+ {DPMAC_CNT_ENG_GOOD_FRAME, "tx frames ok"},
+ {DPMAC_CNT_EGR_UCAST_FRAME, "tx u-cast"},
+ {DPMAC_CNT_EGR_MCAST_FRAME, "tx m-cast"},
+ {DPMAC_CNT_EGR_BCAST_FRAME, "tx b-cast"},
+ {DPMAC_CNT_EGR_ERR_FRAME, "tx frame errors"},
+ {DPMAC_CNT_EGR_UNDERSIZED, "tx undersized"},
+ {DPMAC_CNT_EGR_VALID_PAUSE_FRAME, "tx b-pause"},
+ {DPMAC_CNT_EGR_BYTE, "tx bytes"},
+};
+
+static void dpaa2_mac_get_strings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ dpaa2_mac_counters[i].name,
+ ETH_GSTRING_LEN);
+ break;
+ }
+}
+
+static void dpaa2_mac_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct dpaa2_mac_priv *priv = netdev_priv(netdev);
+ int i;
+ int err;
+
+ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++) {
+ err = dpmac_get_counter(priv->mc_dev->mc_io,
+ 0,
+ priv->mc_dev->mc_handle,
+ dpaa2_mac_counters[i].id, &data[i]);
+ if (err)
+ netdev_err(netdev, "dpmac_get_counter[%s] err %d\n",
+ dpaa2_mac_counters[i].name, err);
+ }
+}
+
+static int dpaa2_mac_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(dpaa2_mac_counters);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct net_device_ops dpaa2_mac_ndo_ops = {
+ .ndo_open = &dpaa2_mac_open,
+ .ndo_stop = &dpaa2_mac_stop,
+ .ndo_start_xmit = &dpaa2_mac_drop_frame,
+ .ndo_get_stats64 = &dpaa2_mac_get_stats,
+};
+
+static const struct ethtool_ops dpaa2_mac_ethtool_ops = {
+ .get_drvinfo = &dpaa2_mac_get_drvinfo,
+ .get_link_ksettings = &dpaa2_mac_get_link_ksettings,
+ .set_link_ksettings = &dpaa2_mac_set_link_ksettings,
+ .get_strings = &dpaa2_mac_get_strings,
+ .get_ethtool_stats = &dpaa2_mac_get_ethtool_stats,
+ .get_sset_count = &dpaa2_mac_get_sset_count,
+};
+#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
+
+static void configure_link(struct dpaa2_mac_priv *priv,
+ struct dpmac_link_cfg *cfg)
+{
+ struct phy_device *phydev = priv->netdev->phydev;
+
+ if (unlikely(!phydev))
+ return;
+
+ phydev->speed = cfg->rate;
+ phydev->duplex = !!(cfg->options & DPMAC_LINK_OPT_HALF_DUPLEX);
+
+ if (cfg->advertising != 0) {
+ linkmode_zero(phydev->advertising);
+ link_mode_dpmac2phydev(cfg->advertising, phydev->advertising);
+ }
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported)) {
+ if (cfg->options & DPMAC_LINK_OPT_PAUSE)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising);
+ else
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->advertising);
+ }
+
+ if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported)) {
+ if (cfg->options & DPMAC_LINK_OPT_ASYM_PAUSE)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising);
+ else
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->advertising);
+ }
+
+ if (cfg->options & DPMAC_LINK_OPT_AUTONEG) {
+ phydev->autoneg = AUTONEG_ENABLE;
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->advertising);
+ } else {
+ phydev->autoneg = AUTONEG_DISABLE;
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->advertising);
+ }
+
+ phy_start_aneg(phydev);
+}
+
+static irqreturn_t dpaa2_mac_irq_handler(int irq_num, void *arg)
+{
+ struct device *dev = (struct device *)arg;
+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
+ struct dpaa2_mac_priv *priv = dev_get_drvdata(dev);
+ struct net_device *ndev = priv->netdev;
+ struct dpmac_link_cfg link_cfg = { 0 };
+ u32 status;
+ int err;
+
+ err = dpmac_get_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ DPMAC_IRQ_INDEX, &status);
+ if (unlikely(err || !status))
+ return IRQ_NONE;
+
+ /* DPNI-initiated link configuration; 'ifconfig up' also calls this */
+ if (status & DPMAC_IRQ_EVENT_LINK_CFG_REQ) {
+ if (cmp_dpmac_ver(priv, DPMAC_LINK_AUTONEG_VER_MAJOR,
+ DPMAC_LINK_AUTONEG_VER_MINOR) < 0)
+ err = dpmac_get_link_cfg(mc_dev->mc_io, 0,
+ mc_dev->mc_handle, &link_cfg);
+ else
+ err = dpmac_get_link_cfg_v2(mc_dev->mc_io, 0,
+ mc_dev->mc_handle,
+ &link_cfg);
+ if (unlikely(err))
+ goto out;
+
+ configure_link(priv, &link_cfg);
+ }
+
+ if (status & DPMAC_IRQ_EVENT_LINK_DOWN_REQ)
+ phy_stop(ndev->phydev);
+
+ if (status & DPMAC_IRQ_EVENT_LINK_UP_REQ)
+ phy_start(ndev->phydev);
+out:
+ dpmac_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ DPMAC_IRQ_INDEX, status);
+
+ return IRQ_HANDLED;
+}
+
+static int setup_irqs(struct fsl_mc_device *mc_dev)
+{
+ int err = 0;
+ struct fsl_mc_device_irq *irq;
+
+ err = fsl_mc_allocate_irqs(mc_dev);
+ if (err) {
+ dev_err(&mc_dev->dev, "fsl_mc_allocate_irqs err %d\n", err);
+ return err;
+ }
+
+ irq = mc_dev->irqs[0];
+ err = devm_request_threaded_irq(&mc_dev->dev, irq->msi_desc->irq,
+ NULL, &dpaa2_mac_irq_handler,
+ IRQF_NO_SUSPEND | IRQF_ONESHOT,
+ dev_name(&mc_dev->dev), &mc_dev->dev);
+ if (err) {
+ dev_err(&mc_dev->dev, "devm_request_threaded_irq err %d\n",
+ err);
+ goto free_irq;
+ }
+
+ err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ DPMAC_IRQ_INDEX, DPMAC_IRQ_EVENT_LINK_CFG_REQ |
+ DPMAC_IRQ_EVENT_LINK_UP_REQ |
+ DPMAC_IRQ_EVENT_LINK_DOWN_REQ);
+ if (err) {
+ dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err);
+ goto free_irq;
+ }
+ err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ DPMAC_IRQ_INDEX, 1);
+ if (err) {
+ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
+ goto free_irq;
+ }
+
+ return 0;
+
+free_irq:
+ fsl_mc_free_irqs(mc_dev);
+
+ return err;
+}
+
+static void teardown_irqs(struct fsl_mc_device *mc_dev)
+{
+ int err;
+
+ err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ DPMAC_IRQ_INDEX, 0);
+ if (err)
+ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err);
+
+ fsl_mc_free_irqs(mc_dev);
+}
+
+static struct device_node *find_dpmac_node(struct device *dev, u16 dpmac_id)
+{
+ struct device_node *dpmacs, *dpmac = NULL;
+ struct device_node *mc_node = dev->of_node;
+ u32 id;
+ int err;
+
+ dpmacs = of_find_node_by_name(mc_node, "dpmacs");
+ if (!dpmacs) {
+ dev_err(dev, "No dpmacs subnode in device-tree\n");
+ return NULL;
+ }
+
+ while ((dpmac = of_get_next_child(dpmacs, dpmac))) {
+ err = of_property_read_u32(dpmac, "reg", &id);
+ if (err)
+ continue;
+ if (id == dpmac_id)
+ return dpmac;
+ }
+
+ return NULL;
+}
+
+static int dpaa2_mac_probe(struct fsl_mc_device *mc_dev)
+{
+ struct device *dev;
+ struct dpaa2_mac_priv *priv = NULL;
+ struct device_node *phy_node, *dpmac_node;
+ struct net_device *netdev;
+ int if_mode;
+ int err = 0;
+
+ dev = &mc_dev->dev;
+
+ /* prepare a net_dev structure to make the phy lib API happy */
+ netdev = alloc_etherdev(sizeof(*priv));
+ if (!netdev) {
+ dev_err(dev, "alloc_etherdev error\n");
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ priv = netdev_priv(netdev);
+ priv->mc_dev = mc_dev;
+ priv->netdev = netdev;
+
+ SET_NETDEV_DEV(netdev, dev);
+
+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
+ snprintf(netdev->name, IFNAMSIZ, "mac%d", mc_dev->obj_desc.id);
+#endif
+
+ dev_set_drvdata(dev, priv);
+
+ /* We may need to issue MC commands while in atomic context */
+ err = fsl_mc_portal_allocate(mc_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL,
+ &mc_dev->mc_io);
+ if (err || !mc_dev->mc_io) {
+ dev_dbg(dev, "fsl_mc_portal_allocate error: %d\n", err);
+ err = -EPROBE_DEFER;
+ goto err_free_netdev;
+ }
+
+ err = dpmac_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id,
+ &mc_dev->mc_handle);
+ if (err || !mc_dev->mc_handle) {
+ dev_err(dev, "dpmac_open error: %d\n", err);
+ err = -ENODEV;
+ goto err_free_mcp;
+ }
+
+ err = dpmac_get_api_version(mc_dev->mc_io, 0, &priv->dpmac_ver_major,
+ &priv->dpmac_ver_minor);
+ if (err) {
+ dev_err(dev, "dpmac_get_api_version failed\n");
+ goto err_version;
+ }
+
+ if (cmp_dpmac_ver(priv, DPMAC_VER_MAJOR, DPMAC_VER_MINOR) < 0) {
+ dev_err(dev, "DPMAC version %u.%u lower than supported %u.%u\n",
+ priv->dpmac_ver_major, priv->dpmac_ver_minor,
+ DPMAC_VER_MAJOR, DPMAC_VER_MINOR);
+ err = -ENOTSUPP;
+ goto err_version;
+ }
+
+ err = dpmac_get_attributes(mc_dev->mc_io, 0,
+ mc_dev->mc_handle, &priv->attr);
+ if (err) {
+ dev_err(dev, "dpmac_get_attributes err %d\n", err);
+ err = -EINVAL;
+ goto err_close;
+ }
+
+ /* Look up the DPMAC node in the device-tree. */
+ dpmac_node = find_dpmac_node(dev, priv->attr.id);
+ if (!dpmac_node) {
+ dev_err(dev, "No dpmac@%d subnode found.\n", priv->attr.id);
+ err = -ENODEV;
+ goto err_close;
+ }
+
+ err = setup_irqs(mc_dev);
+ if (err) {
+ err = -EFAULT;
+ goto err_close;
+ }
+
+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
+ /* OPTIONAL, register netdev just to make it visible to the user */
+ netdev->netdev_ops = &dpaa2_mac_ndo_ops;
+ netdev->ethtool_ops = &dpaa2_mac_ethtool_ops;
+
+ err = register_netdev(priv->netdev);
+ if (err < 0) {
+ dev_err(dev, "register_netdev error %d\n", err);
+ err = -ENODEV;
+ goto err_free_irq;
+ }
+#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */
+
+	/* get the interface mode from the dpmac OF node or from the MC attributes */
+ if_mode = of_get_phy_mode(dpmac_node);
+ if (if_mode >= 0) {
+ dev_dbg(dev, "\tusing if mode %s for eth_if %d\n",
+ phy_modes(if_mode), priv->attr.eth_if);
+ goto link_type;
+ }
+
+ if (priv->attr.eth_if < ARRAY_SIZE(dpaa2_mac_iface_mode)) {
+ if_mode = dpaa2_mac_iface_mode[priv->attr.eth_if];
+ dev_dbg(dev, "\tusing if mode %s for eth_if %d\n",
+ phy_modes(if_mode), priv->attr.eth_if);
+ } else {
+ dev_err(dev, "Unexpected interface mode %d\n",
+ priv->attr.eth_if);
+ err = -EINVAL;
+ goto err_no_if_mode;
+ }
+
+link_type:
+ /* probe the PHY as fixed-link if the DPMAC attribute indicates so */
+ if (priv->attr.link_type == DPMAC_LINK_TYPE_FIXED)
+ goto probe_fixed_link;
+
+ /* or if there's no phy-handle defined in the device tree */
+ phy_node = of_parse_phandle(dpmac_node, "phy-handle", 0);
+	if (!phy_node)
+		goto probe_fixed_link;
+
+ /* try to connect to the PHY */
+ netdev->phydev = of_phy_connect(netdev, phy_node,
+ &dpaa2_mac_link_changed, 0, if_mode);
+ if (!netdev->phydev) {
+ /* No need for dev_err(); the kernel's loud enough as it is. */
+ dev_dbg(dev, "Can't of_phy_connect() now.\n");
+ /* We might be waiting for the MDIO MUX to probe, so defer
+ * our own probing.
+ */
+ err = -EPROBE_DEFER;
+ goto err_defer;
+ }
+ dev_info(dev, "Connected to %s PHY.\n", phy_modes(if_mode));
+
+probe_fixed_link:
+ if (!netdev->phydev) {
+ struct fixed_phy_status status = {
+ .link = 1,
+ /* fixed-phys don't support 10Gbps speed for now */
+ .speed = 1000,
+ .duplex = 1,
+ };
+
+ /* try to register a fixed link phy */
+ netdev->phydev = fixed_phy_register(PHY_POLL, &status, NULL);
+ if (!netdev->phydev || IS_ERR(netdev->phydev)) {
+ dev_err(dev, "error trying to register fixed PHY\n");
+ /* So we don't crash unregister_netdev() later on */
+ netdev->phydev = NULL;
+ err = -EFAULT;
+ goto err_no_phy;
+ }
+
+ err = phy_connect_direct(netdev, netdev->phydev,
+ &dpaa2_mac_link_changed, if_mode);
+ if (err) {
+ dev_err(dev, "error trying to connect to PHY\n");
+ goto err_no_phy;
+ }
+
+ dev_info(dev, "Registered fixed PHY.\n");
+ }
+
+ return 0;
+
+err_no_if_mode:
+err_defer:
+err_no_phy:
+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
+ unregister_netdev(netdev);
+err_free_irq:
+#endif
+ teardown_irqs(mc_dev);
+err_version:
+err_close:
+ dpmac_close(mc_dev->mc_io, 0, mc_dev->mc_handle);
+err_free_mcp:
+ fsl_mc_portal_free(mc_dev->mc_io);
+err_free_netdev:
+ free_netdev(netdev);
+err_exit:
+ return err;
+}
+
+static int dpaa2_mac_remove(struct fsl_mc_device *mc_dev)
+{
+ struct device *dev = &mc_dev->dev;
+ struct dpaa2_mac_priv *priv = dev_get_drvdata(dev);
+ struct net_device *netdev = priv->netdev;
+
+ if (netdev->flags & IFF_UP)
+ dpaa2_mac_stop(netdev);
+
+ if (phy_is_pseudo_fixed_link(netdev->phydev))
+ fixed_phy_unregister(netdev->phydev);
+ else
+ phy_disconnect(netdev->phydev);
+ netdev->phydev = NULL;
+
+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS
+ unregister_netdev(priv->netdev);
+#endif
+ teardown_irqs(priv->mc_dev);
+ dpmac_close(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle);
+ fsl_mc_portal_free(priv->mc_dev->mc_io);
+ free_netdev(priv->netdev);
+
+ dev_set_drvdata(dev, NULL);
+
+ return 0;
+}
+
+static const struct fsl_mc_device_id dpaa2_mac_match_id_table[] = {
+ {
+ .vendor = FSL_MC_VENDOR_FREESCALE,
+ .obj_type = "dpmac",
+ },
+ { .vendor = 0x0 }
+};
+MODULE_DEVICE_TABLE(fslmc, dpaa2_mac_match_id_table);
+
+static struct fsl_mc_driver dpaa2_mac_drv = {
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ },
+ .probe = dpaa2_mac_probe,
+ .remove = dpaa2_mac_remove,
+ .match_id_table = dpaa2_mac_match_id_table,
+};
+
+module_fsl_mc_driver(dpaa2_mac_drv);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DPAA2 PHY proxy interface driver");
diff --git a/drivers/staging/fsl_ppfe/Kconfig b/drivers/staging/fsl_ppfe/Kconfig
new file mode 100644
index 000000000000..5a12c4309952
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/Kconfig
@@ -0,0 +1,21 @@
+#
+# Freescale Programmable Packet Forwarding Engine driver
+#
+config FSL_PPFE
+ bool "Freescale PPFE Driver"
+ select FSL_GUTS
+ default n
+ ---help---
+ Freescale LS1012A SoC has a Programmable Packet Forwarding Engine.
+	  It provides two high-performance Ethernet interfaces.
+ This driver initializes, programs and controls the PPFE.
+ Use this driver to enable network connectivity on LS1012A platforms.
+
+if FSL_PPFE
+
+config FSL_PPFE_UTIL_DISABLED
+ bool "Disable PPFE UTIL Processor Engine"
+ ---help---
+	  The UTIL PE should be enabled only when required; say Y here to
+	  keep it disabled.
+
+endif # FSL_PPFE
diff --git a/drivers/staging/fsl_ppfe/Makefile b/drivers/staging/fsl_ppfe/Makefile
new file mode 100644
index 000000000000..ebc3aa21d052
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/Makefile
@@ -0,0 +1,20 @@
+#
+# Makefile for the Freescale PPFE driver
+#
+
+ccflags-y += -I $(srctree)/$(src)/include -I $(srctree)/$(src)
+
+obj-m += pfe.o
+
+pfe-y += pfe_mod.o \
+ pfe_hw.o \
+ pfe_firmware.o \
+ pfe_ctrl.o \
+ pfe_hif.o \
+	pfe_hif_lib.o \
+ pfe_eth.o \
+ pfe_sysfs.o \
+ pfe_debugfs.o \
+ pfe_ls1012a_platform.o \
+ pfe_hal.o \
+ pfe_cdev.o
diff --git a/drivers/staging/fsl_ppfe/TODO b/drivers/staging/fsl_ppfe/TODO
new file mode 100644
index 000000000000..43c48ccdf81a
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/TODO
@@ -0,0 +1,2 @@
+TODO:
+ - provide pfe pe monitoring support
diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus.h b/drivers/staging/fsl_ppfe/include/pfe/cbus.h
new file mode 100644
index 000000000000..04503d28c982
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CBUS_H_
+#define _CBUS_H_
+
+#define EMAC1_BASE_ADDR (CBUS_BASE_ADDR + 0x200000)
+#define EGPI1_BASE_ADDR (CBUS_BASE_ADDR + 0x210000)
+#define EMAC2_BASE_ADDR (CBUS_BASE_ADDR + 0x220000)
+#define EGPI2_BASE_ADDR (CBUS_BASE_ADDR + 0x230000)
+#define BMU1_BASE_ADDR (CBUS_BASE_ADDR + 0x240000)
+#define BMU2_BASE_ADDR (CBUS_BASE_ADDR + 0x250000)
+#define ARB_BASE_ADDR (CBUS_BASE_ADDR + 0x260000)
+#define DDR_CONFIG_BASE_ADDR (CBUS_BASE_ADDR + 0x270000)
+#define HIF_BASE_ADDR (CBUS_BASE_ADDR + 0x280000)
+#define HGPI_BASE_ADDR (CBUS_BASE_ADDR + 0x290000)
+#define LMEM_BASE_ADDR (CBUS_BASE_ADDR + 0x300000)
+#define LMEM_SIZE 0x10000
+#define LMEM_END (LMEM_BASE_ADDR + LMEM_SIZE)
+#define TMU_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x310000)
+#define CLASS_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x320000)
+#define HIF_NOCPY_BASE_ADDR (CBUS_BASE_ADDR + 0x350000)
+#define UTIL_CSR_BASE_ADDR (CBUS_BASE_ADDR + 0x360000)
+#define CBUS_GPT_BASE_ADDR (CBUS_BASE_ADDR + 0x370000)
+
+/*
+ * defgroup XXX_MEM_ACCESS_ADDR PE memory access through CSR
+ * XXX_MEM_ACCESS_ADDR register bit definitions.
+ */
+#define PE_MEM_ACCESS_WRITE BIT(31) /* Internal Memory Write. */
+#define PE_MEM_ACCESS_IMEM BIT(15)
+#define PE_MEM_ACCESS_DMEM BIT(16)
+
+/* Byte enables for the internal memory access. These are interpreted in BE */
+#define PE_MEM_ACCESS_BYTE_ENABLE(offset, size) \
+ ({ typeof(size) size_ = (size); \
+ (((BIT(size_) - 1) << (4 - (offset) - (size_))) & 0xf) << 24; })
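+
+/*
+ * Illustrative expansions (values follow from the macro above):
+ *   PE_MEM_ACCESS_BYTE_ENABLE(0, 4) = (0xf << 0) << 24  all four bytes
+ *   PE_MEM_ACCESS_BYTE_ENABLE(1, 1) = (0x1 << 2) << 24  single byte at
+ *                                                       offset 1
+ */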
+
+#include "cbus/emac_mtip.h"
+#include "cbus/gpi.h"
+#include "cbus/bmu.h"
+#include "cbus/hif.h"
+#include "cbus/tmu_csr.h"
+#include "cbus/class_csr.h"
+#include "cbus/hif_nocpy.h"
+#include "cbus/util_csr.h"
+
+/* PFE cores states */
+#define CORE_DISABLE 0x00000000
+#define CORE_ENABLE 0x00000001
+#define CORE_SW_RESET 0x00000002
+
+/* LMEM defines */
+#define LMEM_HDR_SIZE 0x0010
+#define LMEM_BUF_SIZE_LN2 0x7
+#define LMEM_BUF_SIZE BIT(LMEM_BUF_SIZE_LN2)
+
+/* DDR defines */
+#define DDR_HDR_SIZE 0x0100
+#define DDR_BUF_SIZE_LN2 0xb
+#define DDR_BUF_SIZE BIT(DDR_BUF_SIZE_LN2)
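+
+/* i.e. 128-byte LMEM buffers and 2048-byte DDR buffers */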
+
+#endif /* _CBUS_H_ */
diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h b/drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
new file mode 100644
index 000000000000..87738ca3cfbb
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/bmu.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _BMU_H_
+#define _BMU_H_
+
+#define BMU_VERSION 0x000
+#define BMU_CTRL 0x004
+#define BMU_UCAST_CONFIG 0x008
+#define BMU_UCAST_BASE_ADDR 0x00c
+#define BMU_BUF_SIZE 0x010
+#define BMU_BUF_CNT 0x014
+#define BMU_THRES 0x018
+#define BMU_INT_SRC 0x020
+#define BMU_INT_ENABLE 0x024
+#define BMU_ALLOC_CTRL 0x030
+#define BMU_FREE_CTRL 0x034
+#define BMU_FREE_ERR_ADDR 0x038
+#define BMU_CURR_BUF_CNT 0x03c
+#define BMU_MCAST_CNT 0x040
+#define BMU_MCAST_ALLOC_CTRL 0x044
+#define BMU_REM_BUF_CNT 0x048
+#define BMU_LOW_WATERMARK 0x050
+#define BMU_HIGH_WATERMARK 0x054
+#define BMU_INT_MEM_ACCESS 0x100
+
+struct BMU_CFG {
+ unsigned long baseaddr;
+ u32 count;
+ u32 size;
+ u32 low_watermark;
+ u32 high_watermark;
+};
+
+#define BMU1_BUF_SIZE LMEM_BUF_SIZE_LN2
+#define BMU2_BUF_SIZE DDR_BUF_SIZE_LN2
+
+#define BMU2_MCAST_ALLOC_CTRL (BMU2_BASE_ADDR + BMU_MCAST_ALLOC_CTRL)
+
+#endif /* _BMU_H_ */
diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h b/drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
new file mode 100644
index 000000000000..e4dadff58768
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/class_csr.h
@@ -0,0 +1,289 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _CLASS_CSR_H_
+#define _CLASS_CSR_H_
+
+/* @file class_csr.h.
+ * class_csr - block containing all the classifier control and status registers.
+ * Mapped on CBUS and accessible from all PE's and ARM.
+ */
+#define CLASS_VERSION (CLASS_CSR_BASE_ADDR + 0x000)
+#define CLASS_TX_CTRL (CLASS_CSR_BASE_ADDR + 0x004)
+#define CLASS_INQ_PKTPTR (CLASS_CSR_BASE_ADDR + 0x010)
+
+/* (ddr_hdr_size[24:16], lmem_hdr_size[5:0]) */
+#define CLASS_HDR_SIZE (CLASS_CSR_BASE_ADDR + 0x014)
+
+/* LMEM header size for the Classifier block. Data in the LMEM
+ * is written from this offset.
+ */
+#define CLASS_HDR_SIZE_LMEM(off) ((off) & 0x3f)
+
+/* DDR header size for the Classifier block. Data in the DDR
+ * is written from this offset.
+ */
+#define CLASS_HDR_SIZE_DDR(off) (((off) & 0x1ff) << 16)
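+
+/*
+ * Example (illustrative sketch): the two fields are OR'ed together when
+ * programming CLASS_HDR_SIZE, e.g. with the cbus.h defaults:
+ *
+ *   writel(CLASS_HDR_SIZE_LMEM(LMEM_HDR_SIZE) |
+ *          CLASS_HDR_SIZE_DDR(DDR_HDR_SIZE), CLASS_HDR_SIZE);
+ */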
+
+/* DMEM address of first [15:0] and second [31:16] buffers on QB side. */
+#define CLASS_PE0_QB_DM_ADDR0 (CLASS_CSR_BASE_ADDR + 0x020)
+
+/* DMEM address of third [15:0] and fourth [31:16] buffers on QB side. */
+#define CLASS_PE0_QB_DM_ADDR1 (CLASS_CSR_BASE_ADDR + 0x024)
+
+/* DMEM address of first [15:0] and second [31:16] buffers on RO side. */
+#define CLASS_PE0_RO_DM_ADDR0 (CLASS_CSR_BASE_ADDR + 0x060)
+
+/* DMEM address of third [15:0] and fourth [31:16] buffers on RO side. */
+#define CLASS_PE0_RO_DM_ADDR1 (CLASS_CSR_BASE_ADDR + 0x064)
+
+/* @name Class PE memory access. Allows external PE's and HOST to
+ * read/write PMEM/DMEM memory ranges for each classifier PE.
+ */
+/* {sr_pe_mem_cmd[31], csr_pe_mem_wren[27:24], csr_pe_mem_addr[23:0]},
+ * See \ref XXX_MEM_ACCESS_ADDR for details.
+ */
+#define CLASS_MEM_ACCESS_ADDR (CLASS_CSR_BASE_ADDR + 0x100)
+
+/* Internal Memory Access Write Data [31:0] */
+#define CLASS_MEM_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x104)
+
+/* Internal Memory Access Read Data [31:0] */
+#define CLASS_MEM_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x108)
+#define CLASS_TM_INQ_ADDR (CLASS_CSR_BASE_ADDR + 0x114)
+#define CLASS_PE_STATUS (CLASS_CSR_BASE_ADDR + 0x118)
+
+#define CLASS_PHY1_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x11c)
+#define CLASS_PHY1_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x120)
+#define CLASS_PHY1_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x124)
+#define CLASS_PHY1_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x128)
+#define CLASS_PHY1_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x12c)
+#define CLASS_PHY1_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x130)
+#define CLASS_PHY1_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x134)
+#define CLASS_PHY1_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x138)
+#define CLASS_PHY1_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x13c)
+#define CLASS_PHY1_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x140)
+#define CLASS_PHY2_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x144)
+#define CLASS_PHY2_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x148)
+#define CLASS_PHY2_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x14c)
+#define CLASS_PHY2_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x150)
+#define CLASS_PHY2_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x154)
+#define CLASS_PHY2_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x158)
+#define CLASS_PHY2_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x15c)
+#define CLASS_PHY2_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x160)
+#define CLASS_PHY2_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x164)
+#define CLASS_PHY2_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x168)
+#define CLASS_PHY3_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x16c)
+#define CLASS_PHY3_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x170)
+#define CLASS_PHY3_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x174)
+#define CLASS_PHY3_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x178)
+#define CLASS_PHY3_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x17c)
+#define CLASS_PHY3_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x180)
+#define CLASS_PHY3_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x184)
+#define CLASS_PHY3_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x188)
+#define CLASS_PHY3_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x18c)
+#define CLASS_PHY3_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x190)
+#define CLASS_PHY1_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x194)
+#define CLASS_PHY1_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x198)
+#define CLASS_PHY1_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x19c)
+#define CLASS_PHY1_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a0)
+#define CLASS_PHY2_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a4)
+#define CLASS_PHY2_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1a8)
+#define CLASS_PHY2_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1ac)
+#define CLASS_PHY2_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b0)
+#define CLASS_PHY3_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b4)
+#define CLASS_PHY3_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1b8)
+#define CLASS_PHY3_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1bc)
+#define CLASS_PHY3_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c0)
+#define CLASS_PHY4_ICMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c4)
+#define CLASS_PHY4_IGMP_PKTS (CLASS_CSR_BASE_ADDR + 0x1c8)
+#define CLASS_PHY4_TCP_PKTS (CLASS_CSR_BASE_ADDR + 0x1cc)
+#define CLASS_PHY4_UDP_PKTS (CLASS_CSR_BASE_ADDR + 0x1d0)
+#define CLASS_PHY4_RX_PKTS (CLASS_CSR_BASE_ADDR + 0x1d4)
+#define CLASS_PHY4_TX_PKTS (CLASS_CSR_BASE_ADDR + 0x1d8)
+#define CLASS_PHY4_LP_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1dc)
+#define CLASS_PHY4_INTF_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1e0)
+#define CLASS_PHY4_INTF_MATCH_PKTS (CLASS_CSR_BASE_ADDR + 0x1e4)
+#define CLASS_PHY4_L3_FAIL_PKTS (CLASS_CSR_BASE_ADDR + 0x1e8)
+#define CLASS_PHY4_V4_PKTS (CLASS_CSR_BASE_ADDR + 0x1ec)
+#define CLASS_PHY4_V6_PKTS (CLASS_CSR_BASE_ADDR + 0x1f0)
+#define CLASS_PHY4_CHKSUM_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x1f4)
+#define CLASS_PHY4_TTL_ERR_PKTS (CLASS_CSR_BASE_ADDR + 0x1f8)
+
+#define CLASS_PE_SYS_CLK_RATIO (CLASS_CSR_BASE_ADDR + 0x200)
+#define CLASS_AFULL_THRES (CLASS_CSR_BASE_ADDR + 0x204)
+#define CLASS_GAP_BETWEEN_READS (CLASS_CSR_BASE_ADDR + 0x208)
+#define CLASS_MAX_BUF_CNT (CLASS_CSR_BASE_ADDR + 0x20c)
+#define CLASS_TSQ_FIFO_THRES (CLASS_CSR_BASE_ADDR + 0x210)
+#define CLASS_TSQ_MAX_CNT (CLASS_CSR_BASE_ADDR + 0x214)
+#define CLASS_IRAM_DATA_0 (CLASS_CSR_BASE_ADDR + 0x218)
+#define CLASS_IRAM_DATA_1 (CLASS_CSR_BASE_ADDR + 0x21c)
+#define CLASS_IRAM_DATA_2 (CLASS_CSR_BASE_ADDR + 0x220)
+#define CLASS_IRAM_DATA_3 (CLASS_CSR_BASE_ADDR + 0x224)
+
+#define CLASS_BUS_ACCESS_ADDR (CLASS_CSR_BASE_ADDR + 0x228)
+
+#define CLASS_BUS_ACCESS_WDATA (CLASS_CSR_BASE_ADDR + 0x22c)
+#define CLASS_BUS_ACCESS_RDATA (CLASS_CSR_BASE_ADDR + 0x230)
+
+/* (route_entry_size[9:0], route_hash_size[23:16]
+ * (this is actually ln2(size)))
+ */
+#define CLASS_ROUTE_HASH_ENTRY_SIZE (CLASS_CSR_BASE_ADDR + 0x234)
+
+#define CLASS_ROUTE_ENTRY_SIZE(size) ((size) & 0x1ff)
+#define CLASS_ROUTE_HASH_SIZE(hash_bits) (((hash_bits) & 0xff) << 16)
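+
+/*
+ * Example (illustrative sketch): with the defaults defined later in this
+ * header (CLASS_ROUTE_SIZE 128, CLASS_ROUTE_HASH_BITS 20), the register
+ * might be programmed as:
+ *
+ *   writel(CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) |
+ *          CLASS_ROUTE_HASH_SIZE(CLASS_ROUTE_HASH_BITS),
+ *          CLASS_ROUTE_HASH_ENTRY_SIZE);
+ */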
+
+#define CLASS_ROUTE_TABLE_BASE (CLASS_CSR_BASE_ADDR + 0x238)
+
+#define CLASS_ROUTE_MULTI (CLASS_CSR_BASE_ADDR + 0x23c)
+#define CLASS_SMEM_OFFSET (CLASS_CSR_BASE_ADDR + 0x240)
+#define CLASS_LMEM_BUF_SIZE (CLASS_CSR_BASE_ADDR + 0x244)
+#define CLASS_VLAN_ID (CLASS_CSR_BASE_ADDR + 0x248)
+#define CLASS_BMU1_BUF_FREE (CLASS_CSR_BASE_ADDR + 0x24c)
+#define CLASS_USE_TMU_INQ (CLASS_CSR_BASE_ADDR + 0x250)
+#define CLASS_VLAN_ID1 (CLASS_CSR_BASE_ADDR + 0x254)
+
+#define CLASS_BUS_ACCESS_BASE (CLASS_CSR_BASE_ADDR + 0x258)
+#define CLASS_BUS_ACCESS_BASE_MASK (0xFF000000)
+/* bit 31:24 of PE peripheral address are stored in CLASS_BUS_ACCESS_BASE */
+
+#define CLASS_HIF_PARSE (CLASS_CSR_BASE_ADDR + 0x25c)
+
+#define CLASS_HOST_PE0_GP (CLASS_CSR_BASE_ADDR + 0x260)
+#define CLASS_PE0_GP (CLASS_CSR_BASE_ADDR + 0x264)
+#define CLASS_HOST_PE1_GP (CLASS_CSR_BASE_ADDR + 0x268)
+#define CLASS_PE1_GP (CLASS_CSR_BASE_ADDR + 0x26c)
+#define CLASS_HOST_PE2_GP (CLASS_CSR_BASE_ADDR + 0x270)
+#define CLASS_PE2_GP (CLASS_CSR_BASE_ADDR + 0x274)
+#define CLASS_HOST_PE3_GP (CLASS_CSR_BASE_ADDR + 0x278)
+#define CLASS_PE3_GP (CLASS_CSR_BASE_ADDR + 0x27c)
+#define CLASS_HOST_PE4_GP (CLASS_CSR_BASE_ADDR + 0x280)
+#define CLASS_PE4_GP (CLASS_CSR_BASE_ADDR + 0x284)
+#define CLASS_HOST_PE5_GP (CLASS_CSR_BASE_ADDR + 0x288)
+#define CLASS_PE5_GP (CLASS_CSR_BASE_ADDR + 0x28c)
+
+#define CLASS_PE_INT_SRC (CLASS_CSR_BASE_ADDR + 0x290)
+#define CLASS_PE_INT_ENABLE (CLASS_CSR_BASE_ADDR + 0x294)
+
+#define CLASS_TPID0_TPID1 (CLASS_CSR_BASE_ADDR + 0x298)
+#define CLASS_TPID2 (CLASS_CSR_BASE_ADDR + 0x29c)
+
+#define CLASS_L4_CHKSUM_ADDR (CLASS_CSR_BASE_ADDR + 0x2a0)
+
+#define CLASS_PE0_DEBUG (CLASS_CSR_BASE_ADDR + 0x2a4)
+#define CLASS_PE1_DEBUG (CLASS_CSR_BASE_ADDR + 0x2a8)
+#define CLASS_PE2_DEBUG (CLASS_CSR_BASE_ADDR + 0x2ac)
+#define CLASS_PE3_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b0)
+#define CLASS_PE4_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b4)
+#define CLASS_PE5_DEBUG (CLASS_CSR_BASE_ADDR + 0x2b8)
+
+#define CLASS_STATE (CLASS_CSR_BASE_ADDR + 0x2bc)
+
+/* CLASS defines */
+#define CLASS_PBUF_SIZE 0x100 /* Fixed by hardware */
+#define CLASS_PBUF_HEADER_OFFSET 0x80 /* Can be configured */
+
+/* Can be configured */
+#define CLASS_PBUF0_BASE_ADDR 0x000
+/* Can be configured */
+#define CLASS_PBUF1_BASE_ADDR (CLASS_PBUF0_BASE_ADDR + CLASS_PBUF_SIZE)
+/* Can be configured */
+#define CLASS_PBUF2_BASE_ADDR (CLASS_PBUF1_BASE_ADDR + CLASS_PBUF_SIZE)
+/* Can be configured */
+#define CLASS_PBUF3_BASE_ADDR (CLASS_PBUF2_BASE_ADDR + CLASS_PBUF_SIZE)
+
+#define CLASS_PBUF0_HEADER_BASE_ADDR (CLASS_PBUF0_BASE_ADDR + \
+ CLASS_PBUF_HEADER_OFFSET)
+#define CLASS_PBUF1_HEADER_BASE_ADDR (CLASS_PBUF1_BASE_ADDR + \
+ CLASS_PBUF_HEADER_OFFSET)
+#define CLASS_PBUF2_HEADER_BASE_ADDR (CLASS_PBUF2_BASE_ADDR + \
+ CLASS_PBUF_HEADER_OFFSET)
+#define CLASS_PBUF3_HEADER_BASE_ADDR (CLASS_PBUF3_BASE_ADDR + \
+ CLASS_PBUF_HEADER_OFFSET)
+
+#define CLASS_PE0_RO_DM_ADDR0_VAL ((CLASS_PBUF1_BASE_ADDR << 16) | \
+ CLASS_PBUF0_BASE_ADDR)
+#define CLASS_PE0_RO_DM_ADDR1_VAL ((CLASS_PBUF3_BASE_ADDR << 16) | \
+ CLASS_PBUF2_BASE_ADDR)
+
+#define CLASS_PE0_QB_DM_ADDR0_VAL ((CLASS_PBUF1_HEADER_BASE_ADDR << 16) |\
+ CLASS_PBUF0_HEADER_BASE_ADDR)
+#define CLASS_PE0_QB_DM_ADDR1_VAL ((CLASS_PBUF3_HEADER_BASE_ADDR << 16) |\
+ CLASS_PBUF2_HEADER_BASE_ADDR)
+
+#define CLASS_ROUTE_SIZE 128
+#define CLASS_MAX_ROUTE_SIZE 256
+#define CLASS_ROUTE_HASH_BITS 20
+#define CLASS_ROUTE_HASH_MASK (BIT(CLASS_ROUTE_HASH_BITS) - 1)
+
+/* Can be configured */
+#define CLASS_ROUTE0_BASE_ADDR 0x400
+/* Can be configured */
+#define CLASS_ROUTE1_BASE_ADDR (CLASS_ROUTE0_BASE_ADDR + CLASS_ROUTE_SIZE)
+/* Can be configured */
+#define CLASS_ROUTE2_BASE_ADDR (CLASS_ROUTE1_BASE_ADDR + CLASS_ROUTE_SIZE)
+/* Can be configured */
+#define CLASS_ROUTE3_BASE_ADDR (CLASS_ROUTE2_BASE_ADDR + CLASS_ROUTE_SIZE)
+
+#define CLASS_SA_SIZE 128
+#define CLASS_IPSEC_SA0_BASE_ADDR 0x600
+/* not used */
+#define CLASS_IPSEC_SA1_BASE_ADDR (CLASS_IPSEC_SA0_BASE_ADDR + CLASS_SA_SIZE)
+/* not used */
+#define CLASS_IPSEC_SA2_BASE_ADDR (CLASS_IPSEC_SA1_BASE_ADDR + CLASS_SA_SIZE)
+/* not used */
+#define CLASS_IPSEC_SA3_BASE_ADDR (CLASS_IPSEC_SA2_BASE_ADDR + CLASS_SA_SIZE)
+
+/* generic purpose free dmem buffer, last portion of 2K dmem pbuf */
+#define CLASS_GP_DMEM_BUF_SIZE (2048 - (CLASS_PBUF_SIZE * 4) - \
+ (CLASS_ROUTE_SIZE * 4) - (CLASS_SA_SIZE))
+#define CLASS_GP_DMEM_BUF ((void *)(CLASS_IPSEC_SA0_BASE_ADDR + \
+ CLASS_SA_SIZE))
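+
+/* With the defaults above this works out to
+ * 2048 - (4 * 256) - (4 * 128) - 128 = 384 bytes of free DMEM.
+ */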
+
+#define TWO_LEVEL_ROUTE BIT(0)
+#define PHYNO_IN_HASH BIT(1)
+#define HW_ROUTE_FETCH BIT(3)
+#define HW_BRIDGE_FETCH BIT(5)
+#define IP_ALIGNED BIT(6)
+#define ARC_HIT_CHECK_EN BIT(7)
+#define CLASS_TOE BIT(11)
+#define HASH_NORMAL (0 << 12)
+#define HASH_CRC_PORT BIT(12)
+#define HASH_CRC_IP (2 << 12)
+#define HASH_CRC_PORT_IP (3 << 12)
+#define QB2BUS_LE BIT(15)
+
+#define TCP_CHKSUM_DROP BIT(0)
+#define UDP_CHKSUM_DROP BIT(1)
+#define IPV4_CHKSUM_DROP BIT(9)
+
+/* CLASS_HIF_PARSE bits */
+#define HIF_PKT_CLASS_EN BIT(0)
+#define HIF_PKT_OFFSET(ofst) (((ofst) & 0xF) << 1)
+
+struct class_cfg {
+ u32 toe_mode;
+ unsigned long route_table_baseaddr;
+ u32 route_table_hash_bits;
+ u32 pe_sys_clk_ratio;
+ u32 resume;
+};
+
+#endif /* _CLASS_CSR_H_ */
diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h b/drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
new file mode 100644
index 000000000000..9c5d7919455d
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/emac_mtip.h
@@ -0,0 +1,242 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _EMAC_H_
+#define _EMAC_H_
+
+#include <linux/ethtool.h>
+
+#define EMAC_IEVENT_REG 0x004
+#define EMAC_IMASK_REG 0x008
+#define EMAC_R_DES_ACTIVE_REG 0x010
+#define EMAC_X_DES_ACTIVE_REG 0x014
+#define EMAC_ECNTRL_REG 0x024
+#define EMAC_MII_DATA_REG 0x040
+#define EMAC_MII_CTRL_REG 0x044
+#define EMAC_MIB_CTRL_STS_REG 0x064
+#define EMAC_RCNTRL_REG 0x084
+#define EMAC_TCNTRL_REG 0x0C4
+#define EMAC_PHY_ADDR_LOW 0x0E4
+#define EMAC_PHY_ADDR_HIGH 0x0E8
+#define EMAC_GAUR 0x120
+#define EMAC_GALR 0x124
+#define EMAC_TFWR_STR_FWD 0x144
+#define EMAC_RX_SECTION_FULL 0x190
+#define EMAC_RX_SECTION_EMPTY 0x194
+#define EMAC_TX_SECTION_EMPTY 0x1A0
+#define EMAC_TRUNC_FL 0x1B0
+
+#define RMON_T_DROP 0x200 /* Count of frames not counted correctly */
+#define RMON_T_PACKETS 0x204 /* RMON TX packet count */
+#define RMON_T_BC_PKT 0x208 /* RMON TX broadcast pkts */
+#define RMON_T_MC_PKT 0x20c /* RMON TX multicast pkts */
+#define RMON_T_CRC_ALIGN 0x210 /* RMON TX pkts with CRC align err */
+#define RMON_T_UNDERSIZE 0x214 /* RMON TX pkts < 64 bytes, good CRC */
+#define RMON_T_OVERSIZE 0x218 /* RMON TX pkts > MAX_FL bytes good CRC */
+#define RMON_T_FRAG 0x21c /* RMON TX pkts < 64 bytes, bad CRC */
+#define RMON_T_JAB 0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */
+#define RMON_T_COL 0x224 /* RMON TX collision count */
+#define RMON_T_P64 0x228 /* RMON TX 64 byte pkts */
+#define RMON_T_P65TO127 0x22c /* RMON TX 65 to 127 byte pkts */
+#define RMON_T_P128TO255 0x230 /* RMON TX 128 to 255 byte pkts */
+#define RMON_T_P256TO511 0x234 /* RMON TX 256 to 511 byte pkts */
+#define RMON_T_P512TO1023 0x238 /* RMON TX 512 to 1023 byte pkts */
+#define RMON_T_P1024TO2047 0x23c /* RMON TX 1024 to 2047 byte pkts */
+#define RMON_T_P_GTE2048 0x240 /* RMON TX pkts > 2048 bytes */
+#define RMON_T_OCTETS 0x244 /* RMON TX octets */
+#define IEEE_T_DROP 0x248 /* Count of frames not counted correctly */
+#define IEEE_T_FRAME_OK 0x24c /* Frames tx'd OK */
+#define IEEE_T_1COL 0x250 /* Frames tx'd with single collision */
+#define IEEE_T_MCOL 0x254 /* Frames tx'd with multiple collision */
+#define IEEE_T_DEF 0x258 /* Frames tx'd after deferral delay */
+#define IEEE_T_LCOL 0x25c /* Frames tx'd with late collision */
+#define IEEE_T_EXCOL 0x260 /* Frames tx'd with excesv collisions */
+#define IEEE_T_MACERR 0x264 /* Frames tx'd with TX FIFO underrun */
+#define IEEE_T_CSERR 0x268 /* Frames tx'd with carrier sense err */
+#define IEEE_T_SQE 0x26c /* Frames tx'd with SQE err */
+#define IEEE_T_FDXFC 0x270 /* Flow control pause frames tx'd */
+#define IEEE_T_OCTETS_OK 0x274 /* Octet count for frames tx'd w/o err */
+#define RMON_R_PACKETS 0x284 /* RMON RX packet count */
+#define RMON_R_BC_PKT 0x288 /* RMON RX broadcast pkts */
+#define RMON_R_MC_PKT 0x28c /* RMON RX multicast pkts */
+#define RMON_R_CRC_ALIGN 0x290 /* RMON RX pkts with CRC alignment err */
+#define RMON_R_UNDERSIZE 0x294 /* RMON RX pkts < 64 bytes, good CRC */
+#define RMON_R_OVERSIZE 0x298 /* RMON RX pkts > MAX_FL bytes good CRC */
+#define RMON_R_FRAG 0x29c /* RMON RX pkts < 64 bytes, bad CRC */
+#define RMON_R_JAB 0x2a0 /* RMON RX pkts > MAX_FL bytes, bad CRC */
+#define RMON_R_RESVD_O 0x2a4 /* Reserved */
+#define RMON_R_P64 0x2a8 /* RMON RX 64 byte pkts */
+#define RMON_R_P65TO127 0x2ac /* RMON RX 65 to 127 byte pkts */
+#define RMON_R_P128TO255 0x2b0 /* RMON RX 128 to 255 byte pkts */
+#define RMON_R_P256TO511 0x2b4 /* RMON RX 256 to 511 byte pkts */
+#define RMON_R_P512TO1023 0x2b8 /* RMON RX 512 to 1023 byte pkts */
+#define RMON_R_P1024TO2047 0x2bc /* RMON RX 1024 to 2047 byte pkts */
+#define RMON_R_P_GTE2048 0x2c0 /* RMON RX pkts > 2048 bytes */
+#define RMON_R_OCTETS 0x2c4 /* RMON RX octets */
+#define IEEE_R_DROP 0x2c8 /* Count frames not counted correctly */
+#define IEEE_R_FRAME_OK 0x2cc /* Frames rx'd OK */
+#define IEEE_R_CRC 0x2d0 /* Frames rx'd with CRC err */
+#define IEEE_R_ALIGN 0x2d4 /* Frames rx'd with alignment err */
+#define IEEE_R_MACERR 0x2d8 /* Receive FIFO overflow count */
+#define IEEE_R_FDXFC 0x2dc /* Flow control pause frames rx'd */
+#define IEEE_R_OCTETS_OK 0x2e0 /* Octet cnt for frames rx'd w/o err */
+
+#define EMAC_SMAC_0_0 0x500 /*Supplemental MAC Address 0 (RW).*/
+#define EMAC_SMAC_0_1 0x504 /*Supplemental MAC Address 0 (RW).*/
+
+/* GEMAC definitions and settings */
+
+#define EMAC_PORT_0 0
+#define EMAC_PORT_1 1
+
+/* GEMAC Bit definitions */
+#define EMAC_IEVENT_HBERR 0x80000000
+#define EMAC_IEVENT_BABR 0x40000000
+#define EMAC_IEVENT_BABT 0x20000000
+#define EMAC_IEVENT_GRA 0x10000000
+#define EMAC_IEVENT_TXF 0x08000000
+#define EMAC_IEVENT_TXB 0x04000000
+#define EMAC_IEVENT_RXF 0x02000000
+#define EMAC_IEVENT_RXB 0x01000000
+#define EMAC_IEVENT_MII 0x00800000
+#define EMAC_IEVENT_EBERR 0x00400000
+#define EMAC_IEVENT_LC 0x00200000
+#define EMAC_IEVENT_RL 0x00100000
+#define EMAC_IEVENT_UN 0x00080000
+
+#define EMAC_IMASK_HBERR 0x80000000
+#define EMAC_IMASK_BABR 0x40000000
+#define EMAC_IMASKT_BABT 0x20000000
+#define EMAC_IMASK_GRA 0x10000000
+#define EMAC_IMASKT_TXF 0x08000000
+#define EMAC_IMASK_TXB 0x04000000
+#define EMAC_IMASKT_RXF 0x02000000
+#define EMAC_IMASK_RXB 0x01000000
+#define EMAC_IMASK_MII 0x00800000
+#define EMAC_IMASK_EBERR 0x00400000
+#define EMAC_IMASK_LC 0x00200000
+#define EMAC_IMASKT_RL 0x00100000
+#define EMAC_IMASK_UN 0x00080000
+
+#define EMAC_RCNTRL_MAX_FL_SHIFT 16
+#define EMAC_RCNTRL_LOOP 0x00000001
+#define EMAC_RCNTRL_DRT 0x00000002
+#define EMAC_RCNTRL_MII_MODE 0x00000004
+#define EMAC_RCNTRL_PROM 0x00000008
+#define EMAC_RCNTRL_BC_REJ 0x00000010
+#define EMAC_RCNTRL_FCE 0x00000020
+#define EMAC_RCNTRL_RGMII 0x00000040
+#define EMAC_RCNTRL_SGMII 0x00000080
+#define EMAC_RCNTRL_RMII 0x00000100
+#define EMAC_RCNTRL_RMII_10T 0x00000200
+#define EMAC_RCNTRL_CRC_FWD 0x00004000
+
+#define EMAC_TCNTRL_GTS 0x00000001
+#define EMAC_TCNTRL_HBC 0x00000002
+#define EMAC_TCNTRL_FDEN 0x00000004
+#define EMAC_TCNTRL_TFC_PAUSE 0x00000008
+#define EMAC_TCNTRL_RFC_PAUSE 0x00000010
+
+#define EMAC_ECNTRL_RESET 0x00000001 /* reset the EMAC */
+#define EMAC_ECNTRL_ETHER_EN 0x00000002 /* enable the EMAC */
+#define EMAC_ECNTRL_MAGIC_ENA 0x00000004
+#define EMAC_ECNTRL_SLEEP 0x00000008
+#define EMAC_ECNTRL_SPEED 0x00000020
+#define EMAC_ECNTRL_DBSWAP 0x00000100
+
+#define EMAC_X_WMRK_STRFWD 0x00000100
+
+#define EMAC_X_DES_ACTIVE_TDAR 0x01000000
+#define EMAC_R_DES_ACTIVE_RDAR 0x01000000
+
+#define EMAC_RX_SECTION_EMPTY_V 0x00010006
+/*
+ * The possible operating speeds of the MAC: 10, 100 and 1000Mb modes,
+ * plus a 1000Mb PCS mode.
+ */
+enum mac_speed {SPEED_10M, SPEED_100M, SPEED_1000M, SPEED_1000M_PCS};
+
+/* MII-related definitions */
+#define EMAC_MII_DATA_ST 0x40000000 /* Start of frame delimiter */
+#define EMAC_MII_DATA_OP_RD 0x20000000 /* Perform a read operation */
+#define EMAC_MII_DATA_OP_CL45_RD 0x30000000 /* Perform a Clause 45 read */
+#define EMAC_MII_DATA_OP_WR 0x10000000 /* Perform a write operation */
+#define EMAC_MII_DATA_OP_CL45_WR 0x10000000 /* Perform a Clause 45 write */
+#define EMAC_MII_DATA_PA_MSK 0x0f800000 /* PHY Address field mask */
+#define EMAC_MII_DATA_RA_MSK 0x007c0000 /* PHY Register field mask */
+#define EMAC_MII_DATA_TA 0x00020000 /* Turnaround */
+#define EMAC_MII_DATA_DATAMSK 0x0000ffff /* PHY data field */
+
+#define EMAC_MII_DATA_RA_SHIFT 18 /* MII Register address bits */
+#define EMAC_MII_DATA_RA_MASK 0x1F /* MII Register address mask */
+#define EMAC_MII_DATA_PA_SHIFT 23 /* MII PHY address bits */
+#define EMAC_MII_DATA_PA_MASK 0x1F /* MII PHY address mask */
+
+#define EMAC_MII_DATA_RA(v) (((v) & EMAC_MII_DATA_RA_MASK) << \
+ EMAC_MII_DATA_RA_SHIFT)
+#define EMAC_MII_DATA_PA(v) (((v) & EMAC_MII_DATA_PA_MASK) << \
+ EMAC_MII_DATA_PA_SHIFT)
+#define EMAC_MII_DATA(v) ((v) & 0xffff)
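+
+/*
+ * Example (illustrative sketch): a Clause 22 MII read of register 'reg'
+ * on PHY 'phy_id' would be composed from the fields above roughly as:
+ *
+ *   writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD | EMAC_MII_DATA_TA |
+ *          EMAC_MII_DATA_PA(phy_id) | EMAC_MII_DATA_RA(reg),
+ *          base + EMAC_MII_DATA_REG);
+ */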
+
+#define EMAC_MII_SPEED_SHIFT 1
+#define EMAC_HOLDTIME_SHIFT 8
+#define EMAC_HOLDTIME_MASK 0x7
+#define EMAC_HOLDTIME(v) (((v) & EMAC_HOLDTIME_MASK) << \
+ EMAC_HOLDTIME_SHIFT)
+
+/*
+ * The address organisation for the MAC device. All addresses are split into
+ * two 32-bit register fields. The first one (bottom) holds the lower 32 bits
+ * of the address and the other field holds the high-order bits - 16 bits in
+ * the case of MAC addresses, or 32 bits for the hash address.
+ * In terms of memory storage, the first item (bottom) is assumed to be at a
+ * lower address location than 'top', i.e. 'top' should be at the address of
+ * 'bottom' + 4 bytes.
+ */
+struct pfe_mac_addr {
+ u32 bottom; /* Lower 32-bits of address. */
+ u32 top; /* Upper 32-bits of address. */
+};
+
+/*
+ * The following is the organisation of the address filters section of the MAC
+ * registers. The Cadence MAC contains four possible specific address match
+ * addresses; if an incoming frame corresponds to any one of these four
+ * addresses then the frame will be copied to memory.
+ * It is not necessary for all four of the address match registers to be
+ * programmed; this is application dependent.
+ */
+struct spec_addr {
+ struct pfe_mac_addr one; /* Specific address register 1. */
+ struct pfe_mac_addr two; /* Specific address register 2. */
+ struct pfe_mac_addr three; /* Specific address register 3. */
+ struct pfe_mac_addr four; /* Specific address register 4. */
+};
+
+struct gemac_cfg {
+ u32 mode;
+ u32 speed;
+ u32 duplex;
+};
+
+/* EMAC Hash size */
+#define EMAC_HASH_REG_BITS 64
+
+#define EMAC_SPEC_ADDR_MAX 4
+
+#endif /* _EMAC_H_ */
diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h b/drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
new file mode 100644
index 000000000000..7b295830f50b
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/gpi.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _GPI_H_
+#define _GPI_H_
+
+#define GPI_VERSION 0x00
+#define GPI_CTRL 0x04
+#define GPI_RX_CONFIG 0x08
+#define GPI_HDR_SIZE 0x0c
+#define GPI_BUF_SIZE 0x10
+#define GPI_LMEM_ALLOC_ADDR 0x14
+#define GPI_LMEM_FREE_ADDR 0x18
+#define GPI_DDR_ALLOC_ADDR 0x1c
+#define GPI_DDR_FREE_ADDR 0x20
+#define GPI_CLASS_ADDR 0x24
+#define GPI_DRX_FIFO 0x28
+#define GPI_TRX_FIFO 0x2c
+#define GPI_INQ_PKTPTR 0x30
+#define GPI_DDR_DATA_OFFSET 0x34
+#define GPI_LMEM_DATA_OFFSET 0x38
+#define GPI_TMLF_TX 0x4c
+#define GPI_DTX_ASEQ 0x50
+#define GPI_FIFO_STATUS 0x54
+#define GPI_FIFO_DEBUG 0x58
+#define GPI_TX_PAUSE_TIME 0x5c
+#define GPI_LMEM_SEC_BUF_DATA_OFFSET 0x60
+#define GPI_DDR_SEC_BUF_DATA_OFFSET 0x64
+#define GPI_TOE_CHKSUM_EN 0x68
+#define GPI_OVERRUN_DROPCNT 0x6c
+#define GPI_CSR_MTIP_PAUSE_REG 0x74
+#define GPI_CSR_MTIP_PAUSE_QUANTUM 0x78
+#define GPI_CSR_RX_CNT 0x7c
+#define GPI_CSR_TX_CNT 0x80
+#define GPI_CSR_DEBUG1 0x84
+#define GPI_CSR_DEBUG2 0x88
+
+struct gpi_cfg {
+ u32 lmem_rtry_cnt;
+ u32 tmlf_txthres;
+ u32 aseq_len;
+ u32 mtip_pause_reg;
+};
+
+/* GPI common defines */
+#define GPI_LMEM_BUF_EN 0x1
+#define GPI_DDR_BUF_EN 0x1
+
+/* EGPI 1 defines */
+#define EGPI1_LMEM_RTRY_CNT 0x40
+#define EGPI1_TMLF_TXTHRES 0xBC
+#define EGPI1_ASEQ_LEN 0x50
+
+/* EGPI 2 defines */
+#define EGPI2_LMEM_RTRY_CNT 0x40
+#define EGPI2_TMLF_TXTHRES 0xBC
+#define EGPI2_ASEQ_LEN 0x40
+
+/* EGPI 3 defines */
+#define EGPI3_LMEM_RTRY_CNT 0x40
+#define EGPI3_TMLF_TXTHRES 0xBC
+#define EGPI3_ASEQ_LEN 0x40
+
+/* HGPI defines */
+#define HGPI_LMEM_RTRY_CNT 0x40
+#define HGPI_TMLF_TXTHRES 0xBC
+#define HGPI_ASEQ_LEN 0x40
+
+#define EGPI_PAUSE_TIME 0x000007D0
+#define EGPI_PAUSE_ENABLE 0x40000000
+#endif /* _GPI_H_ */
diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
new file mode 100644
index 000000000000..71cf81a7910c
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _HIF_H_
+#define _HIF_H_
+
+/* @file hif.h.
+ * hif - PFE hif block control and status registers.
+ * Mapped on CBUS and accessible from all PE's and ARM.
+ */
+#define HIF_VERSION (HIF_BASE_ADDR + 0x00)
+#define HIF_TX_CTRL (HIF_BASE_ADDR + 0x04)
+#define HIF_TX_CURR_BD_ADDR (HIF_BASE_ADDR + 0x08)
+#define HIF_TX_ALLOC (HIF_BASE_ADDR + 0x0c)
+#define HIF_TX_BDP_ADDR (HIF_BASE_ADDR + 0x10)
+#define HIF_TX_STATUS (HIF_BASE_ADDR + 0x14)
+#define HIF_RX_CTRL (HIF_BASE_ADDR + 0x20)
+#define HIF_RX_BDP_ADDR (HIF_BASE_ADDR + 0x24)
+#define HIF_RX_STATUS (HIF_BASE_ADDR + 0x30)
+#define HIF_INT_SRC (HIF_BASE_ADDR + 0x34)
+#define HIF_INT_ENABLE (HIF_BASE_ADDR + 0x38)
+#define HIF_POLL_CTRL (HIF_BASE_ADDR + 0x3c)
+#define HIF_RX_CURR_BD_ADDR (HIF_BASE_ADDR + 0x40)
+#define HIF_RX_ALLOC (HIF_BASE_ADDR + 0x44)
+#define HIF_TX_DMA_STATUS (HIF_BASE_ADDR + 0x48)
+#define HIF_RX_DMA_STATUS (HIF_BASE_ADDR + 0x4c)
+#define HIF_INT_COAL (HIF_BASE_ADDR + 0x50)
+
+/* HIF_INT_SRC/ HIF_INT_ENABLE control bits */
+#define HIF_INT BIT(0)
+#define HIF_RXBD_INT BIT(1)
+#define HIF_RXPKT_INT BIT(2)
+#define HIF_TXBD_INT BIT(3)
+#define HIF_TXPKT_INT BIT(4)
+
+/* HIF_TX_CTRL bits */
+#define HIF_CTRL_DMA_EN BIT(0)
+#define HIF_CTRL_BDP_POLL_CTRL_EN BIT(1)
+#define HIF_CTRL_BDP_CH_START_WSTB BIT(2)
+
+/* HIF_RX_STATUS bits */
+#define BDP_CSR_RX_DMA_ACTV BIT(16)
+
+/* HIF_INT_ENABLE bits */
+#define HIF_INT_EN BIT(0)
+#define HIF_RXBD_INT_EN BIT(1)
+#define HIF_RXPKT_INT_EN BIT(2)
+#define HIF_TXBD_INT_EN BIT(3)
+#define HIF_TXPKT_INT_EN BIT(4)
+
+/* HIF_POLL_CTRL bits */
+#define HIF_RX_POLL_CTRL_CYCLE 0x0400
+#define HIF_TX_POLL_CTRL_CYCLE 0x0400
+
+/* HIF_INT_COAL bits */
+#define HIF_INT_COAL_ENABLE BIT(31)
+
+/* Buffer descriptor control bits */
+#define BD_CTRL_BUFLEN_MASK 0x3fff
+#define BD_BUF_LEN(x) ((x) & BD_CTRL_BUFLEN_MASK)
+#define BD_CTRL_CBD_INT_EN BIT(16)
+#define BD_CTRL_PKT_INT_EN BIT(17)
+#define BD_CTRL_LIFM BIT(18)
+#define BD_CTRL_LAST_BD BIT(19)
+#define BD_CTRL_DIR BIT(20)
+#define BD_CTRL_LMEM_CPY BIT(21) /* Valid only for HIF_NOCPY */
+#define BD_CTRL_PKT_XFER BIT(24)
+#define BD_CTRL_DESC_EN BIT(31)
+#define BD_CTRL_PARSE_DISABLE BIT(25)
+#define BD_CTRL_BRFETCH_DISABLE BIT(26)
+#define BD_CTRL_RTFETCH_DISABLE BIT(27)
+
+/* Buffer descriptor status bits */
+#define BD_STATUS_CONN_ID(x) ((x) & 0xffff)
+#define BD_STATUS_DIR_PROC_ID BIT(16)
+#define BD_STATUS_CONN_ID_EN BIT(17)
+#define BD_STATUS_PE2PROC_ID(x) (((x) & 7) << 18)
+#define BD_STATUS_LE_DATA BIT(21)
+#define BD_STATUS_CHKSUM_EN BIT(22)
+
+/* HIF Buffer descriptor status bits */
+#define DIR_PROC_ID BIT(16)
+#define PROC_ID(id) ((id) << 18)
+
+#endif /* _HIF_H_ */
diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
new file mode 100644
index 000000000000..3d4d43ce9fe2
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/hif_nocpy.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _HIF_NOCPY_H_
+#define _HIF_NOCPY_H_
+
+#define HIF_NOCPY_VERSION (HIF_NOCPY_BASE_ADDR + 0x00)
+#define HIF_NOCPY_TX_CTRL (HIF_NOCPY_BASE_ADDR + 0x04)
+#define HIF_NOCPY_TX_CURR_BD_ADDR (HIF_NOCPY_BASE_ADDR + 0x08)
+#define HIF_NOCPY_TX_ALLOC (HIF_NOCPY_BASE_ADDR + 0x0c)
+#define HIF_NOCPY_TX_BDP_ADDR (HIF_NOCPY_BASE_ADDR + 0x10)
+#define HIF_NOCPY_TX_STATUS (HIF_NOCPY_BASE_ADDR + 0x14)
+#define HIF_NOCPY_RX_CTRL (HIF_NOCPY_BASE_ADDR + 0x20)
+#define HIF_NOCPY_RX_BDP_ADDR (HIF_NOCPY_BASE_ADDR + 0x24)
+#define HIF_NOCPY_RX_STATUS (HIF_NOCPY_BASE_ADDR + 0x30)
+#define HIF_NOCPY_INT_SRC (HIF_NOCPY_BASE_ADDR + 0x34)
+#define HIF_NOCPY_INT_ENABLE (HIF_NOCPY_BASE_ADDR + 0x38)
+#define HIF_NOCPY_POLL_CTRL (HIF_NOCPY_BASE_ADDR + 0x3c)
+#define HIF_NOCPY_RX_CURR_BD_ADDR (HIF_NOCPY_BASE_ADDR + 0x40)
+#define HIF_NOCPY_RX_ALLOC (HIF_NOCPY_BASE_ADDR + 0x44)
+#define HIF_NOCPY_TX_DMA_STATUS (HIF_NOCPY_BASE_ADDR + 0x48)
+#define HIF_NOCPY_RX_DMA_STATUS (HIF_NOCPY_BASE_ADDR + 0x4c)
+#define HIF_NOCPY_RX_INQ0_PKTPTR (HIF_NOCPY_BASE_ADDR + 0x50)
+#define HIF_NOCPY_RX_INQ1_PKTPTR (HIF_NOCPY_BASE_ADDR + 0x54)
+#define HIF_NOCPY_TX_PORT_NO (HIF_NOCPY_BASE_ADDR + 0x60)
+#define HIF_NOCPY_LMEM_ALLOC_ADDR (HIF_NOCPY_BASE_ADDR + 0x64)
+#define HIF_NOCPY_CLASS_ADDR (HIF_NOCPY_BASE_ADDR + 0x68)
+#define HIF_NOCPY_TMU_PORT0_ADDR (HIF_NOCPY_BASE_ADDR + 0x70)
+#define HIF_NOCPY_TMU_PORT1_ADDR (HIF_NOCPY_BASE_ADDR + 0x74)
+#define HIF_NOCPY_TMU_PORT2_ADDR (HIF_NOCPY_BASE_ADDR + 0x7c)
+#define HIF_NOCPY_TMU_PORT3_ADDR (HIF_NOCPY_BASE_ADDR + 0x80)
+#define HIF_NOCPY_TMU_PORT4_ADDR (HIF_NOCPY_BASE_ADDR + 0x84)
+#define HIF_NOCPY_INT_COAL (HIF_NOCPY_BASE_ADDR + 0x90)
+
+#endif /* _HIF_NOCPY_H_ */
diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h b/drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
new file mode 100644
index 000000000000..05f3d681d1a4
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/tmu_csr.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _TMU_CSR_H_
+#define _TMU_CSR_H_
+
+#define TMU_VERSION (TMU_CSR_BASE_ADDR + 0x000)
+#define TMU_INQ_WATERMARK (TMU_CSR_BASE_ADDR + 0x004)
+#define TMU_PHY_INQ_PKTPTR (TMU_CSR_BASE_ADDR + 0x008)
+#define TMU_PHY_INQ_PKTINFO (TMU_CSR_BASE_ADDR + 0x00c)
+#define TMU_PHY_INQ_FIFO_CNT (TMU_CSR_BASE_ADDR + 0x010)
+#define TMU_SYS_GENERIC_CONTROL (TMU_CSR_BASE_ADDR + 0x014)
+#define TMU_SYS_GENERIC_STATUS (TMU_CSR_BASE_ADDR + 0x018)
+#define TMU_SYS_GEN_CON0 (TMU_CSR_BASE_ADDR + 0x01c)
+#define TMU_SYS_GEN_CON1 (TMU_CSR_BASE_ADDR + 0x020)
+#define TMU_SYS_GEN_CON2 (TMU_CSR_BASE_ADDR + 0x024)
+#define TMU_SYS_GEN_CON3 (TMU_CSR_BASE_ADDR + 0x028)
+#define TMU_SYS_GEN_CON4 (TMU_CSR_BASE_ADDR + 0x02c)
+#define TMU_TEQ_DISABLE_DROPCHK (TMU_CSR_BASE_ADDR + 0x030)
+#define TMU_TEQ_CTRL (TMU_CSR_BASE_ADDR + 0x034)
+#define TMU_TEQ_QCFG (TMU_CSR_BASE_ADDR + 0x038)
+#define TMU_TEQ_DROP_STAT (TMU_CSR_BASE_ADDR + 0x03c)
+#define TMU_TEQ_QAVG (TMU_CSR_BASE_ADDR + 0x040)
+#define TMU_TEQ_WREG_PROB (TMU_CSR_BASE_ADDR + 0x044)
+#define TMU_TEQ_TRANS_STAT (TMU_CSR_BASE_ADDR + 0x048)
+#define TMU_TEQ_HW_PROB_CFG0 (TMU_CSR_BASE_ADDR + 0x04c)
+#define TMU_TEQ_HW_PROB_CFG1 (TMU_CSR_BASE_ADDR + 0x050)
+#define TMU_TEQ_HW_PROB_CFG2 (TMU_CSR_BASE_ADDR + 0x054)
+#define TMU_TEQ_HW_PROB_CFG3 (TMU_CSR_BASE_ADDR + 0x058)
+#define TMU_TEQ_HW_PROB_CFG4 (TMU_CSR_BASE_ADDR + 0x05c)
+#define TMU_TEQ_HW_PROB_CFG5 (TMU_CSR_BASE_ADDR + 0x060)
+#define TMU_TEQ_HW_PROB_CFG6 (TMU_CSR_BASE_ADDR + 0x064)
+#define TMU_TEQ_HW_PROB_CFG7 (TMU_CSR_BASE_ADDR + 0x068)
+#define TMU_TEQ_HW_PROB_CFG8 (TMU_CSR_BASE_ADDR + 0x06c)
+#define TMU_TEQ_HW_PROB_CFG9 (TMU_CSR_BASE_ADDR + 0x070)
+#define TMU_TEQ_HW_PROB_CFG10 (TMU_CSR_BASE_ADDR + 0x074)
+#define TMU_TEQ_HW_PROB_CFG11 (TMU_CSR_BASE_ADDR + 0x078)
+#define TMU_TEQ_HW_PROB_CFG12 (TMU_CSR_BASE_ADDR + 0x07c)
+#define TMU_TEQ_HW_PROB_CFG13 (TMU_CSR_BASE_ADDR + 0x080)
+#define TMU_TEQ_HW_PROB_CFG14 (TMU_CSR_BASE_ADDR + 0x084)
+#define TMU_TEQ_HW_PROB_CFG15 (TMU_CSR_BASE_ADDR + 0x088)
+#define TMU_TEQ_HW_PROB_CFG16 (TMU_CSR_BASE_ADDR + 0x08c)
+#define TMU_TEQ_HW_PROB_CFG17 (TMU_CSR_BASE_ADDR + 0x090)
+#define TMU_TEQ_HW_PROB_CFG18 (TMU_CSR_BASE_ADDR + 0x094)
+#define TMU_TEQ_HW_PROB_CFG19 (TMU_CSR_BASE_ADDR + 0x098)
+#define TMU_TEQ_HW_PROB_CFG20 (TMU_CSR_BASE_ADDR + 0x09c)
+#define TMU_TEQ_HW_PROB_CFG21 (TMU_CSR_BASE_ADDR + 0x0a0)
+#define TMU_TEQ_HW_PROB_CFG22 (TMU_CSR_BASE_ADDR + 0x0a4)
+#define TMU_TEQ_HW_PROB_CFG23 (TMU_CSR_BASE_ADDR + 0x0a8)
+#define TMU_TEQ_HW_PROB_CFG24 (TMU_CSR_BASE_ADDR + 0x0ac)
+#define TMU_TEQ_HW_PROB_CFG25 (TMU_CSR_BASE_ADDR + 0x0b0)
+#define TMU_TDQ_IIFG_CFG (TMU_CSR_BASE_ADDR + 0x0b4)
+/* [9:0] Scheduler Enable for each scheduler in the TDQ.
+ * This is a global Enable for all schedulers in PHY0
+ */
+#define TMU_TDQ0_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x0b8)
+
+#define TMU_LLM_CTRL (TMU_CSR_BASE_ADDR + 0x0bc)
+#define TMU_LLM_BASE_ADDR (TMU_CSR_BASE_ADDR + 0x0c0)
+#define TMU_LLM_QUE_LEN (TMU_CSR_BASE_ADDR + 0x0c4)
+#define TMU_LLM_QUE_HEADPTR (TMU_CSR_BASE_ADDR + 0x0c8)
+#define TMU_LLM_QUE_TAILPTR (TMU_CSR_BASE_ADDR + 0x0cc)
+#define TMU_LLM_QUE_DROPCNT (TMU_CSR_BASE_ADDR + 0x0d0)
+#define TMU_INT_EN (TMU_CSR_BASE_ADDR + 0x0d4)
+#define TMU_INT_SRC (TMU_CSR_BASE_ADDR + 0x0d8)
+#define TMU_INQ_STAT (TMU_CSR_BASE_ADDR + 0x0dc)
+#define TMU_CTRL (TMU_CSR_BASE_ADDR + 0x0e0)
+
+/* [31] Mem Access Command. 0 = Internal Memory Read, 1 = Internal memory
+ * Write [27:24] Byte Enables of the Internal memory access [23:0] Address of
+ * the internal memory. This address is used to access both the PM and DM of
+ * all the PE's
+ */
+#define TMU_MEM_ACCESS_ADDR (TMU_CSR_BASE_ADDR + 0x0e4)
+
+/* Internal Memory Access Write Data */
+#define TMU_MEM_ACCESS_WDATA (TMU_CSR_BASE_ADDR + 0x0e8)
+/* Internal Memory Access Read Data. The commands are blocked
+ * at the mem_access only
+ */
+#define TMU_MEM_ACCESS_RDATA (TMU_CSR_BASE_ADDR + 0x0ec)
+
+/* [31:0] PHY0 in queue address (must be initialized with one of the
+ * xxx_INQ_PKTPTR cbus addresses)
+ */
+#define TMU_PHY0_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f0)
+/* [31:0] PHY1 in queue address (must be initialized with one of the
+ * xxx_INQ_PKTPTR cbus addresses)
+ */
+#define TMU_PHY1_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f4)
+/* [31:0] PHY2 in queue address (must be initialized with one of the
+ * xxx_INQ_PKTPTR cbus addresses)
+ */
+#define TMU_PHY2_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0f8)
+/* [31:0] PHY3 in queue address (must be initialized with one of the
+ * xxx_INQ_PKTPTR cbus addresses)
+ */
+#define TMU_PHY3_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x0fc)
+#define TMU_BMU_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x100)
+#define TMU_TX_CTRL (TMU_CSR_BASE_ADDR + 0x104)
+
+#define TMU_BUS_ACCESS_WDATA (TMU_CSR_BASE_ADDR + 0x108)
+#define TMU_BUS_ACCESS (TMU_CSR_BASE_ADDR + 0x10c)
+#define TMU_BUS_ACCESS_RDATA (TMU_CSR_BASE_ADDR + 0x110)
+
+#define TMU_PE_SYS_CLK_RATIO (TMU_CSR_BASE_ADDR + 0x114)
+#define TMU_PE_STATUS (TMU_CSR_BASE_ADDR + 0x118)
+#define TMU_TEQ_MAX_THRESHOLD (TMU_CSR_BASE_ADDR + 0x11c)
+/* [31:0] PHY4 in queue address (must be initialized with one of the
+ * xxx_INQ_PKTPTR cbus addresses)
+ */
+#define TMU_PHY4_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x134)
+/* [9:0] Scheduler Enable for each scheduler in the TDQ.
+ * This is a global Enable for all schedulers in PHY1
+ */
+#define TMU_TDQ1_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x138)
+/* [9:0] Scheduler Enable for each scheduler in the TDQ.
+ * This is a global Enable for all schedulers in PHY2
+ */
+#define TMU_TDQ2_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x13c)
+/* [9:0] Scheduler Enable for each scheduler in the TDQ.
+ * This is a global Enable for all schedulers in PHY3
+ */
+#define TMU_TDQ3_SCH_CTRL (TMU_CSR_BASE_ADDR + 0x140)
+#define TMU_BMU_BUF_SIZE (TMU_CSR_BASE_ADDR + 0x144)
+/* [31:0] PHY5 in queue address (must be initialized with one of the
+ * xxx_INQ_PKTPTR cbus addresses)
+ */
+#define TMU_PHY5_INQ_ADDR (TMU_CSR_BASE_ADDR + 0x148)
+
+#define SW_RESET BIT(0) /* Global software reset */
+#define INQ_RESET BIT(2)
+#define TEQ_RESET BIT(3)
+#define TDQ_RESET BIT(4)
+#define PE_RESET BIT(5)
+#define MEM_INIT BIT(6)
+#define MEM_INIT_DONE BIT(7)
+#define LLM_INIT BIT(8)
+#define LLM_INIT_DONE BIT(9)
+#define ECC_MEM_INIT_DONE BIT(10)
+
+struct tmu_cfg {
+ u32 pe_sys_clk_ratio;
+ unsigned long llm_base_addr;
+ u32 llm_queue_len;
+};
+
+/* Not HW related for pfe_ctrl / pfe common defines */
+#define DEFAULT_MAX_QDEPTH 80
+#define DEFAULT_Q0_QDEPTH 511 /* We keep one large queue for host TX QoS */
+#define DEFAULT_TMU3_QDEPTH 127
+
+#endif /* _TMU_CSR_H_ */
diff --git a/drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h b/drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
new file mode 100644
index 000000000000..ae623cdafe7b
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/cbus/util_csr.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _UTIL_CSR_H_
+#define _UTIL_CSR_H_
+
+#define UTIL_VERSION (UTIL_CSR_BASE_ADDR + 0x000)
+#define UTIL_TX_CTRL (UTIL_CSR_BASE_ADDR + 0x004)
+#define UTIL_INQ_PKTPTR (UTIL_CSR_BASE_ADDR + 0x010)
+
+#define UTIL_HDR_SIZE (UTIL_CSR_BASE_ADDR + 0x014)
+
+#define UTIL_PE0_QB_DM_ADDR0 (UTIL_CSR_BASE_ADDR + 0x020)
+#define UTIL_PE0_QB_DM_ADDR1 (UTIL_CSR_BASE_ADDR + 0x024)
+#define UTIL_PE0_RO_DM_ADDR0 (UTIL_CSR_BASE_ADDR + 0x060)
+#define UTIL_PE0_RO_DM_ADDR1 (UTIL_CSR_BASE_ADDR + 0x064)
+
+#define UTIL_MEM_ACCESS_ADDR (UTIL_CSR_BASE_ADDR + 0x100)
+#define UTIL_MEM_ACCESS_WDATA (UTIL_CSR_BASE_ADDR + 0x104)
+#define UTIL_MEM_ACCESS_RDATA (UTIL_CSR_BASE_ADDR + 0x108)
+
+#define UTIL_TM_INQ_ADDR (UTIL_CSR_BASE_ADDR + 0x114)
+#define UTIL_PE_STATUS (UTIL_CSR_BASE_ADDR + 0x118)
+
+#define UTIL_PE_SYS_CLK_RATIO (UTIL_CSR_BASE_ADDR + 0x200)
+#define UTIL_AFULL_THRES (UTIL_CSR_BASE_ADDR + 0x204)
+#define UTIL_GAP_BETWEEN_READS (UTIL_CSR_BASE_ADDR + 0x208)
+#define UTIL_MAX_BUF_CNT (UTIL_CSR_BASE_ADDR + 0x20c)
+#define UTIL_TSQ_FIFO_THRES (UTIL_CSR_BASE_ADDR + 0x210)
+#define UTIL_TSQ_MAX_CNT (UTIL_CSR_BASE_ADDR + 0x214)
+#define UTIL_IRAM_DATA_0 (UTIL_CSR_BASE_ADDR + 0x218)
+#define UTIL_IRAM_DATA_1 (UTIL_CSR_BASE_ADDR + 0x21c)
+#define UTIL_IRAM_DATA_2 (UTIL_CSR_BASE_ADDR + 0x220)
+#define UTIL_IRAM_DATA_3 (UTIL_CSR_BASE_ADDR + 0x224)
+
+#define UTIL_BUS_ACCESS_ADDR (UTIL_CSR_BASE_ADDR + 0x228)
+#define UTIL_BUS_ACCESS_WDATA (UTIL_CSR_BASE_ADDR + 0x22c)
+#define UTIL_BUS_ACCESS_RDATA (UTIL_CSR_BASE_ADDR + 0x230)
+
+#define UTIL_INQ_AFULL_THRES (UTIL_CSR_BASE_ADDR + 0x234)
+
+struct util_cfg {
+ u32 pe_sys_clk_ratio;
+};
+
+#endif /* _UTIL_CSR_H_ */
diff --git a/drivers/staging/fsl_ppfe/include/pfe/pfe.h b/drivers/staging/fsl_ppfe/include/pfe/pfe.h
new file mode 100644
index 000000000000..c5b5ce31a122
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/include/pfe/pfe.h
@@ -0,0 +1,372 @@
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _PFE_H_
+#define _PFE_H_
+
+#include "cbus.h"
+
+#define CLASS_DMEM_BASE_ADDR(i) (0x00000000 | ((i) << 20))
+/*
+ * Only valid for mem access register interface
+ */
+#define CLASS_IMEM_BASE_ADDR(i) (0x00000000 | ((i) << 20))
+#define CLASS_DMEM_SIZE 0x00002000
+#define CLASS_IMEM_SIZE 0x00008000
+
+#define TMU_DMEM_BASE_ADDR(i) (0x00000000 + ((i) << 20))
+/*
+ * Only valid for mem access register interface
+ */
+#define TMU_IMEM_BASE_ADDR(i) (0x00000000 + ((i) << 20))
+#define TMU_DMEM_SIZE 0x00000800
+#define TMU_IMEM_SIZE 0x00002000
+
+#define UTIL_DMEM_BASE_ADDR 0x00000000
+#define UTIL_DMEM_SIZE 0x00002000
+
+#define PE_LMEM_BASE_ADDR 0xc3010000
+#define PE_LMEM_SIZE 0x8000
+#define PE_LMEM_END (PE_LMEM_BASE_ADDR + PE_LMEM_SIZE)
+
+#define DMEM_BASE_ADDR 0x00000000
+#define DMEM_SIZE 0x2000 /* TMU has less... */
+#define DMEM_END (DMEM_BASE_ADDR + DMEM_SIZE)
+
+#define PMEM_BASE_ADDR 0x00010000
+#define PMEM_SIZE 0x8000 /* TMU has less... */
+#define PMEM_END (PMEM_BASE_ADDR + PMEM_SIZE)
+
+/* These check memory ranges from PE point of view/memory map */
+#define IS_DMEM(addr, len) \
+ ({ typeof(addr) addr_ = (addr); \
+ ((unsigned long)(addr_) >= DMEM_BASE_ADDR) && \
+ (((unsigned long)(addr_) + (len)) <= DMEM_END); })
+
+#define IS_PMEM(addr, len) \
+ ({ typeof(addr) addr_ = (addr); \
+ ((unsigned long)(addr_) >= PMEM_BASE_ADDR) && \
+ (((unsigned long)(addr_) + (len)) <= PMEM_END); })
+
+#define IS_PE_LMEM(addr, len) \
+ ({ typeof(addr) addr_ = (addr); \
+ ((unsigned long)(addr_) >= \
+ PE_LMEM_BASE_ADDR) && \
+ (((unsigned long)(addr_) + \
+ (len)) <= PE_LMEM_END); })
+
+#define IS_PFE_LMEM(addr, len) \
+ ({ typeof(addr) addr_ = (addr); \
+ ((unsigned long)(addr_) >= \
+ CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR)) && \
+ (((unsigned long)(addr_) + (len)) <= \
+ CBUS_VIRT_TO_PFE(LMEM_END)); })
+
+#define __IS_PHYS_DDR(addr, len) \
+ ({ typeof(addr) addr_ = (addr); \
+ ((unsigned long)(addr_) >= \
+ DDR_PHYS_BASE_ADDR) && \
+ (((unsigned long)(addr_) + (len)) <= \
+ DDR_PHYS_END); })
+
+#define IS_PHYS_DDR(addr, len) __IS_PHYS_DDR(DDR_PFE_TO_PHYS(addr), len)
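+
+/*
+ * Example (illustrative sketch): a caller might validate a PE address
+ * range before issuing a mem-access transfer:
+ *
+ *   if (!IS_DMEM(addr, len) && !IS_PMEM(addr, len))
+ *           return -EINVAL;
+ */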
+
+/*
+ * If using a run-time virtual address for the CBUS base address, use this
+ * code.
+ */
+extern void *cbus_base_addr;
+extern void *ddr_base_addr;
+extern unsigned long ddr_phys_base_addr;
+extern unsigned int ddr_size;
+
+#define CBUS_BASE_ADDR cbus_base_addr
+#define DDR_PHYS_BASE_ADDR ddr_phys_base_addr
+#define DDR_BASE_ADDR ddr_base_addr
+#define DDR_SIZE ddr_size
+
+#define DDR_PHYS_END (DDR_PHYS_BASE_ADDR + DDR_SIZE)
+
+#define LS1012A_PFE_RESET_WA /*
+ * PFE doesn't have a global reset, so
+ * re-init must take care of a few things
+ * to make the PFE functional after reset
+ */
+#define PFE_CBUS_PHYS_BASE_ADDR 0xc0000000 /* CBUS physical base address
+ * as seen by PE's.
+ */
+/* The same CBUS physical base address, as seen from the PFE side. */
+#define PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE 0xc0000000
+
+#define DDR_PHYS_TO_PFE(p) (((unsigned long int)(p)) & 0x7FFFFFFF)
+#define DDR_PFE_TO_PHYS(p) (((unsigned long int)(p)) | 0x80000000)
+#define CBUS_PHYS_TO_PFE(p) (((p) - PFE_CBUS_PHYS_BASE_ADDR) + \
+ PFE_CBUS_PHYS_BASE_ADDR_FROM_PFE)
+/* Translates to PFE address map */
+
+#define DDR_PHYS_TO_VIRT(p) (((p) - DDR_PHYS_BASE_ADDR) + DDR_BASE_ADDR)
+#define DDR_VIRT_TO_PHYS(v) (((v) - DDR_BASE_ADDR) + DDR_PHYS_BASE_ADDR)
+#define DDR_VIRT_TO_PFE(p) (DDR_PHYS_TO_PFE(DDR_VIRT_TO_PHYS(p)))
+
+#define CBUS_VIRT_TO_PFE(v) (((v) - CBUS_BASE_ADDR) + \
+ PFE_CBUS_PHYS_BASE_ADDR)
+#define CBUS_PFE_TO_VIRT(p) (((unsigned long int)(p) - \
+ PFE_CBUS_PHYS_BASE_ADDR) + CBUS_BASE_ADDR)
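+
+/*
+ * Worked example: for a DDR buffer at kernel virtual address v, the
+ * PE-visible address is obtained in two steps,
+ *
+ *   phys = DDR_VIRT_TO_PHYS(v);   virtual -> CPU physical
+ *   pfe  = DDR_PHYS_TO_PFE(phys); CPU physical -> PE map (bit 31 cleared)
+ *
+ * which is exactly what DDR_VIRT_TO_PFE(v) expands to.
+ */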
+
+/* The part below is used by the QoS control driver on the host */
+#define TMU_APB_BASE_ADDR 0xc1000000 /* TMU base address seen by
+ * pe's
+ */
+
+enum {
+ CLASS0_ID = 0,
+ CLASS1_ID,
+ CLASS2_ID,
+ CLASS3_ID,
+ CLASS4_ID,
+ CLASS5_ID,
+ TMU0_ID,
+ TMU1_ID,
+ TMU2_ID,
+ TMU3_ID,
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+ UTIL_ID,
+#endif
+ MAX_PE
+};
+
+#define CLASS_MASK (BIT(CLASS0_ID) | BIT(CLASS1_ID) |\
+ BIT(CLASS2_ID) | BIT(CLASS3_ID) |\
+ BIT(CLASS4_ID) | BIT(CLASS5_ID))
+#define CLASS_MAX_ID CLASS5_ID
+
+#define TMU_MASK (BIT(TMU0_ID) | BIT(TMU1_ID) |\
+ BIT(TMU3_ID))
+
+#define TMU_MAX_ID TMU3_ID
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+#define UTIL_MASK BIT(UTIL_ID)
+#endif
+
+struct pe_status {
+ u32 cpu_state;
+ u32 activity_counter;
+ u32 rx;
+ union {
+ u32 tx;
+ u32 tmu_qstatus;
+ };
+ u32 drop;
+#if defined(CFG_PE_DEBUG)
+ u32 debug_indicator;
+ u32 debug[16];
+#endif
+} __aligned(16);
+
+struct pe_sync_mailbox {
+ u32 stop;
+ u32 stopped;
+};
+
+/* Drop counter definitions */
+
+#define CLASS_NUM_DROP_COUNTERS 13
+#define UTIL_NUM_DROP_COUNTERS 8
+
+/* PE information.
+ * Structure containing PE's specific information. It is used to create
+ * generic C functions common to all PE's.
+ * Before using the library functions this structure needs to be initialized
+ * with the different registers virtual addresses
+ * (according to the ARM MMU mapping). The default initialization supports a
+ * virtual == physical mapping.
+ */
+struct pe_info {
+ u32 dmem_base_addr; /* PE's dmem base address */
+ u32 pmem_base_addr; /* PE's pmem base address */
+ u32 pmem_size; /* PE's pmem size */
+
+ void *mem_access_wdata; /* PE's _MEM_ACCESS_WDATA register
+ * address
+ */
+ void *mem_access_addr; /* PE's _MEM_ACCESS_ADDR register
+ * address
+ */
+ void *mem_access_rdata; /* PE's _MEM_ACCESS_RDATA register
+ * address
+ */
+};
+
+void pe_lmem_read(u32 *dst, u32 len, u32 offset);
+void pe_lmem_write(u32 *src, u32 len, u32 offset);
+
+void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
+void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len);
+
+u32 pe_pmem_read(int id, u32 addr, u8 size);
+
+void pe_dmem_write(int id, u32 val, u32 addr, u8 size);
+u32 pe_dmem_read(int id, u32 addr, u8 size);
+void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len);
+void class_pe_lmem_memset(u32 dst, int val, unsigned int len);
+void class_bus_write(u32 val, u32 addr, u8 size);
+u32 class_bus_read(u32 addr, u8 size);
+
+#define class_bus_readl(addr) class_bus_read(addr, 4)
+#define class_bus_readw(addr) class_bus_read(addr, 2)
+#define class_bus_readb(addr) class_bus_read(addr, 1)
+
+#define class_bus_writel(val, addr) class_bus_write(val, addr, 4)
+#define class_bus_writew(val, addr) class_bus_write(val, addr, 2)
+#define class_bus_writeb(val, addr) class_bus_write(val, addr, 1)
+
+#define pe_dmem_readl(id, addr) pe_dmem_read(id, addr, 4)
+#define pe_dmem_readw(id, addr) pe_dmem_read(id, addr, 2)
+#define pe_dmem_readb(id, addr) pe_dmem_read(id, addr, 1)
+
+#define pe_dmem_writel(id, val, addr) pe_dmem_write(id, val, addr, 4)
+#define pe_dmem_writew(id, val, addr) pe_dmem_write(id, val, addr, 2)
+#define pe_dmem_writeb(id, val, addr) pe_dmem_write(id, val, addr, 1)
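+
+/*
+ * Example (illustrative sketch): reading and writing back a 32-bit word in
+ * the DMEM of class PE 0 through the wrappers above ('addr' is an arbitrary
+ * valid DMEM address):
+ *
+ *   u32 v = pe_dmem_readl(CLASS0_ID, addr);
+ *   pe_dmem_writel(CLASS0_ID, v, addr);
+ */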
+
+int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
+ struct device *dev);
+
+void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
+ unsigned int ddr_size);
+void bmu_init(void *base, struct BMU_CFG *cfg);
+void bmu_reset(void *base);
+void bmu_enable(void *base);
+void bmu_disable(void *base);
+void bmu_set_config(void *base, struct BMU_CFG *cfg);
+
+/*
+ * An enumerated type for loopback values. This can be one of three values:
+ * no loopback (normal operation), local loopback through the MAC's internal
+ * loopback module, or PHY loopback through the external PHY.
+ */
+#ifndef __MAC_LOOP_ENUM__
+#define __MAC_LOOP_ENUM__
+enum mac_loop {LB_NONE, LB_EXT, LB_LOCAL};
+#endif
+
+void gemac_init(void *base, void *config);
+void gemac_disable_rx_checksum_offload(void *base);
+void gemac_enable_rx_checksum_offload(void *base);
+void gemac_set_speed(void *base, enum mac_speed gem_speed);
+void gemac_set_duplex(void *base, int duplex);
+void gemac_set_mode(void *base, int mode);
+void gemac_enable(void *base);
+void gemac_tx_disable(void *base);
+void gemac_tx_enable(void *base);
+void gemac_disable(void *base);
+void gemac_reset(void *base);
+void gemac_set_address(void *base, struct spec_addr *addr);
+struct spec_addr gemac_get_address(void *base);
+void gemac_set_loop(void *base, enum mac_loop gem_loop);
+void gemac_set_laddr1(void *base, struct pfe_mac_addr *address);
+void gemac_set_laddr2(void *base, struct pfe_mac_addr *address);
+void gemac_set_laddr3(void *base, struct pfe_mac_addr *address);
+void gemac_set_laddr4(void *base, struct pfe_mac_addr *address);
+void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
+ unsigned int entry_index);
+void gemac_clear_laddr1(void *base);
+void gemac_clear_laddr2(void *base);
+void gemac_clear_laddr3(void *base);
+void gemac_clear_laddr4(void *base);
+void gemac_clear_laddrN(void *base, unsigned int entry_index);
+struct pfe_mac_addr gemac_get_hash(void *base);
+void gemac_set_hash(void *base, struct pfe_mac_addr *hash);
+struct pfe_mac_addr gem_get_laddr1(void *base);
+struct pfe_mac_addr gem_get_laddr2(void *base);
+struct pfe_mac_addr gem_get_laddr3(void *base);
+struct pfe_mac_addr gem_get_laddr4(void *base);
+struct pfe_mac_addr gem_get_laddrN(void *base, unsigned int entry_index);
+void gemac_set_config(void *base, struct gemac_cfg *cfg);
+void gemac_allow_broadcast(void *base);
+void gemac_no_broadcast(void *base);
+void gemac_enable_1536_rx(void *base);
+void gemac_disable_1536_rx(void *base);
+void gemac_set_rx_max_fl(void *base, int mtu);
+void gemac_enable_rx_jmb(void *base);
+void gemac_disable_rx_jmb(void *base);
+void gemac_enable_stacked_vlan(void *base);
+void gemac_disable_stacked_vlan(void *base);
+void gemac_enable_pause_rx(void *base);
+void gemac_disable_pause_rx(void *base);
+void gemac_enable_copy_all(void *base);
+void gemac_disable_copy_all(void *base);
+void gemac_set_bus_width(void *base, int width);
+void gemac_set_wol(void *base, u32 wol_conf);
+
+void gpi_init(void *base, struct gpi_cfg *cfg);
+void gpi_reset(void *base);
+void gpi_enable(void *base);
+void gpi_disable(void *base);
+void gpi_set_config(void *base, struct gpi_cfg *cfg);
+
+void class_init(struct class_cfg *cfg);
+void class_reset(void);
+void class_enable(void);
+void class_disable(void);
+void class_set_config(struct class_cfg *cfg);
+
+void tmu_reset(void);
+void tmu_init(struct tmu_cfg *cfg);
+void tmu_enable(u32 pe_mask);
+void tmu_disable(u32 pe_mask);
+u32 tmu_qstatus(u32 if_id);
+u32 tmu_pkts_processed(u32 if_id);
+
+void util_init(struct util_cfg *cfg);
+void util_reset(void);
+void util_enable(void);
+void util_disable(void);
+
+void hif_init(void);
+void hif_tx_enable(void);
+void hif_tx_disable(void);
+void hif_rx_enable(void);
+void hif_rx_disable(void);
+
+/* Get Chip Revision level */
+static inline unsigned int CHIP_REVISION(void)
+{
+	/* For LS1012A always return 1 */
+ return 1;
+}
+
+/* Start HIF rx DMA */
+static inline void hif_rx_dma_start(void)
+{
+ writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_RX_CTRL);
+}
+
+/* Start HIF tx DMA */
+static inline void hif_tx_dma_start(void)
+{
+ writel(HIF_CTRL_DMA_EN | HIF_CTRL_BDP_CH_START_WSTB, HIF_TX_CTRL);
+}
+
+#endif /* _PFE_H_ */
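
For illustration, a minimal sketch (not part of the patch) of how the
width-parameterized DMEM accessors above are used; CLASS0_ID comes from the
PE id enumeration, and 0x820 is the class drop-counter offset
(CLASS_DM_DROP_CNTR) defined in pfe_ctrl.h later in this series:

	/* Hedged sketch: read class PE 0's drop counter over the DMEM
	 * window. PE memory is big-endian; the driver compares raw words
	 * against cpu_to_be32() values, so convert before arithmetic use.
	 */
	static inline u32 example_class0_drop_count(void)
	{
		/* pe_dmem_readl(id, addr) expands to pe_dmem_read(id, addr, 4) */
		return be32_to_cpu(pe_dmem_readl(CLASS0_ID, 0x820));
	}
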
diff --git a/drivers/staging/fsl_ppfe/pfe_cdev.c b/drivers/staging/fsl_ppfe/pfe_cdev.c
new file mode 100644
index 000000000000..a8ce95a4aced
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_cdev.c
@@ -0,0 +1,258 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2018 NXP
+ */
+
+/* @pfe_cdev.c.
+ * Dummy character device representing the PFE to userspace.
+ * - used for interacting with the kernel layer for link status
+ */
+
+#include <linux/eventfd.h>
+#include <linux/irqreturn.h>
+#include <linux/io.h>
+#include <asm/irq.h>
+
+#include "pfe_cdev.h"
+#include "pfe_mod.h"
+
+static int pfe_majno;
+static struct class *pfe_char_class;
+static struct device *pfe_char_dev;
+struct eventfd_ctx *g_trigger;
+
+struct pfe_shared_info link_states[PFE_CDEV_ETH_COUNT];
+
+static int pfe_cdev_open(struct inode *inp, struct file *fp)
+{
+ pr_debug("PFE CDEV device opened.\n");
+ return 0;
+}
+
+static ssize_t pfe_cdev_read(struct file *fp, char *buf,
+ size_t len, loff_t *off)
+{
+	int ret;
+	int i;
+
+	pr_debug("PFE CDEV attempting to copy (%lu) bytes to user.\n",
+		 sizeof(link_states));
+
+ pr_debug("Dump link_state on screen before copy_to_user\n");
+	for (i = 0; i < PFE_CDEV_ETH_COUNT; i++)
+		pr_debug("%u %u\n", link_states[i].phy_id,
+			 link_states[i].state);
+
+ /* Copy to user the value in buffer sized len */
+ ret = copy_to_user(buf, &link_states, sizeof(link_states));
+ if (ret != 0) {
+		pr_err("Failed to copy (%d) of (%lu) requested bytes.\n",
+		       ret, len);
+ return -EFAULT;
+ }
+
+	/* Reset offset to 0 as there is no contextual read offset */
+ *off = 0;
+ pr_debug("Read of (%lu) bytes performed.\n", sizeof(link_states));
+
+ return sizeof(link_states);
+}
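
A hedged userspace counterpart to this read() (a sketch, not part of the
patch): the struct mirrors pfe_shared_info from pfe_cdev.h below, the device
node path is assumed to follow PFE_CDEV_NAME, and the layout (including
padding) must match the kernel side.

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	struct pfe_shared_info {	/* must match pfe_cdev.h */
		uint32_t phy_id;
		uint8_t state;
	};

	int main(void)
	{
		struct pfe_shared_info states[3];	/* PFE_CDEV_ETH_COUNT */
		int i, fd = open("/dev/pfe_us_cdev", O_RDONLY);

		if (fd < 0 || read(fd, states, sizeof(states)) != sizeof(states))
			return 1;
		for (i = 0; i < 3; i++)
			printf("phy %u: link %s\n", states[i].phy_id,
			       states[i].state ? "up" : "down");
		close(fd);
		return 0;
	}
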
+
+/*
+ * This function is for receiving commands from user space through a
+ * non-IOCTL channel. It can be used to configure the device.
+ * TODO: to be filled in the future, if duplex communication with user
+ * space is required.
+ */
+static ssize_t pfe_cdev_write(struct file *fp, const char *buf,
+ size_t len, loff_t *off)
+{
+	pr_info("PFE CDEV write operation not supported!\n");
+
+	return -EOPNOTSUPP;
+}
+
+static int pfe_cdev_release(struct inode *inp, struct file *fp)
+{
+ if (g_trigger) {
+ free_irq(pfe->hif_irq, g_trigger);
+ eventfd_ctx_put(g_trigger);
+ g_trigger = NULL;
+ }
+
+ pr_info("PFE_CDEV: Device successfully closed\n");
+ return 0;
+}
+
+/*
+ * hif_us_isr-
+ * This ISR routine processes Rx/Tx done interrupts from the HIF hardware block
+ */
+static irqreturn_t hif_us_isr(int irq, void *arg)
+{
+ struct eventfd_ctx *trigger = (struct eventfd_ctx *)arg;
+ int int_status;
+ int int_enable_mask;
+
+ /*Read hif interrupt source register */
+ int_status = readl_relaxed(HIF_INT_SRC);
+ int_enable_mask = readl_relaxed(HIF_INT_ENABLE);
+
+ if ((int_status & HIF_INT) == 0)
+ return IRQ_NONE;
+
+ if (int_status & HIF_RXPKT_INT) {
+ int_enable_mask &= ~(HIF_RXPKT_INT);
+ /* Disable interrupts, they will be enabled after
+ * they are serviced
+ */
+ writel_relaxed(int_enable_mask, HIF_INT_ENABLE);
+
+ eventfd_signal(trigger, 1);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#define PFE_INTR_COAL_USECS 100
+static long pfe_cdev_ioctl(struct file *fp, unsigned int cmd,
+ unsigned long arg)
+{
+	int ret = -EFAULT;
+	int fd;
+	int __user *argp = (int __user *)arg;
+
+ pr_debug("PFE CDEV IOCTL Called with cmd=(%u)\n", cmd);
+
+ switch (cmd) {
+	case PFE_CDEV_ETH0_STATE_GET:
+		/* Return an unsigned int (link state) for ETH0 */
+		if (put_user((int)link_states[0].state, argp))
+			return -EFAULT;
+		pr_debug("Returning state=%u for ETH0\n", link_states[0].state);
+		ret = 0;
+		break;
+	case PFE_CDEV_ETH1_STATE_GET:
+		/* Return an unsigned int (link state) for ETH1 */
+		if (put_user((int)link_states[1].state, argp))
+			return -EFAULT;
+		pr_debug("Returning state=%u for ETH1\n", link_states[1].state);
+		ret = 0;
+		break;
+	case PFE_CDEV_HIF_INTR_EN:
+		/* Return success/failure */
+		if (get_user(fd, argp))
+			return -EFAULT;
+		g_trigger = eventfd_ctx_fdget(fd);
+		if (IS_ERR(g_trigger))
+			return PTR_ERR(g_trigger);
+		ret = request_irq(pfe->hif_irq, hif_us_isr, 0, "pfe_hif",
+				  g_trigger);
+		if (ret) {
+			pr_err("%s: failed to get the hif IRQ = %d\n",
+			       __func__, pfe->hif_irq);
+			eventfd_ctx_put(g_trigger);
+			g_trigger = NULL;
+			break;
+		}
+		writel((PFE_INTR_COAL_USECS * (pfe->ctrl.sys_clk / 1000)) |
+		       HIF_INT_COAL_ENABLE, HIF_INT_COAL);
+
+		pr_debug("request_irq for hif interrupt: %d\n", pfe->hif_irq);
+		break;
+	default:
+		pr_info("Unsupported cmd (%u) for PFE CDEV.\n", cmd);
+		ret = -ENOTTY;
+		break;
+	}
+
+ return ret;
+}
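
The eventfd handshake in PFE_CDEV_HIF_INTR_EN is easiest to follow from the
other side; a hedged userspace sketch (device path assumed, error paths
simplified):

	#include <fcntl.h>
	#include <stdint.h>
	#include <sys/eventfd.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	#define PFE_CDEV_HIF_INTR_EN	_IOWR('R', 2, int)

	static int wait_for_hif_rx(void)
	{
		uint64_t ticks;
		int cdev = open("/dev/pfe_us_cdev", O_RDWR);
		int efd = eventfd(0, 0);

		if (cdev < 0 || efd < 0)
			return -1;
		/* The driver fdgets this eventfd and keeps it in g_trigger */
		if (ioctl(cdev, PFE_CDEV_HIF_INTR_EN, &efd) < 0)
			return -1;
		/* Blocks until hif_us_isr() sees HIF_RXPKT_INT and signals it */
		if (read(efd, &ticks, sizeof(ticks)) != sizeof(ticks))
			return -1;
		return 0;
	}
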
+
+static unsigned int pfe_cdev_poll(struct file *fp,
+ struct poll_table_struct *wait)
+{
+ pr_info("PFE CDEV poll method not supported\n");
+ return 0;
+}
+
+static const struct file_operations pfe_cdev_fops = {
+ .open = pfe_cdev_open,
+ .read = pfe_cdev_read,
+ .write = pfe_cdev_write,
+ .release = pfe_cdev_release,
+ .unlocked_ioctl = pfe_cdev_ioctl,
+ .poll = pfe_cdev_poll,
+};
+
+int pfe_cdev_init(void)
+{
+ int ret;
+
+ pr_debug("PFE CDEV initialization begin\n");
+
+ /* Register the major number for the device */
+ pfe_majno = register_chrdev(0, PFE_CDEV_NAME, &pfe_cdev_fops);
+ if (pfe_majno < 0) {
+ pr_err("Unable to register PFE CDEV. PFE CDEV not available\n");
+ ret = pfe_majno;
+ goto cleanup;
+ }
+
+ pr_debug("PFE CDEV assigned major number: %d\n", pfe_majno);
+
+ /* Register the class for the device */
+	pfe_char_class = class_create(THIS_MODULE, PFE_CLASS_NAME);
+	if (IS_ERR(pfe_char_class)) {
+		pr_err(
+		"Failed to init class for PFE CDEV. PFE CDEV not available.\n");
+		ret = PTR_ERR(pfe_char_class);
+		goto cleanup;
+	}
+
+ pr_debug("PFE CDEV Class created successfully.\n");
+
+ /* Create the device without any parent and without any callback data */
+ pfe_char_dev = device_create(pfe_char_class, NULL,
+ MKDEV(pfe_majno, 0), NULL,
+ PFE_CDEV_NAME);
+ if (IS_ERR(pfe_char_dev)) {
+		pr_err("Unable to create PFE CDEV device. PFE CDEV not available.\n");
+ ret = PTR_ERR(pfe_char_dev);
+ goto cleanup;
+ }
+
+ /* Information structure being shared with the userspace */
+	memset(link_states, 0, sizeof(link_states));
+
+ pr_info("PFE CDEV created: %s\n", PFE_CDEV_NAME);
+
+	return 0;
+
+cleanup:
+	if (!IS_ERR_OR_NULL(pfe_char_class))
+		class_destroy(pfe_char_class);
+
+	if (pfe_majno > 0)
+		unregister_chrdev(pfe_majno, PFE_CDEV_NAME);
+
+	return ret;
+}
+
+void pfe_cdev_exit(void)
+{
+ if (!IS_ERR(pfe_char_dev))
+ device_destroy(pfe_char_class, MKDEV(pfe_majno, 0));
+
+ if (!IS_ERR(pfe_char_class)) {
+ class_unregister(pfe_char_class);
+ class_destroy(pfe_char_class);
+ }
+
+ if (pfe_majno > 0)
+ unregister_chrdev(pfe_majno, PFE_CDEV_NAME);
+
+ /* reset the variables */
+ pfe_majno = 0;
+ pfe_char_class = NULL;
+ pfe_char_dev = NULL;
+
+ pr_info("PFE CDEV Removed.\n");
+}
diff --git a/drivers/staging/fsl_ppfe/pfe_cdev.h b/drivers/staging/fsl_ppfe/pfe_cdev.h
new file mode 100644
index 000000000000..069ecaa7f0ca
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_cdev.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2018 NXP
+ */
+
+#ifndef _PFE_CDEV_H_
+#define _PFE_CDEV_H_
+
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+
+#define PFE_CDEV_NAME "pfe_us_cdev"
+#define PFE_CLASS_NAME "ppfe_us"
+
+/* Extracted from ls1012a_pfe_platform_data; there are 3 interfaces
+ * supported by the PFE driver. Should be updated if the number of eth
+ * devices changes.
+ */
+#define PFE_CDEV_ETH_COUNT 3
+
+struct pfe_shared_info {
+ uint32_t phy_id; /* Link phy ID */
+	uint8_t state; /* Link state: 0 (down) or 1 (up) */
+};
+
+extern struct pfe_shared_info link_states[PFE_CDEV_ETH_COUNT];
+
+/* IOCTL Commands */
+#define PFE_CDEV_ETH0_STATE_GET _IOR('R', 0, int)
+#define PFE_CDEV_ETH1_STATE_GET _IOR('R', 1, int)
+#define PFE_CDEV_HIF_INTR_EN _IOWR('R', 2, int)
+
+int pfe_cdev_init(void);
+void pfe_cdev_exit(void);
+
+#endif /* _PFE_CDEV_H_ */
diff --git a/drivers/staging/fsl_ppfe/pfe_ctrl.c b/drivers/staging/fsl_ppfe/pfe_ctrl.c
new file mode 100644
index 000000000000..15c97ece75c5
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_ctrl.c
@@ -0,0 +1,226 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+
+#include "pfe_mod.h"
+#include "pfe_ctrl.h"
+
+#define TIMEOUT_MS 1000
+
+int relax(unsigned long end)
+{
+ if (time_after(jiffies, end)) {
+ if (time_after(jiffies, end + (TIMEOUT_MS * HZ) / 1000))
+ return -1;
+
+ if (need_resched())
+ schedule();
+ }
+
+ return 0;
+}
+
+void pfe_ctrl_suspend(struct pfe_ctrl *ctrl)
+{
+ int id;
+
+ mutex_lock(&ctrl->mutex);
+
+ for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++)
+ pe_dmem_write(id, cpu_to_be32(0x1), CLASS_DM_RESUME, 4);
+
+ for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
+ if (id == TMU2_ID)
+ continue;
+ pe_dmem_write(id, cpu_to_be32(0x1), TMU_DM_RESUME, 4);
+ }
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+ pe_dmem_write(UTIL_ID, cpu_to_be32(0x1), UTIL_DM_RESUME, 4);
+#endif
+ mutex_unlock(&ctrl->mutex);
+}
+
+void pfe_ctrl_resume(struct pfe_ctrl *ctrl)
+{
+ int pe_mask = CLASS_MASK | TMU_MASK;
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+ pe_mask |= UTIL_MASK;
+#endif
+ mutex_lock(&ctrl->mutex);
+ pe_start(&pfe->ctrl, pe_mask);
+ mutex_unlock(&ctrl->mutex);
+}
+
+/* PE sync stop.
+ * Stops packet processing for a list of PE's (specified using a bitmask).
+ * The caller must hold ctrl->mutex.
+ *
+ * @param ctrl Control context
+ * @param pe_mask Mask of PE id's to stop
+ *
+ */
+int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask)
+{
+ struct pe_sync_mailbox *mbox;
+ int pe_stopped = 0;
+ unsigned long end = jiffies + 2;
+ int i;
+
+ pe_mask &= 0x2FF; /*Exclude Util + TMU2 */
+
+ for (i = 0; i < MAX_PE; i++)
+ if (pe_mask & (1 << i)) {
+ mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
+
+ pe_dmem_write(i, cpu_to_be32(0x1), (unsigned
+ long)&mbox->stop, 4);
+ }
+
+ while (pe_stopped != pe_mask) {
+ for (i = 0; i < MAX_PE; i++)
+ if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
+ mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
+
+ if (pe_dmem_read(i, (unsigned
+ long)&mbox->stopped, 4) &
+ cpu_to_be32(0x1))
+ pe_stopped |= (1 << i);
+ }
+
+ if (relax(end) < 0)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
+
+ for (i = 0; i < MAX_PE; i++)
+ if (pe_mask & (1 << i)) {
+ mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
+
+ pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
+ long)&mbox->stop, 4);
+ }
+
+ return -EIO;
+}
+
+/* PE start.
+ * Starts packet processing for a list of PE's (specified using a bitmask).
+ * The caller must hold ctrl->mutex.
+ *
+ * @param ctrl Control context
+ * @param pe_mask Mask of PE id's to start
+ *
+ */
+void pe_start(struct pfe_ctrl *ctrl, int pe_mask)
+{
+ struct pe_sync_mailbox *mbox;
+ int i;
+
+ for (i = 0; i < MAX_PE; i++)
+ if (pe_mask & (1 << i)) {
+ mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
+
+ pe_dmem_write(i, cpu_to_be32(0x0), (unsigned
+ long)&mbox->stop, 4);
+ }
+}
+
+/* This function will ensure all PEs are put in to idle state */
+int pe_reset_all(struct pfe_ctrl *ctrl)
+{
+ struct pe_sync_mailbox *mbox;
+ int pe_stopped = 0;
+ unsigned long end = jiffies + 2;
+ int i;
+ int pe_mask = CLASS_MASK | TMU_MASK;
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+ pe_mask |= UTIL_MASK;
+#endif
+
+ for (i = 0; i < MAX_PE; i++)
+ if (pe_mask & (1 << i)) {
+ mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
+
+ pe_dmem_write(i, cpu_to_be32(0x2), (unsigned
+ long)&mbox->stop, 4);
+ }
+
+ while (pe_stopped != pe_mask) {
+ for (i = 0; i < MAX_PE; i++)
+ if ((pe_mask & (1 << i)) && !(pe_stopped & (1 << i))) {
+ mbox = (void *)ctrl->sync_mailbox_baseaddr[i];
+
+ if (pe_dmem_read(i, (unsigned long)
+ &mbox->stopped, 4) &
+ cpu_to_be32(0x1))
+ pe_stopped |= (1 << i);
+ }
+
+ if (relax(end) < 0)
+ goto err;
+ }
+
+ return 0;
+
+err:
+ pr_err("%s: timeout, %x %x\n", __func__, pe_mask, pe_stopped);
+ return -EIO;
+}
+
+int pfe_ctrl_init(struct pfe *pfe)
+{
+ struct pfe_ctrl *ctrl = &pfe->ctrl;
+ int id;
+
+ pr_info("%s\n", __func__);
+
+ mutex_init(&ctrl->mutex);
+ spin_lock_init(&ctrl->lock);
+
+ for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
+ ctrl->sync_mailbox_baseaddr[id] = CLASS_DM_SYNC_MBOX;
+ ctrl->msg_mailbox_baseaddr[id] = CLASS_DM_MSG_MBOX;
+ }
+
+ for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
+ if (id == TMU2_ID)
+ continue;
+ ctrl->sync_mailbox_baseaddr[id] = TMU_DM_SYNC_MBOX;
+ ctrl->msg_mailbox_baseaddr[id] = TMU_DM_MSG_MBOX;
+ }
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+ ctrl->sync_mailbox_baseaddr[UTIL_ID] = UTIL_DM_SYNC_MBOX;
+ ctrl->msg_mailbox_baseaddr[UTIL_ID] = UTIL_DM_MSG_MBOX;
+#endif
+
+ ctrl->hash_array_baseaddr = pfe->ddr_baseaddr + ROUTE_TABLE_BASEADDR;
+ ctrl->hash_array_phys_baseaddr = pfe->ddr_phys_baseaddr +
+ ROUTE_TABLE_BASEADDR;
+
+ ctrl->dev = pfe->dev;
+
+ pr_info("%s finished\n", __func__);
+
+ return 0;
+}
+
+void pfe_ctrl_exit(struct pfe *pfe)
+{
+ pr_info("%s\n", __func__);
+}
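
A hedged usage sketch of the PE control API above (function name
illustrative, not part of the patch): pe_sync_stop() requires the caller to
hold ctrl->mutex, and pe_start() undoes the stop request.

	/* Quiesce the class and TMU PEs, then let them run again. */
	static int example_quiesce_pes(struct pfe *pfe)
	{
		int ret;

		mutex_lock(&pfe->ctrl.mutex);
		ret = pe_sync_stop(&pfe->ctrl, CLASS_MASK | TMU_MASK);
		if (!ret)
			pe_start(&pfe->ctrl, CLASS_MASK | TMU_MASK);
		mutex_unlock(&pfe->ctrl.mutex);

		return ret;
	}
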
diff --git a/drivers/staging/fsl_ppfe/pfe_ctrl.h b/drivers/staging/fsl_ppfe/pfe_ctrl.h
new file mode 100644
index 000000000000..2003e810fb12
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_ctrl.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#ifndef _PFE_CTRL_H_
+#define _PFE_CTRL_H_
+
+#include <linux/dmapool.h>
+
+#include "pfe_mod.h"
+#include "pfe/pfe.h"
+
+#define DMA_BUF_SIZE_128	0x80	/* enough for 1 conntrack */
+#define DMA_BUF_SIZE_256 0x100
+/* enough for 2 conntracks, 1 bridge entry or 1 multicast entry */
+#define DMA_BUF_SIZE_512 0x200
+/* 512bytes dma allocated buffers used by rtp relay feature */
+#define DMA_BUF_MIN_ALIGNMENT 8
+#define DMA_BUF_BOUNDARY (4 * 1024)
+/* bursts can not cross 4k boundary */
+
+#define CMD_TX_ENABLE 0x0501
+#define CMD_TX_DISABLE 0x0502
+
+#define CMD_RX_LRO 0x0011
+#define CMD_PKTCAP_ENABLE 0x0d01
+#define CMD_QM_EXPT_RATE 0x020c
+
+#define CLASS_DM_SH_STATIC (0x800)
+#define CLASS_DM_CPU_TICKS (CLASS_DM_SH_STATIC)
+#define CLASS_DM_SYNC_MBOX (0x808)
+#define CLASS_DM_MSG_MBOX (0x810)
+#define CLASS_DM_DROP_CNTR (0x820)
+#define CLASS_DM_RESUME (0x854)
+#define CLASS_DM_PESTATUS (0x860)
+
+#define TMU_DM_SH_STATIC (0x80)
+#define TMU_DM_CPU_TICKS (TMU_DM_SH_STATIC)
+#define TMU_DM_SYNC_MBOX (0x88)
+#define TMU_DM_MSG_MBOX (0x90)
+#define TMU_DM_RESUME (0xA0)
+#define TMU_DM_PESTATUS (0xB0)
+#define TMU_DM_CONTEXT (0x300)
+#define TMU_DM_TX_TRANS (0x480)
+
+#define UTIL_DM_SH_STATIC (0x0)
+#define UTIL_DM_CPU_TICKS (UTIL_DM_SH_STATIC)
+#define UTIL_DM_SYNC_MBOX (0x8)
+#define UTIL_DM_MSG_MBOX (0x10)
+#define UTIL_DM_DROP_CNTR (0x20)
+#define UTIL_DM_RESUME (0x40)
+#define UTIL_DM_PESTATUS (0x50)
+
+struct pfe_ctrl {
+ struct mutex mutex; /* to serialize pfe control access */
+ spinlock_t lock;
+
+ void *dma_pool;
+ void *dma_pool_512;
+ void *dma_pool_128;
+
+ struct device *dev;
+
+ void *hash_array_baseaddr; /*
+ * Virtual base address of
+ * the conntrack hash array
+ */
+ unsigned long hash_array_phys_baseaddr; /*
+ * Physical base address of
+ * the conntrack hash array
+ */
+
+ int (*event_cb)(u16, u16, u16*);
+
+ unsigned long sync_mailbox_baseaddr[MAX_PE]; /*
+ * Sync mailbox PFE
+ * internal address,
+ * initialized
+ * when parsing elf images
+ */
+ unsigned long msg_mailbox_baseaddr[MAX_PE]; /*
+ * Msg mailbox PFE internal
+ * address, initialized
+ * when parsing elf images
+ */
+ unsigned int sys_clk; /* AXI clock value, in KHz */
+};
+
+int pfe_ctrl_init(struct pfe *pfe);
+void pfe_ctrl_exit(struct pfe *pfe);
+int pe_sync_stop(struct pfe_ctrl *ctrl, int pe_mask);
+void pe_start(struct pfe_ctrl *ctrl, int pe_mask);
+int pe_reset_all(struct pfe_ctrl *ctrl);
+void pfe_ctrl_suspend(struct pfe_ctrl *ctrl);
+void pfe_ctrl_resume(struct pfe_ctrl *ctrl);
+int relax(unsigned long end);
+
+#endif /* _PFE_CTRL_H_ */
diff --git a/drivers/staging/fsl_ppfe/pfe_debugfs.c b/drivers/staging/fsl_ppfe/pfe_debugfs.c
new file mode 100644
index 000000000000..993ff219736f
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_debugfs.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/platform_device.h>
+
+#include "pfe_mod.h"
+
+static int dmem_show(struct seq_file *s, void *unused)
+{
+ u32 dmem_addr, val;
+ int id = (long int)s->private;
+ int i;
+
+ for (dmem_addr = 0; dmem_addr < CLASS_DMEM_SIZE; dmem_addr += 8 * 4) {
+ seq_printf(s, "%04x:", dmem_addr);
+
+ for (i = 0; i < 8; i++) {
+ val = pe_dmem_read(id, dmem_addr + i * 4, 4);
+ seq_printf(s, " %02x %02x %02x %02x", val & 0xff,
+ (val >> 8) & 0xff, (val >> 16) & 0xff,
+ (val >> 24) & 0xff);
+ }
+
+ seq_puts(s, "\n");
+ }
+
+ return 0;
+}
+
+static int dmem_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, dmem_show, inode->i_private);
+}
+
+static const struct file_operations dmem_fops = {
+ .open = dmem_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+int pfe_debugfs_init(struct pfe *pfe)
+{
+ struct dentry *d;
+
+ pr_info("%s\n", __func__);
+
+ pfe->dentry = debugfs_create_dir("pfe", NULL);
+ if (IS_ERR_OR_NULL(pfe->dentry))
+ goto err_dir;
+
+ d = debugfs_create_file("pe0_dmem", 0444, pfe->dentry, (void *)0,
+ &dmem_fops);
+ if (IS_ERR_OR_NULL(d))
+ goto err_pe;
+
+ d = debugfs_create_file("pe1_dmem", 0444, pfe->dentry, (void *)1,
+ &dmem_fops);
+ if (IS_ERR_OR_NULL(d))
+ goto err_pe;
+
+ d = debugfs_create_file("pe2_dmem", 0444, pfe->dentry, (void *)2,
+ &dmem_fops);
+ if (IS_ERR_OR_NULL(d))
+ goto err_pe;
+
+ d = debugfs_create_file("pe3_dmem", 0444, pfe->dentry, (void *)3,
+ &dmem_fops);
+ if (IS_ERR_OR_NULL(d))
+ goto err_pe;
+
+ d = debugfs_create_file("pe4_dmem", 0444, pfe->dentry, (void *)4,
+ &dmem_fops);
+ if (IS_ERR_OR_NULL(d))
+ goto err_pe;
+
+ d = debugfs_create_file("pe5_dmem", 0444, pfe->dentry, (void *)5,
+ &dmem_fops);
+ if (IS_ERR_OR_NULL(d))
+ goto err_pe;
+
+ return 0;
+
+err_pe:
+ debugfs_remove_recursive(pfe->dentry);
+
+err_dir:
+ return -1;
+}
+
+void pfe_debugfs_exit(struct pfe *pfe)
+{
+ debugfs_remove_recursive(pfe->dentry);
+}
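
With debugfs mounted, the files created above appear as
/sys/kernel/debug/pfe/pe0_dmem through pe5_dmem; each read produces one line
per 32 bytes of PE data memory in the "addr: xx xx ..." layout built by
dmem_show().
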
diff --git a/drivers/staging/fsl_ppfe/pfe_debugfs.h b/drivers/staging/fsl_ppfe/pfe_debugfs.h
new file mode 100644
index 000000000000..d80f6bfea3cc
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_debugfs.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#ifndef _PFE_DEBUGFS_H_
+#define _PFE_DEBUGFS_H_
+
+int pfe_debugfs_init(struct pfe *pfe);
+void pfe_debugfs_exit(struct pfe *pfe);
+
+#endif /* _PFE_DEBUGFS_H_ */
diff --git a/drivers/staging/fsl_ppfe/pfe_eth.c b/drivers/staging/fsl_ppfe/pfe_eth.c
new file mode 100644
index 000000000000..a224a495425f
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_eth.c
@@ -0,0 +1,2569 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+/* @pfe_eth.c.
+ * Ethernet driver to handle the exception path for the PFE.
+ * - uses HIF functions to send/receive packets.
+ * - uses ctrl function to start/stop interfaces.
+ * - uses direct register accesses to control phy operation.
+ */
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/timer.h>
+#include <linux/hrtimer.h>
+#include <linux/platform_device.h>
+
+#include <net/ip.h>
+#include <net/sock.h>
+
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+
+#include <linux/io.h>
+#include <asm/irq.h>
+#include <linux/delay.h>
+#include <linux/regmap.h>
+#include <linux/i2c.h>
+#include <linux/sys_soc.h>
+
+#if defined(CONFIG_NF_CONNTRACK_MARK)
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
+#include "pfe_mod.h"
+#include "pfe_eth.h"
+#include "pfe_cdev.h"
+
+#define LS1012A_REV_1_0 0x87040010
+
+bool pfe_use_old_dts_phy;
+bool pfe_errata_a010897;
+
+static void *cbus_emac_base[3];
+static void *cbus_gpi_base[3];
+
+/* Forward Declaration */
+static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv);
+static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv);
+static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
+ from_tx, int n_desc);
+
+/* MDIO registers */
+#define MDIO_SGMII_CR 0x00
+#define MDIO_SGMII_SR 0x01
+#define MDIO_SGMII_DEV_ABIL_SGMII 0x04
+#define MDIO_SGMII_LINK_TMR_L 0x12
+#define MDIO_SGMII_LINK_TMR_H 0x13
+#define MDIO_SGMII_IF_MODE 0x14
+
+/* SGMII Control defines */
+#define SGMII_CR_RST 0x8000
+#define SGMII_CR_AN_EN 0x1000
+#define SGMII_CR_RESTART_AN 0x0200
+#define SGMII_CR_FD 0x0100
+#define SGMII_CR_SPEED_SEL1_1G 0x0040
+#define SGMII_CR_DEF_VAL (SGMII_CR_AN_EN | SGMII_CR_FD | \
+ SGMII_CR_SPEED_SEL1_1G)
+
+/* SGMII IF Mode */
+#define SGMII_DUPLEX_HALF 0x10
+#define SGMII_SPEED_10MBPS 0x00
+#define SGMII_SPEED_100MBPS 0x04
+#define SGMII_SPEED_1GBPS 0x08
+#define SGMII_USE_SGMII_AN 0x02
+#define SGMII_EN 0x01
+
+/* SGMII Device Ability for SGMII */
+#define SGMII_DEV_ABIL_ACK 0x4000
+#define SGMII_DEV_ABIL_EEE_CLK_STP_EN 0x0100
+#define SGMII_DEV_ABIL_SGMII 0x0001
+
+unsigned int gemac_regs[] = {
+ 0x0004, /* Interrupt event */
+ 0x0008, /* Interrupt mask */
+ 0x0024, /* Ethernet control */
+ 0x0064, /* MIB Control/Status */
+ 0x0084, /* Receive control/status */
+ 0x00C4, /* Transmit control */
+ 0x00E4, /* Physical address low */
+ 0x00E8, /* Physical address high */
+ 0x0144, /* Transmit FIFO Watermark and Store and Forward Control*/
+ 0x0190, /* Receive FIFO Section Full Threshold */
+ 0x01A0, /* Transmit FIFO Section Empty Threshold */
+ 0x01B0, /* Frame Truncation Length */
+};
+
+const struct soc_device_attribute ls1012a_rev1_soc_attr[] = {
+ { .family = "QorIQ LS1012A",
+ .soc_id = "svr:0x87040010",
+ .revision = "1.0",
+ .data = NULL },
+ { },
+};
+
+/********************************************************************/
+/* SYSFS INTERFACE */
+/********************************************************************/
+
+#ifdef PFE_ETH_NAPI_STATS
+/*
+ * pfe_eth_show_napi_stats
+ */
+static ssize_t pfe_eth_show_napi_stats(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+ ssize_t len = 0;
+
+ len += sprintf(buf + len, "sched: %u\n",
+ priv->napi_counters[NAPI_SCHED_COUNT]);
+ len += sprintf(buf + len, "poll: %u\n",
+ priv->napi_counters[NAPI_POLL_COUNT]);
+ len += sprintf(buf + len, "packet: %u\n",
+ priv->napi_counters[NAPI_PACKET_COUNT]);
+ len += sprintf(buf + len, "budget: %u\n",
+ priv->napi_counters[NAPI_FULL_BUDGET_COUNT]);
+ len += sprintf(buf + len, "desc: %u\n",
+ priv->napi_counters[NAPI_DESC_COUNT]);
+
+ return len;
+}
+
+/*
+ * pfe_eth_set_napi_stats
+ */
+static ssize_t pfe_eth_set_napi_stats(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+
+ memset(priv->napi_counters, 0, sizeof(priv->napi_counters));
+
+ return count;
+}
+#endif
+#ifdef PFE_ETH_TX_STATS
+/* pfe_eth_show_tx_stats
+ *
+ */
+static ssize_t pfe_eth_show_tx_stats(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+ ssize_t len = 0;
+ int i;
+
+ len += sprintf(buf + len, "TX queues stats:\n");
+
+ for (i = 0; i < emac_txq_cnt; i++) {
+ struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
+ i);
+
+ len += sprintf(buf + len, "\n");
+ __netif_tx_lock_bh(tx_queue);
+
+ hif_tx_lock(&pfe->hif);
+ len += sprintf(buf + len,
+ "Queue %2d : credits = %10d\n"
+ , i, hif_lib_tx_credit_avail(pfe, priv->id, i));
+ len += sprintf(buf + len,
+ " tx packets = %10d\n"
+ , pfe->tmu_credit.tx_packets[priv->id][i]);
+ hif_tx_unlock(&pfe->hif);
+
+		/* Don't output additional stats if queue never used */
+ if (!pfe->tmu_credit.tx_packets[priv->id][i])
+ goto skip;
+
+ len += sprintf(buf + len,
+ " clean_fail = %10d\n"
+ , priv->clean_fail[i]);
+ len += sprintf(buf + len,
+ " stop_queue = %10d\n"
+ , priv->stop_queue_total[i]);
+ len += sprintf(buf + len,
+ " stop_queue_hif = %10d\n"
+ , priv->stop_queue_hif[i]);
+ len += sprintf(buf + len,
+ " stop_queue_hif_client = %10d\n"
+ , priv->stop_queue_hif_client[i]);
+ len += sprintf(buf + len,
+ " stop_queue_credit = %10d\n"
+ , priv->stop_queue_credit[i]);
+skip:
+ __netif_tx_unlock_bh(tx_queue);
+ }
+ return len;
+}
+
+/* pfe_eth_set_tx_stats
+ *
+ */
+static ssize_t pfe_eth_set_tx_stats(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+ int i;
+
+ for (i = 0; i < emac_txq_cnt; i++) {
+ struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
+ i);
+
+ __netif_tx_lock_bh(tx_queue);
+ priv->clean_fail[i] = 0;
+ priv->stop_queue_total[i] = 0;
+ priv->stop_queue_hif[i] = 0;
+ priv->stop_queue_hif_client[i] = 0;
+ priv->stop_queue_credit[i] = 0;
+ __netif_tx_unlock_bh(tx_queue);
+ }
+
+ return count;
+}
+#endif
+/* pfe_eth_show_txavail
+ *
+ */
+static ssize_t pfe_eth_show_txavail(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+ ssize_t len = 0;
+ int i;
+
+ for (i = 0; i < emac_txq_cnt; i++) {
+ struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
+ i);
+
+ __netif_tx_lock_bh(tx_queue);
+
+ len += sprintf(buf + len, "%d",
+ hif_lib_tx_avail(&priv->client, i));
+
+ __netif_tx_unlock_bh(tx_queue);
+
+ if (i == (emac_txq_cnt - 1))
+ len += sprintf(buf + len, "\n");
+ else
+ len += sprintf(buf + len, " ");
+ }
+
+ return len;
+}
+
+/* pfe_eth_show_default_priority
+ *
+ */
+static ssize_t pfe_eth_show_default_priority(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ rc = sprintf(buf, "%d\n", priv->default_priority);
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return rc;
+}
+
+/* pfe_eth_set_default_priority
+ *
+ */
+
+static ssize_t pfe_eth_set_default_priority(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct pfe_eth_priv_s *priv = netdev_priv(to_net_dev(dev));
+	unsigned long priority;
+	unsigned long flags;
+
+	if (kstrtoul(buf, 0, &priority))
+		return -EINVAL;
+
+	spin_lock_irqsave(&priv->lock, flags);
+	priv->default_priority = priority;
+	spin_unlock_irqrestore(&priv->lock, flags);
+
+	return count;
+}
+
+static DEVICE_ATTR(txavail, 0444, pfe_eth_show_txavail, NULL);
+static DEVICE_ATTR(default_priority, 0644, pfe_eth_show_default_priority,
+ pfe_eth_set_default_priority);
+
+#ifdef PFE_ETH_NAPI_STATS
+static DEVICE_ATTR(napi_stats, 0644, pfe_eth_show_napi_stats,
+ pfe_eth_set_napi_stats);
+#endif
+
+#ifdef PFE_ETH_TX_STATS
+static DEVICE_ATTR(tx_stats, 0644, pfe_eth_show_tx_stats,
+ pfe_eth_set_tx_stats);
+#endif
+
+/*
+ * pfe_eth_sysfs_init
+ *
+ */
+static int pfe_eth_sysfs_init(struct net_device *ndev)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+ int err;
+
+ /* Initialize the default values */
+
+ /*
+ * By default, packets without conntrack will use this default low
+ * priority queue
+ */
+ priv->default_priority = 0;
+
+ /* Create our sysfs files */
+ err = device_create_file(&ndev->dev, &dev_attr_default_priority);
+ if (err) {
+ netdev_err(ndev,
+ "failed to create default_priority sysfs files\n");
+ goto err_priority;
+ }
+
+ err = device_create_file(&ndev->dev, &dev_attr_txavail);
+ if (err) {
+		netdev_err(ndev,
+			   "failed to create txavail sysfs files\n");
+ goto err_txavail;
+ }
+
+#ifdef PFE_ETH_NAPI_STATS
+ err = device_create_file(&ndev->dev, &dev_attr_napi_stats);
+ if (err) {
+ netdev_err(ndev, "failed to create napi stats sysfs files\n");
+ goto err_napi;
+ }
+#endif
+
+#ifdef PFE_ETH_TX_STATS
+ err = device_create_file(&ndev->dev, &dev_attr_tx_stats);
+ if (err) {
+ netdev_err(ndev, "failed to create tx stats sysfs files\n");
+ goto err_tx;
+ }
+#endif
+
+ return 0;
+
+#ifdef PFE_ETH_TX_STATS
+err_tx:
+#endif
+#ifdef PFE_ETH_NAPI_STATS
+ device_remove_file(&ndev->dev, &dev_attr_napi_stats);
+
+err_napi:
+#endif
+ device_remove_file(&ndev->dev, &dev_attr_txavail);
+
+err_txavail:
+ device_remove_file(&ndev->dev, &dev_attr_default_priority);
+
+err_priority:
+ return -1;
+}
+
+/* pfe_eth_sysfs_exit
+ *
+ */
+void pfe_eth_sysfs_exit(struct net_device *ndev)
+{
+#ifdef PFE_ETH_TX_STATS
+ device_remove_file(&ndev->dev, &dev_attr_tx_stats);
+#endif
+
+#ifdef PFE_ETH_NAPI_STATS
+ device_remove_file(&ndev->dev, &dev_attr_napi_stats);
+#endif
+ device_remove_file(&ndev->dev, &dev_attr_txavail);
+ device_remove_file(&ndev->dev, &dev_attr_default_priority);
+}
+
+/*************************************************************************/
+/* ETHTOOL INTERFACE */
+/*************************************************************************/
+
+/*MTIP GEMAC */
+static const struct fec_stat {
+ char name[ETH_GSTRING_LEN];
+ u16 offset;
+} fec_stats[] = {
+ /* RMON TX */
+ { "tx_dropped", RMON_T_DROP },
+ { "tx_packets", RMON_T_PACKETS },
+ { "tx_broadcast", RMON_T_BC_PKT },
+ { "tx_multicast", RMON_T_MC_PKT },
+ { "tx_crc_errors", RMON_T_CRC_ALIGN },
+ { "tx_undersize", RMON_T_UNDERSIZE },
+ { "tx_oversize", RMON_T_OVERSIZE },
+ { "tx_fragment", RMON_T_FRAG },
+ { "tx_jabber", RMON_T_JAB },
+ { "tx_collision", RMON_T_COL },
+ { "tx_64byte", RMON_T_P64 },
+ { "tx_65to127byte", RMON_T_P65TO127 },
+ { "tx_128to255byte", RMON_T_P128TO255 },
+ { "tx_256to511byte", RMON_T_P256TO511 },
+ { "tx_512to1023byte", RMON_T_P512TO1023 },
+ { "tx_1024to2047byte", RMON_T_P1024TO2047 },
+ { "tx_GTE2048byte", RMON_T_P_GTE2048 },
+ { "tx_octets", RMON_T_OCTETS },
+
+ /* IEEE TX */
+ { "IEEE_tx_drop", IEEE_T_DROP },
+ { "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
+ { "IEEE_tx_1col", IEEE_T_1COL },
+ { "IEEE_tx_mcol", IEEE_T_MCOL },
+ { "IEEE_tx_def", IEEE_T_DEF },
+ { "IEEE_tx_lcol", IEEE_T_LCOL },
+ { "IEEE_tx_excol", IEEE_T_EXCOL },
+ { "IEEE_tx_macerr", IEEE_T_MACERR },
+ { "IEEE_tx_cserr", IEEE_T_CSERR },
+ { "IEEE_tx_sqe", IEEE_T_SQE },
+ { "IEEE_tx_fdxfc", IEEE_T_FDXFC },
+ { "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
+
+ /* RMON RX */
+ { "rx_packets", RMON_R_PACKETS },
+ { "rx_broadcast", RMON_R_BC_PKT },
+ { "rx_multicast", RMON_R_MC_PKT },
+ { "rx_crc_errors", RMON_R_CRC_ALIGN },
+ { "rx_undersize", RMON_R_UNDERSIZE },
+ { "rx_oversize", RMON_R_OVERSIZE },
+ { "rx_fragment", RMON_R_FRAG },
+ { "rx_jabber", RMON_R_JAB },
+ { "rx_64byte", RMON_R_P64 },
+ { "rx_65to127byte", RMON_R_P65TO127 },
+ { "rx_128to255byte", RMON_R_P128TO255 },
+ { "rx_256to511byte", RMON_R_P256TO511 },
+ { "rx_512to1023byte", RMON_R_P512TO1023 },
+ { "rx_1024to2047byte", RMON_R_P1024TO2047 },
+ { "rx_GTE2048byte", RMON_R_P_GTE2048 },
+ { "rx_octets", RMON_R_OCTETS },
+
+ /* IEEE RX */
+ { "IEEE_rx_drop", IEEE_R_DROP },
+ { "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
+ { "IEEE_rx_crc", IEEE_R_CRC },
+ { "IEEE_rx_align", IEEE_R_ALIGN },
+ { "IEEE_rx_macerr", IEEE_R_MACERR },
+ { "IEEE_rx_fdxfc", IEEE_R_FDXFC },
+ { "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
+};
+
+static void pfe_eth_fill_stats(struct net_device *ndev, struct ethtool_stats
+ *stats, u64 *data)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
+ data[i] = readl(priv->EMAC_baseaddr + fec_stats[i].offset);
+}
+
+static void pfe_eth_gstrings(struct net_device *netdev,
+ u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
+ memcpy(data + i * ETH_GSTRING_LEN,
+ fec_stats[i].name, ETH_GSTRING_LEN);
+ break;
+ }
+}
+
+static int pfe_eth_stats_count(struct net_device *ndev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(fec_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/*
+ * pfe_eth_gemac_reglen - Return the length of the register structure.
+ *
+ */
+static int pfe_eth_gemac_reglen(struct net_device *ndev)
+{
+	pr_info("%s()\n", __func__);
+	/* ethtool expects the dump size in bytes, not the register count */
+	return sizeof(gemac_regs);
+}
+
+/*
+ * pfe_eth_gemac_get_regs - Return the gemac register structure.
+ *
+ */
+static void pfe_eth_gemac_get_regs(struct net_device *ndev, struct ethtool_regs
+ *regs, void *regbuf)
+{
+ int i;
+
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+ u32 *buf = (u32 *)regbuf;
+
+ pr_info("%s()\n", __func__);
+ for (i = 0; i < sizeof(gemac_regs) / sizeof(u32); i++)
+ buf[i] = readl(priv->EMAC_baseaddr + gemac_regs[i]);
+}
+
+/*
+ * pfe_eth_set_wol - Set the magic packet option, in WoL register.
+ *
+ */
+static int pfe_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EOPNOTSUPP;
+
+ /* for MTIP we store wol->wolopts */
+ priv->wol = wol->wolopts;
+
+ device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
+
+ return 0;
+}
+
+/*
+ *
+ * pfe_eth_get_wol - Get the WoL options.
+ *
+ */
+static void pfe_eth_get_wol(struct net_device *ndev, struct ethtool_wolinfo
+ *wol)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+
+ wol->supported = WAKE_MAGIC;
+ wol->wolopts = 0;
+
+ if (priv->wol & WAKE_MAGIC)
+ wol->wolopts = WAKE_MAGIC;
+
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+/*
+ * pfe_eth_get_drvinfo - Fills in the drvinfo structure with some basic info
+ *
+ */
+static void pfe_eth_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo
+ *drvinfo)
+{
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+ strlcpy(drvinfo->bus_info, "N/A", sizeof(drvinfo->bus_info));
+}
+
+/*
+ * pfe_eth_set_settings - Used to send commands to PHY.
+ *
+ */
+static int pfe_eth_set_settings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+ struct phy_device *phydev = priv->phydev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ return phy_ethtool_ksettings_set(phydev, cmd);
+}
+
+/*
+ * pfe_eth_getsettings - Return the current settings in the ethtool_cmd
+ * structure.
+ *
+ */
+static int pfe_eth_get_settings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+ struct phy_device *phydev = priv->phydev;
+
+ if (!phydev)
+ return -ENODEV;
+
+ phy_ethtool_ksettings_get(phydev, cmd);
+
+ return 0;
+}
+
+/*
+ * pfe_eth_get_msglevel - Gets the debug message mask.
+ *
+ */
+static uint32_t pfe_eth_get_msglevel(struct net_device *ndev)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+
+ return priv->msg_enable;
+}
+
+/*
+ * pfe_eth_set_msglevel - Sets the debug message mask.
+ *
+ */
+static void pfe_eth_set_msglevel(struct net_device *ndev, uint32_t data)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+
+ priv->msg_enable = data;
+}
+
+#define HIF_RX_COAL_MAX_CLKS (~(1 << 31))
+#define HIF_RX_COAL_CLKS_PER_USEC (pfe->ctrl.sys_clk / 1000)
+#define HIF_RX_COAL_MAX_USECS (HIF_RX_COAL_MAX_CLKS / \
+ HIF_RX_COAL_CLKS_PER_USEC)
+
+/*
+ * pfe_eth_set_coalesce - Sets rx interrupt coalescing timer.
+ *
+ */
+static int pfe_eth_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ec)
+{
+ if (ec->rx_coalesce_usecs > HIF_RX_COAL_MAX_USECS)
+ return -EINVAL;
+
+ if (!ec->rx_coalesce_usecs) {
+ writel(0, HIF_INT_COAL);
+ return 0;
+ }
+
+ writel((ec->rx_coalesce_usecs * HIF_RX_COAL_CLKS_PER_USEC) |
+ HIF_INT_COAL_ENABLE, HIF_INT_COAL);
+
+ return 0;
+}
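
To make the coalescing arithmetic concrete, a worked example with an assumed
AXI clock (the real value comes from pfe->ctrl.sys_clk, in kHz):

	/* Hedged example: with sys_clk = 250000 kHz,
	 * HIF_RX_COAL_CLKS_PER_USEC = 250, so rx_coalesce_usecs = 100
	 * programs 100 * 250 = 25000 HIF clock ticks plus the enable bit.
	 */
	static u32 example_coal_reg(u32 usecs, u32 sys_clk_khz)
	{
		return (usecs * (sys_clk_khz / 1000)) | HIF_INT_COAL_ENABLE;
	}
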
+
+/*
+ * pfe_eth_get_coalesce - Gets rx interrupt coalescing timer value.
+ *
+ */
+static int pfe_eth_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *ec)
+{
+ int reg_val = readl(HIF_INT_COAL);
+
+ if (reg_val & HIF_INT_COAL_ENABLE)
+ ec->rx_coalesce_usecs = (reg_val & HIF_RX_COAL_MAX_CLKS) /
+ HIF_RX_COAL_CLKS_PER_USEC;
+ else
+ ec->rx_coalesce_usecs = 0;
+
+ return 0;
+}
+
+/*
+ * pfe_eth_set_pauseparam - Sets pause parameters
+ *
+ */
+static int pfe_eth_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *epause)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+
+ if (epause->tx_pause != epause->rx_pause) {
+ netdev_info(ndev,
+			    "hardware only supports enabling/disabling both tx and rx\n");
+ return -EINVAL;
+ }
+
+ priv->pause_flag = 0;
+ priv->pause_flag |= epause->rx_pause ? PFE_PAUSE_FLAG_ENABLE : 0;
+ priv->pause_flag |= epause->autoneg ? PFE_PAUSE_FLAG_AUTONEG : 0;
+
+ if (epause->rx_pause || epause->autoneg) {
+ gemac_enable_pause_rx(priv->EMAC_baseaddr);
+ writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) |
+ EGPI_PAUSE_ENABLE),
+ priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
+ if (priv->phydev) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ priv->phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ priv->phydev->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ priv->phydev->advertising);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ priv->phydev->advertising);
+ }
+ } else {
+ gemac_disable_pause_rx(priv->EMAC_baseaddr);
+ writel((readl(priv->GPI_baseaddr + GPI_TX_PAUSE_TIME) &
+ ~EGPI_PAUSE_ENABLE),
+ priv->GPI_baseaddr + GPI_TX_PAUSE_TIME);
+ if (priv->phydev) {
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ priv->phydev->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ priv->phydev->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
+ priv->phydev->advertising);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ priv->phydev->advertising);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * pfe_eth_get_pauseparam - Gets pause parameters
+ *
+ */
+static void pfe_eth_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *epause)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+
+ epause->autoneg = (priv->pause_flag & PFE_PAUSE_FLAG_AUTONEG) != 0;
+ epause->tx_pause = (priv->pause_flag & PFE_PAUSE_FLAG_ENABLE) != 0;
+ epause->rx_pause = epause->tx_pause;
+}
+
+/*
+ * pfe_eth_get_hash
+ */
+#define PFE_HASH_BITS 6 /* #bits in hash */
+#define CRC32_POLY 0xEDB88320
+
+static int pfe_eth_get_hash(u8 *addr)
+{
+ unsigned int i, bit, data, crc, hash;
+
+ /* calculate crc32 value of mac address */
+ crc = 0xffffffff;
+
+ for (i = 0; i < 6; i++) {
+ data = addr[i];
+ for (bit = 0; bit < 8; bit++, data >>= 1) {
+ crc = (crc >> 1) ^
+ (((crc ^ data) & 1) ? CRC32_POLY : 0);
+ }
+ }
+
+ /*
+ * only upper 6 bits (PFE_HASH_BITS) are used
+ * which point to specific bit in the hash registers
+ */
+ hash = (crc >> (32 - PFE_HASH_BITS)) & 0x3f;
+
+ return hash;
+}
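
What the 6-bit index is for: a hedged sketch of the usual consumption
pattern, selecting one bit of a 64-bit group-address hash filter (the
high/low word split is illustrative; this driver programs the result through
gemac_set_hash()):

	static void example_hash_to_filter(u8 *addr, u32 *hash_hi, u32 *hash_lo)
	{
		int hash = pfe_eth_get_hash(addr);	/* 0..63 */

		if (hash >= 32)
			*hash_hi |= BIT(hash - 32);
		else
			*hash_lo |= BIT(hash);
	}
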
+
+const struct ethtool_ops pfe_ethtool_ops = {
+ .get_drvinfo = pfe_eth_get_drvinfo,
+ .get_regs_len = pfe_eth_gemac_reglen,
+ .get_regs = pfe_eth_gemac_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_wol = pfe_eth_get_wol,
+ .set_wol = pfe_eth_set_wol,
+ .set_pauseparam = pfe_eth_set_pauseparam,
+ .get_pauseparam = pfe_eth_get_pauseparam,
+ .get_strings = pfe_eth_gstrings,
+ .get_sset_count = pfe_eth_stats_count,
+ .get_ethtool_stats = pfe_eth_fill_stats,
+ .get_msglevel = pfe_eth_get_msglevel,
+ .set_msglevel = pfe_eth_set_msglevel,
+ .set_coalesce = pfe_eth_set_coalesce,
+ .get_coalesce = pfe_eth_get_coalesce,
+ .get_link_ksettings = pfe_eth_get_settings,
+ .set_link_ksettings = pfe_eth_set_settings,
+};
+
+/* pfe_eth_mdio_reset
+ */
+int pfe_eth_mdio_reset(struct mii_bus *bus)
+{
+ struct pfe_mdio_priv_s *priv = (struct pfe_mdio_priv_s *)bus->priv;
+ u32 phy_speed;
+
+ mutex_lock(&bus->mdio_lock);
+
+ /*
+ * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed)
+ *
+ * The formula for FEC MDC is 'ref_freq / (MII_SPEED x 2)' while
+ * for ENET-MAC is 'ref_freq / ((MII_SPEED + 1) x 2)'.
+ */
+ phy_speed = (DIV_ROUND_UP((pfe->ctrl.sys_clk * 1000), 4000000)
+ << EMAC_MII_SPEED_SHIFT);
+ phy_speed |= EMAC_HOLDTIME(0x5);
+ __raw_writel(phy_speed, priv->mdio_base + EMAC_MII_CTRL_REG);
+
+ mutex_unlock(&bus->mdio_lock);
+
+ return 0;
+}
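
The divider comment above is easier to check with numbers (a worked example;
the 250000 kHz sys_clk is an assumption):

	/* sys_clk = 250000 kHz gives
	 * MII_SPEED = DIV_ROUND_UP(250000000, 4000000) = 63, so with the
	 * FEC formula MDC = 250 MHz / (63 * 2) ~= 1.98 MHz, safely below
	 * the 2.5 MHz IEEE 802.3 ceiling.
	 */
	static u32 example_mdc_hz(u32 sys_clk_khz)
	{
		u32 mii_speed = DIV_ROUND_UP(sys_clk_khz * 1000, 4000000);

		return (sys_clk_khz * 1000) / (mii_speed * 2);
	}
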
+
+/* pfe_eth_mdio_timeout
+ *
+ */
+static int pfe_eth_mdio_timeout(struct pfe_mdio_priv_s *priv, int timeout)
+{
+ while (!(__raw_readl(priv->mdio_base + EMAC_IEVENT_REG) &
+ EMAC_IEVENT_MII)) {
+ if (timeout-- <= 0)
+ return -1;
+ usleep_range(10, 20);
+ }
+ __raw_writel(EMAC_IEVENT_MII, priv->mdio_base + EMAC_IEVENT_REG);
+ return 0;
+}
+
+static int pfe_eth_mdio_mux(u8 muxval)
+{
+ struct i2c_adapter *a;
+ struct i2c_msg msg;
+ unsigned char buf[2];
+ int ret;
+
+ a = i2c_get_adapter(0);
+ if (!a)
+ return -ENODEV;
+
+	/* write the mux value into register 0x54 of the chip at i2c addr 0x66 */
+ buf[0] = 0x54; /* reg number */
+ buf[1] = (muxval << 6) | 0x3; /* data */
+ msg.addr = 0x66;
+ msg.buf = buf;
+ msg.len = 2;
+ msg.flags = 0;
+ ret = i2c_transfer(a, &msg, 1);
+ i2c_put_adapter(a);
+ if (ret != 1)
+ return -ENODEV;
+ return 0;
+}
+
+static int pfe_eth_mdio_write_addr(struct mii_bus *bus, int mii_id,
+ int dev_addr, int regnum)
+{
+ struct pfe_mdio_priv_s *priv = (struct pfe_mdio_priv_s *)bus->priv;
+
+ __raw_writel(EMAC_MII_DATA_PA(mii_id) |
+ EMAC_MII_DATA_RA(dev_addr) |
+ EMAC_MII_DATA_TA | EMAC_MII_DATA(regnum),
+ priv->mdio_base + EMAC_MII_DATA_REG);
+
+ if (pfe_eth_mdio_timeout(priv, EMAC_MDIO_TIMEOUT)) {
+ dev_err(&bus->dev, "phy MDIO address write timeout\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int pfe_eth_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+ u16 value)
+{
+ struct pfe_mdio_priv_s *priv = (struct pfe_mdio_priv_s *)bus->priv;
+
+	/* To access external PHYs on the QDS board, configure the mux */
+ if ((mii_id) && (pfe->mdio_muxval[mii_id]))
+ pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
+
+ if (regnum & MII_ADDR_C45) {
+ pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
+ regnum & 0xffff);
+ __raw_writel(EMAC_MII_DATA_OP_CL45_WR |
+ EMAC_MII_DATA_PA(mii_id) |
+ EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
+ EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
+ priv->mdio_base + EMAC_MII_DATA_REG);
+ } else {
+ /* start a write op */
+ __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_WR |
+ EMAC_MII_DATA_PA(mii_id) |
+ EMAC_MII_DATA_RA(regnum) |
+ EMAC_MII_DATA_TA | EMAC_MII_DATA(value),
+ priv->mdio_base + EMAC_MII_DATA_REG);
+ }
+
+ if (pfe_eth_mdio_timeout(priv, EMAC_MDIO_TIMEOUT)) {
+ dev_err(&bus->dev, "%s: phy MDIO write timeout\n", __func__);
+ return -1;
+ }
+ return 0;
+}
+
+static int pfe_eth_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+ struct pfe_mdio_priv_s *priv = (struct pfe_mdio_priv_s *)bus->priv;
+ u16 value = 0;
+
+	/* To access external PHYs on the QDS board, configure the mux */
+ if ((mii_id) && (pfe->mdio_muxval[mii_id]))
+ pfe_eth_mdio_mux(pfe->mdio_muxval[mii_id]);
+
+ if (regnum & MII_ADDR_C45) {
+ pfe_eth_mdio_write_addr(bus, mii_id, (regnum >> 16) & 0x1f,
+ regnum & 0xffff);
+ __raw_writel(EMAC_MII_DATA_OP_CL45_RD |
+ EMAC_MII_DATA_PA(mii_id) |
+ EMAC_MII_DATA_RA((regnum >> 16) & 0x1f) |
+ EMAC_MII_DATA_TA,
+ priv->mdio_base + EMAC_MII_DATA_REG);
+ } else {
+ /* start a read op */
+ __raw_writel(EMAC_MII_DATA_ST | EMAC_MII_DATA_OP_RD |
+ EMAC_MII_DATA_PA(mii_id) |
+ EMAC_MII_DATA_RA(regnum) |
+ EMAC_MII_DATA_TA, priv->mdio_base +
+ EMAC_MII_DATA_REG);
+ }
+
+ if (pfe_eth_mdio_timeout(priv, EMAC_MDIO_TIMEOUT)) {
+ dev_err(&bus->dev, "%s: phy MDIO read timeout\n", __func__);
+ return -1;
+ }
+
+ value = EMAC_MII_DATA(__raw_readl(priv->mdio_base +
+ EMAC_MII_DATA_REG));
+ return value;
+}
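
Clause-45 accesses arrive with MII_ADDR_C45 set and the MMD device address in
bits 20:16 of regnum; a hedged sketch of a caller:

	static int example_c45_read(struct mii_bus *bus, int phy, int devad,
				    int reg)
	{
		return pfe_eth_mdio_read(bus, phy,
					 MII_ADDR_C45 | (devad << 16) |
					 (reg & 0xffff));
	}
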
+
+static int pfe_eth_mdio_init(struct pfe *pfe,
+ struct ls1012a_pfe_platform_data *pfe_info,
+ int ii)
+{
+ struct pfe_mdio_priv_s *priv = NULL;
+ struct ls1012a_mdio_platform_data *mdio_info;
+ struct mii_bus *bus;
+ struct device_node *mdio_node;
+ int rc = 0;
+
+ mdio_info = (struct ls1012a_mdio_platform_data *)
+ pfe_info->ls1012a_mdio_pdata;
+ mdio_info->id = ii;
+
+ bus = mdiobus_alloc_size(sizeof(struct pfe_mdio_priv_s));
+ if (!bus) {
+ pr_err("mdiobus_alloc() failed\n");
+ rc = -ENOMEM;
+ goto err_mdioalloc;
+ }
+
+ bus->name = "ls1012a MDIO Bus";
+ snprintf(bus->id, MII_BUS_ID_SIZE, "ls1012a-%x", mdio_info->id);
+
+ bus->read = &pfe_eth_mdio_read;
+ bus->write = &pfe_eth_mdio_write;
+ bus->reset = &pfe_eth_mdio_reset;
+ bus->parent = pfe->dev;
+ bus->phy_mask = mdio_info->phy_mask;
+ bus->irq[0] = mdio_info->irq[0];
+ priv = bus->priv;
+ priv->mdio_base = cbus_emac_base[ii];
+
+ priv->mdc_div = mdio_info->mdc_div;
+ if (!priv->mdc_div)
+ priv->mdc_div = 64;
+ dev_info(bus->parent, "%s: mdc_div: %d, phy_mask: %x\n",
+ __func__, priv->mdc_div, bus->phy_mask);
+
+ mdio_node = of_get_child_by_name(pfe->dev->of_node, "mdio");
+ if ((mdio_info->id == 0) && mdio_node) {
+ rc = of_mdiobus_register(bus, mdio_node);
+ of_node_put(mdio_node);
+ } else {
+ rc = mdiobus_register(bus);
+ }
+
+ if (rc) {
+ dev_err(bus->parent, "mdiobus_register(%s) failed\n",
+ bus->name);
+ goto err_mdioregister;
+ }
+
+ priv->mii_bus = bus;
+ pfe->mdio.mdio_priv[ii] = priv;
+
+ pfe_eth_mdio_reset(bus);
+
+ return 0;
+
+err_mdioregister:
+ mdiobus_free(bus);
+err_mdioalloc:
+ return rc;
+}
+
+/* pfe_eth_mdio_exit
+ */
+static void pfe_eth_mdio_exit(struct pfe *pfe,
+ int ii)
+{
+ struct pfe_mdio_priv_s *mdio_priv = pfe->mdio.mdio_priv[ii];
+ struct mii_bus *bus = mdio_priv->mii_bus;
+
+ if (!bus)
+ return;
+ mdiobus_unregister(bus);
+ mdiobus_free(bus);
+}
+
+/* pfe_get_phydev_speed
+ */
+static int pfe_get_phydev_speed(struct phy_device *phydev)
+{
+ switch (phydev->speed) {
+ case 10:
+ return SPEED_10M;
+ case 100:
+ return SPEED_100M;
+ case 1000:
+ default:
+ return SPEED_1000M;
+ }
+}
+
+/* pfe_set_rgmii_speed
+ */
+#define RGMIIPCR 0x434
+/* RGMIIPCR bit definitions*/
+#define SCFG_RGMIIPCR_EN_AUTO (0x00000008)
+#define SCFG_RGMIIPCR_SETSP_1000M (0x00000004)
+#define SCFG_RGMIIPCR_SETSP_100M (0x00000000)
+#define SCFG_RGMIIPCR_SETSP_10M (0x00000002)
+#define SCFG_RGMIIPCR_SETFD (0x00000001)
+
+#define MDIOSELCR 0x484
+#define MDIOSEL_SERDES 0x0
+#define MDIOSEL_EXTPHY 0x80000000
+
+static void pfe_set_rgmii_speed(struct phy_device *phydev)
+{
+ u32 rgmii_pcr;
+
+ regmap_read(pfe->scfg, RGMIIPCR, &rgmii_pcr);
+ rgmii_pcr &= ~(SCFG_RGMIIPCR_SETSP_1000M | SCFG_RGMIIPCR_SETSP_10M);
+
+ switch (phydev->speed) {
+ case 10:
+ rgmii_pcr |= SCFG_RGMIIPCR_SETSP_10M;
+ break;
+ case 1000:
+ rgmii_pcr |= SCFG_RGMIIPCR_SETSP_1000M;
+ break;
+ case 100:
+ default:
+ /* Default is 100M */
+ break;
+ }
+ regmap_write(pfe->scfg, RGMIIPCR, rgmii_pcr);
+}
+
+/* pfe_get_phydev_duplex
+ */
+static int pfe_get_phydev_duplex(struct phy_device *phydev)
+{
+ /*return (phydev->duplex == DUPLEX_HALF) ? DUP_HALF:DUP_FULL ; */
+ return DUPLEX_FULL;
+}
+
+/* pfe_eth_adjust_link
+ */
+static void pfe_eth_adjust_link(struct net_device *ndev)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+ unsigned long flags;
+ struct phy_device *phydev = priv->phydev;
+ int new_state = 0;
+
+ netif_info(priv, drv, ndev, "%s\n", __func__);
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (phydev->link) {
+ /*
+ * Now we make sure that we can be in full duplex mode.
+ * If not, we operate in half-duplex mode.
+ */
+ if (phydev->duplex != priv->oldduplex) {
+ new_state = 1;
+ gemac_set_duplex(priv->EMAC_baseaddr,
+ pfe_get_phydev_duplex(phydev));
+ priv->oldduplex = phydev->duplex;
+ }
+
+ if (phydev->speed != priv->oldspeed) {
+ new_state = 1;
+ gemac_set_speed(priv->EMAC_baseaddr,
+ pfe_get_phydev_speed(phydev));
+ if (priv->einfo->mii_config ==
+ PHY_INTERFACE_MODE_RGMII_TXID)
+ pfe_set_rgmii_speed(phydev);
+ priv->oldspeed = phydev->speed;
+ }
+
+ if (!priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 1;
+ }
+
+ } else if (priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+ }
+
+ if (new_state && netif_msg_link(priv))
+ phy_print_status(phydev);
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+	/* Now, dump the details to the cdev.
+	 * XXX: Is locking required here (uniprocessor arch)?
+	 * Or maybe move this inside the spinlock above.
+	 */
+ if (us && priv->einfo->gem_id < PFE_CDEV_ETH_COUNT) {
+ pr_debug("Changing link state from (%u) to (%u) for ID=(%u)\n",
+ link_states[priv->einfo->gem_id].state,
+ phydev->link,
+ priv->einfo->gem_id);
+ link_states[priv->einfo->gem_id].phy_id = priv->einfo->gem_id;
+ link_states[priv->einfo->gem_id].state = phydev->link;
+ }
+}
+
+/* pfe_phy_exit
+ */
+static void pfe_phy_exit(struct net_device *ndev)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+
+ netif_info(priv, drv, ndev, "%s\n", __func__);
+
+ phy_disconnect(priv->phydev);
+ priv->phydev = NULL;
+}
+
+/* pfe_eth_stop
+ */
+static void pfe_eth_stop(struct net_device *ndev, int wake)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+
+ netif_info(priv, drv, ndev, "%s\n", __func__);
+
+ if (wake) {
+ gemac_tx_disable(priv->EMAC_baseaddr);
+ } else {
+ gemac_disable(priv->EMAC_baseaddr);
+ gpi_disable(priv->GPI_baseaddr);
+
+ if (priv->phydev)
+ phy_stop(priv->phydev);
+ }
+}
+
+/* pfe_eth_start
+ */
+static int pfe_eth_start(struct pfe_eth_priv_s *priv)
+{
+ netif_info(priv, drv, priv->ndev, "%s\n", __func__);
+
+ if (priv->phydev)
+ phy_start(priv->phydev);
+
+ gpi_enable(priv->GPI_baseaddr);
+ gemac_enable(priv->EMAC_baseaddr);
+
+ return 0;
+}
+
+/*
+ * Configure on chip serdes through mdio
+ */
+static void ls1012a_configure_serdes(struct net_device *ndev)
+{
+ struct pfe_eth_priv_s *eth_priv = netdev_priv(ndev);
+ struct pfe_mdio_priv_s *mdio_priv = pfe->mdio.mdio_priv[eth_priv->id];
+ int sgmii_2500 = 0;
+ struct mii_bus *bus = mdio_priv->mii_bus;
+ u16 value = 0;
+
+ if (eth_priv->einfo->mii_config == PHY_INTERFACE_MODE_2500SGMII)
+ sgmii_2500 = 1;
+
+ netif_info(eth_priv, drv, ndev, "%s\n", __func__);
+ /* PCS configuration done with corresponding GEMAC */
+
+ pfe_eth_mdio_read(bus, 0, MDIO_SGMII_CR);
+ pfe_eth_mdio_read(bus, 0, MDIO_SGMII_SR);
+
+ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_CR, SGMII_CR_RST);
+
+ if (sgmii_2500) {
+ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_IF_MODE, SGMII_SPEED_1GBPS
+ | SGMII_EN);
+ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_DEV_ABIL_SGMII,
+ SGMII_DEV_ABIL_ACK | SGMII_DEV_ABIL_SGMII);
+ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_LINK_TMR_L, 0xa120);
+ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_LINK_TMR_H, 0x7);
+		/* Autonegotiation needs to be disabled for 2.5G SGMII mode */
+ value = SGMII_CR_FD | SGMII_CR_SPEED_SEL1_1G;
+ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_CR, value);
+ } else {
+ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_IF_MODE,
+ SGMII_SPEED_1GBPS
+ | SGMII_USE_SGMII_AN
+ | SGMII_EN);
+ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_DEV_ABIL_SGMII,
+ SGMII_DEV_ABIL_EEE_CLK_STP_EN
+ | 0xa0
+ | SGMII_DEV_ABIL_SGMII);
+ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_LINK_TMR_L, 0x400);
+ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_LINK_TMR_H, 0x0);
+ value = SGMII_CR_AN_EN | SGMII_CR_FD | SGMII_CR_SPEED_SEL1_1G;
+ pfe_eth_mdio_write(bus, 0, MDIO_SGMII_CR, value);
+ }
+}
+
+/*
+ * pfe_phy_init
+ *
+ */
+static int pfe_phy_init(struct net_device *ndev)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+ struct phy_device *phydev;
+ char phy_id[MII_BUS_ID_SIZE + 3];
+ char bus_id[MII_BUS_ID_SIZE];
+ phy_interface_t interface;
+
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+
+ snprintf(bus_id, MII_BUS_ID_SIZE, "ls1012a-%d", 0);
+ snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+ priv->einfo->phy_id);
+ netif_info(priv, drv, ndev, "%s: %s\n", __func__, phy_id);
+ interface = priv->einfo->mii_config;
+ if ((interface == PHY_INTERFACE_MODE_SGMII) ||
+ (interface == PHY_INTERFACE_MODE_2500SGMII)) {
+ /*Configure SGMII PCS */
+ if (pfe->scfg) {
+ /* Config MDIO from serdes */
+ regmap_write(pfe->scfg, MDIOSELCR, MDIOSEL_SERDES);
+ }
+ ls1012a_configure_serdes(ndev);
+ }
+
+ if (pfe->scfg) {
+ /*Config MDIO from PAD */
+ regmap_write(pfe->scfg, MDIOSELCR, MDIOSEL_EXTPHY);
+ }
+
+ pr_info("%s interface %x\n", __func__, interface);
+
+ if (priv->phy_node) {
+ phydev = of_phy_connect(ndev, priv->phy_node,
+ pfe_eth_adjust_link, 0,
+ priv->einfo->mii_config);
+ if (!(phydev)) {
+ netdev_err(ndev, "Unable to connect to phy\n");
+ return -ENODEV;
+ }
+
+ } else {
+ phydev = phy_connect(ndev, phy_id,
+ &pfe_eth_adjust_link, interface);
+ if (IS_ERR(phydev)) {
+ netdev_err(ndev, "Unable to connect to phy\n");
+ return PTR_ERR(phydev);
+ }
+ }
+
+ priv->phydev = phydev;
+ phydev->irq = PHY_POLL;
+
+ return 0;
+}
+
+/* pfe_gemac_init
+ */
+static int pfe_gemac_init(struct pfe_eth_priv_s *priv)
+{
+ struct gemac_cfg cfg;
+
+ netif_info(priv, ifup, priv->ndev, "%s\n", __func__);
+
+ cfg.speed = SPEED_1000M;
+ cfg.duplex = DUPLEX_FULL;
+
+ gemac_set_config(priv->EMAC_baseaddr, &cfg);
+ gemac_allow_broadcast(priv->EMAC_baseaddr);
+ gemac_enable_1536_rx(priv->EMAC_baseaddr);
+ gemac_enable_stacked_vlan(priv->EMAC_baseaddr);
+ gemac_enable_pause_rx(priv->EMAC_baseaddr);
+ gemac_set_bus_width(priv->EMAC_baseaddr, 64);
+
+ /*GEM will perform checksum verifications*/
+ if (priv->ndev->features & NETIF_F_RXCSUM)
+ gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
+ else
+ gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
+
+ return 0;
+}
+
+/* pfe_eth_event_handler
+ */
+static int pfe_eth_event_handler(void *data, int event, int qno)
+{
+ struct pfe_eth_priv_s *priv = data;
+
+ switch (event) {
+ case EVENT_RX_PKT_IND:
+
+ if (qno == 0) {
+ if (napi_schedule_prep(&priv->high_napi)) {
+ netif_info(priv, intr, priv->ndev,
+ "%s: schedule high prio poll\n"
+ , __func__);
+
+#ifdef PFE_ETH_NAPI_STATS
+ priv->napi_counters[NAPI_SCHED_COUNT]++;
+#endif
+
+ __napi_schedule(&priv->high_napi);
+ }
+ } else if (qno == 1) {
+ if (napi_schedule_prep(&priv->low_napi)) {
+ netif_info(priv, intr, priv->ndev,
+ "%s: schedule low prio poll\n"
+ , __func__);
+
+#ifdef PFE_ETH_NAPI_STATS
+ priv->napi_counters[NAPI_SCHED_COUNT]++;
+#endif
+ __napi_schedule(&priv->low_napi);
+ }
+ } else if (qno == 2) {
+ if (napi_schedule_prep(&priv->lro_napi)) {
+ netif_info(priv, intr, priv->ndev,
+ "%s: schedule lro prio poll\n"
+ , __func__);
+
+#ifdef PFE_ETH_NAPI_STATS
+ priv->napi_counters[NAPI_SCHED_COUNT]++;
+#endif
+ __napi_schedule(&priv->lro_napi);
+ }
+ }
+
+ break;
+
+ case EVENT_TXDONE_IND:
+ pfe_eth_flush_tx(priv);
+ hif_lib_event_handler_start(&priv->client, EVENT_TXDONE_IND, 0);
+ break;
+ case EVENT_HIGH_RX_WM:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int pfe_eth_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+
+ ndev->mtu = new_mtu;
+ new_mtu += ETH_HLEN + ETH_FCS_LEN;
+ gemac_set_rx_max_fl(priv->EMAC_baseaddr, new_mtu);
+
+ return 0;
+}
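
For a standard 1500-byte MTU this programs a maximum frame length of
1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518 bytes into the GEMAC.
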
+
+/* pfe_eth_open
+ */
+static int pfe_eth_open(struct net_device *ndev)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+ struct hif_client_s *client;
+ int rc;
+
+ netif_info(priv, ifup, ndev, "%s\n", __func__);
+
+ /* Register client driver with HIF */
+ client = &priv->client;
+ memset(client, 0, sizeof(*client));
+ client->id = PFE_CL_GEM0 + priv->id;
+ client->tx_qn = emac_txq_cnt;
+ client->rx_qn = EMAC_RXQ_CNT;
+ client->priv = priv;
+ client->pfe = priv->pfe;
+ client->event_handler = pfe_eth_event_handler;
+
+ client->tx_qsize = EMAC_TXQ_DEPTH;
+ client->rx_qsize = EMAC_RXQ_DEPTH;
+
+ rc = hif_lib_client_register(client);
+ if (rc) {
+ netdev_err(ndev, "%s: hif_lib_client_register(%d) failed\n",
+ __func__, client->id);
+ goto err0;
+ }
+
+ netif_info(priv, drv, ndev, "%s: registered client: %p\n", __func__,
+ client);
+
+ pfe_gemac_init(priv);
+
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ netdev_err(ndev, "%s: invalid MAC address\n", __func__);
+ rc = -EADDRNOTAVAIL;
+ goto err1;
+ }
+
+ gemac_set_laddrN(priv->EMAC_baseaddr,
+ (struct pfe_mac_addr *)ndev->dev_addr, 1);
+
+ napi_enable(&priv->high_napi);
+ napi_enable(&priv->low_napi);
+ napi_enable(&priv->lro_napi);
+
+ rc = pfe_eth_start(priv);
+
+ netif_tx_wake_all_queues(ndev);
+
+ return rc;
+
+err1:
+ hif_lib_client_unregister(&priv->client);
+
+err0:
+ return rc;
+}
+
+/*
+ * pfe_eth_shutdown
+ */
+int pfe_eth_shutdown(struct net_device *ndev, int wake)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+ int i, qstatus;
+ unsigned long next_poll = jiffies + 1, end = jiffies +
+ (TX_POLL_TIMEOUT_MS * HZ) / 1000;
+ int tx_pkts, prv_tx_pkts;
+
+ netif_info(priv, ifdown, ndev, "%s\n", __func__);
+
+ for (i = 0; i < emac_txq_cnt; i++)
+ hrtimer_cancel(&priv->fast_tx_timeout[i].timer);
+
+ netif_tx_stop_all_queues(ndev);
+
+ do {
+ tx_pkts = 0;
+ pfe_eth_flush_tx(priv);
+
+ for (i = 0; i < emac_txq_cnt; i++)
+ tx_pkts += hif_lib_tx_pending(&priv->client, i);
+
+ if (tx_pkts) {
+ /* Don't wait forever; break once the max timeout expires */
+ if (time_after(jiffies, end)) {
+ pr_err(
+ "(%s)Tx is not complete after %dmsec\n",
+ ndev->name, TX_POLL_TIMEOUT_MS);
+ break;
+ }
+
+ pr_info("%s : (%s) Waiting for tx packets to free. Pending tx pkts = %d.\n"
+ , __func__, ndev->name, tx_pkts);
+ if (need_resched())
+ schedule();
+ }
+
+ } while (tx_pkts);
+
+ end = jiffies + (TX_POLL_TIMEOUT_MS * HZ) / 1000;
+
+ prv_tx_pkts = tmu_pkts_processed(priv->id);
+ /*
+ * Wait until the TMU has transmitted all pending packets:
+ * poll tmu_qstatus and the number of packets processed by the
+ * TMU once per jiffy, and consider the TMU busy as long as its
+ * queue is non-empty or it is still processing packets.
+ */
+ while (1) {
+ if (time_after(jiffies, next_poll)) {
+ tx_pkts = tmu_pkts_processed(priv->id);
+ qstatus = tmu_qstatus(priv->id) & 0x7ffff;
+
+ if (!qstatus && (tx_pkts == prv_tx_pkts))
+ break;
+ /* Don't wait forever, break if we cross max
+ * timeout(TX_POLL_TIMEOUT_MS)
+ */
+ if (time_after(jiffies, end)) {
+ pr_err("TMU%d is busy after %dmsec\n",
+ priv->id, TX_POLL_TIMEOUT_MS);
+ break;
+ }
+ prv_tx_pkts = tx_pkts;
+ next_poll++;
+ }
+ if (need_resched())
+ schedule();
+ }
+ /* Give any remaining in-flight packets a little more time to complete */
+ next_poll = jiffies + 1;
+ while (1) {
+ if (time_after(jiffies, next_poll))
+ break;
+ if (need_resched())
+ schedule();
+ }
+
+ pfe_eth_stop(ndev, wake);
+
+ napi_disable(&priv->lro_napi);
+ napi_disable(&priv->low_napi);
+ napi_disable(&priv->high_napi);
+
+ hif_lib_client_unregister(&priv->client);
+
+ return 0;
+}
+
+/* pfe_eth_close
+ *
+ */
+static int pfe_eth_close(struct net_device *ndev)
+{
+ pfe_eth_shutdown(ndev, 0);
+
+ return 0;
+}
+
+/* pfe_eth_suspend
+ *
+ * return value: 1 if the netdevice is configured to wake up the system,
+ * 0 otherwise
+ */
+int pfe_eth_suspend(struct net_device *ndev)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+ int retval = 0;
+
+ if (priv->wol) {
+ gemac_set_wol(priv->EMAC_baseaddr, priv->wol);
+ retval = 1;
+ }
+ pfe_eth_shutdown(ndev, priv->wol);
+
+ return retval;
+}
+
+/* pfe_eth_resume
+ *
+ */
+int pfe_eth_resume(struct net_device *ndev)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+
+ if (priv->wol)
+ gemac_set_wol(priv->EMAC_baseaddr, 0);
+ gemac_tx_enable(priv->EMAC_baseaddr);
+
+ return pfe_eth_open(ndev);
+}
+
+/* pfe_eth_get_queuenum
+ */
+static int pfe_eth_get_queuenum(struct pfe_eth_priv_s *priv, struct sk_buff
+ *skb)
+{
+ int queuenum = 0;
+ unsigned long flags;
+
+ /*
+ * Get the fast path queue number: use the conntrack mark if one
+ * exists, then the packet mark (if any), and finally fall back
+ * to the default priority.
+ */
+#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
+ if (skb->_nfct) {
+ enum ip_conntrack_info cinfo;
+ struct nf_conn *ct;
+
+ ct = nf_ct_get(skb, &cinfo);
+
+ if (ct) {
+ u32 connmark;
+
+ connmark = ct->mark;
+
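+ /*
+ * By convention here, when bit 31 of the conntrack mark
+ * is set, the upper halfword appears to carry the queue
+ * mark for the second GEMAC (inferred from the shift
+ * below).
+ */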
+ if ((connmark & 0x80000000) && priv->id != 0)
+ connmark >>= 16;
+
+ queuenum = connmark & EMAC_QUEUENUM_MASK;
+ }
+ } else {/* continued after #endif ... */
+#endif
+ if (skb->mark) {
+ queuenum = skb->mark & EMAC_QUEUENUM_MASK;
+ } else {
+ spin_lock_irqsave(&priv->lock, flags);
+ queuenum = priv->default_priority & EMAC_QUEUENUM_MASK;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
+#if defined(CONFIG_IP_NF_CONNTRACK_MARK) || defined(CONFIG_NF_CONNTRACK_MARK)
+ }
+#endif
+ return queuenum;
+}
+
+/* pfe_eth_might_stop_tx
+ *
+ */
+static int pfe_eth_might_stop_tx(struct pfe_eth_priv_s *priv, int queuenum,
+ struct netdev_queue *tx_queue,
+ unsigned int n_desc,
+ unsigned int n_segs)
+{
+ ktime_t kt;
+ int tried = 0;
+
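+ /*
+ * Tx may proceed only if enough HIF descriptors, client queue
+ * slots and TMU credits are all available; otherwise the queue
+ * is stopped and the fast-recovery timer armed. The check is
+ * retried once after refreshing the credit count.
+ */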
+try_again:
+ if (unlikely((__hif_tx_avail(&pfe->hif) < n_desc) ||
+ (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) ||
+ (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) < n_segs))) {
+ if (!tried) {
+ __hif_lib_update_credit(&priv->client, queuenum);
+ tried = 1;
+ goto try_again;
+ }
+#ifdef PFE_ETH_TX_STATS
+ if (__hif_tx_avail(&pfe->hif) < n_desc) {
+ priv->stop_queue_hif[queuenum]++;
+ } else if (hif_lib_tx_avail(&priv->client, queuenum) < n_desc) {
+ priv->stop_queue_hif_client[queuenum]++;
+ } else if (hif_lib_tx_credit_avail(pfe, priv->id, queuenum) <
+ n_segs) {
+ priv->stop_queue_credit[queuenum]++;
+ }
+ priv->stop_queue_total[queuenum]++;
+#endif
+ netif_tx_stop_queue(tx_queue);
+
+ kt = ktime_set(0, LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS *
+ NSEC_PER_MSEC);
+ hrtimer_start(&priv->fast_tx_timeout[queuenum].timer, kt,
+ HRTIMER_MODE_REL);
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+#define SA_MAX_OP 2
+/* pfe_hif_send_packet
+ *
+ * At this level, if the TX fails, we drop the packet
+ */
+static void pfe_hif_send_packet(struct sk_buff *skb, struct pfe_eth_priv_s
+ *priv, int queuenum)
+{
+ struct skb_shared_info *sh = skb_shinfo(skb);
+ unsigned int nr_frags;
+ u32 ctrl = 0;
+
+ netif_info(priv, tx_queued, priv->ndev, "%s\n", __func__);
+
+ if (skb_is_gso(skb)) {
+ priv->stats.tx_dropped++;
+ kfree_skb(skb);
+ return;
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ ctrl = HIF_CTRL_TX_CHECKSUM;
+
+ nr_frags = sh->nr_frags;
+
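+ /*
+ * A scattered skb is pushed to the HIF as a descriptor chain:
+ * the linear part goes first with HIF_FIRST_BUFFER, the middle
+ * fragments carry no flags, and the final fragment is tagged
+ * HIF_LAST_BUFFER | HIF_DATA_VALID so completion frees the skb.
+ */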
+ if (nr_frags) {
+ skb_frag_t *f;
+ int i;
+
+ __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
+ skb_headlen(skb), ctrl, HIF_FIRST_BUFFER,
+ skb);
+
+ for (i = 0; i < nr_frags - 1; i++) {
+ f = &sh->frags[i];
+ __hif_lib_xmit_pkt(&priv->client, queuenum,
+ skb_frag_address(f),
+ skb_frag_size(f),
+ 0x0, 0x0, skb);
+ }
+
+ f = &sh->frags[i];
+
+ __hif_lib_xmit_pkt(&priv->client, queuenum,
+ skb_frag_address(f), skb_frag_size(f),
+ 0x0, HIF_LAST_BUFFER | HIF_DATA_VALID,
+ skb);
+
+ netif_info(priv, tx_queued, priv->ndev,
+ "%s: pkt sent successfully skb:%p nr_frags:%d len:%d\n",
+ __func__, skb, nr_frags, skb->len);
+ } else {
+ __hif_lib_xmit_pkt(&priv->client, queuenum, skb->data,
+ skb->len, ctrl, HIF_FIRST_BUFFER |
+ HIF_LAST_BUFFER | HIF_DATA_VALID,
+ skb);
+ netif_info(priv, tx_queued, priv->ndev,
+ "%s: pkt sent successfully skb:%p len:%d\n",
+ __func__, skb, skb->len);
+ }
+ hif_tx_dma_start();
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += skb->len;
+ hif_lib_tx_credit_use(pfe, priv->id, queuenum, 1);
+}
+
+/* pfe_eth_flush_txQ
+ */
+static void pfe_eth_flush_txQ(struct pfe_eth_priv_s *priv, int tx_q_num, int
+ from_tx, int n_desc)
+{
+ struct sk_buff *skb;
+ struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
+ tx_q_num);
+ unsigned int flags;
+
+ netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
+
+ if (!from_tx)
+ __netif_tx_lock_bh(tx_queue);
+
+ /* Clean HIF and client queue */
+ while ((skb = hif_lib_tx_get_next_complete(&priv->client,
+ tx_q_num, &flags,
+ HIF_TX_DESC_NT))) {
+ if (flags & HIF_DATA_VALID)
+ dev_kfree_skb_any(skb);
+ }
+ if (!from_tx)
+ __netif_tx_unlock_bh(tx_queue);
+}
+
+/* pfe_eth_flush_tx
+ */
+static void pfe_eth_flush_tx(struct pfe_eth_priv_s *priv)
+{
+ int ii;
+
+ netif_info(priv, tx_done, priv->ndev, "%s\n", __func__);
+
+ for (ii = 0; ii < emac_txq_cnt; ii++) {
+ pfe_eth_flush_txQ(priv, ii, 0, 0);
+ __hif_lib_update_credit(&priv->client, ii);
+ }
+}
+
+void pfe_tx_get_req_desc(struct sk_buff *skb, unsigned int *n_desc, unsigned int
+ *n_segs)
+{
+ struct skb_shared_info *sh = skb_shinfo(skb);
+
+ if (sh->nr_frags) {
+ /* Scattered data */
+ *n_desc = sh->nr_frags + 1;
+ *n_segs = 1;
+ } else {
+ /* Regular case */
+ *n_desc = 1;
+ *n_segs = 1;
+ }
+}
+
+/* pfe_eth_send_packet
+ */
+static int pfe_eth_send_packet(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+ int tx_q_num = skb_get_queue_mapping(skb);
+ int n_desc, n_segs;
+ struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
+ tx_q_num);
+
+ netif_info(priv, tx_queued, ndev, "%s\n", __func__);
+
+ if ((!skb_is_gso(skb)) && (skb_headroom(skb) < (PFE_PKT_HEADER_SZ +
+ sizeof(unsigned long)))) {
+ netif_warn(priv, tx_err, priv->ndev, "%s: copying skb\n",
+ __func__);
+
+ if (pskb_expand_head(skb, (PFE_PKT_HEADER_SZ + sizeof(unsigned
+ long)), 0, GFP_ATOMIC)) {
+ /* No need to re-transmit, no way to recover */
+ kfree_skb(skb);
+ priv->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ }
+
+ pfe_tx_get_req_desc(skb, &n_desc, &n_segs);
+
+ hif_tx_lock(&pfe->hif);
+ if (unlikely(pfe_eth_might_stop_tx(priv, tx_q_num, tx_queue, n_desc,
+ n_segs))) {
+#ifdef PFE_ETH_TX_STATS
+ if (priv->was_stopped[tx_q_num]) {
+ priv->clean_fail[tx_q_num]++;
+ priv->was_stopped[tx_q_num] = 0;
+ }
+#endif
+ hif_tx_unlock(&pfe->hif);
+ return NETDEV_TX_BUSY;
+ }
+
+ pfe_hif_send_packet(skb, priv, tx_q_num);
+
+ hif_tx_unlock(&pfe->hif);
+
+ tx_queue->trans_start = jiffies;
+
+#ifdef PFE_ETH_TX_STATS
+ priv->was_stopped[tx_q_num] = 0;
+#endif
+
+ return NETDEV_TX_OK;
+}
+
+/* pfe_eth_select_queue
+ *
+ */
+static u16 pfe_eth_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+
+ return pfe_eth_get_queuenum(priv, skb);
+}
+
+/* pfe_eth_get_stats
+ */
+static struct net_device_stats *pfe_eth_get_stats(struct net_device *ndev)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+
+ netif_info(priv, drv, ndev, "%s\n", __func__);
+
+ return &priv->stats;
+}
+
+/* pfe_eth_set_mac_address
+ */
+static int pfe_eth_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+ struct sockaddr *sa = addr;
+
+ netif_info(priv, drv, ndev, "%s\n", __func__);
+
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(ndev->dev_addr, sa->sa_data, ETH_ALEN);
+
+ gemac_set_laddrN(priv->EMAC_baseaddr,
+ (struct pfe_mac_addr *)ndev->dev_addr, 1);
+
+ return 0;
+}
+
+/* pfe_eth_enet_addr_byte_mac
+ */
+int pfe_eth_enet_addr_byte_mac(u8 *enet_byte_addr,
+ struct pfe_mac_addr *enet_addr)
+{
+ if (!enet_byte_addr || !enet_addr) {
+ return -1;
+
+ } else {
+ enet_addr->bottom = enet_byte_addr[0] |
+ (enet_byte_addr[1] << 8) |
+ (enet_byte_addr[2] << 16) |
+ (enet_byte_addr[3] << 24);
+ enet_addr->top = enet_byte_addr[4] |
+ (enet_byte_addr[5] << 8);
+ return 0;
+ }
+}
+
+/* pfe_eth_set_multi
+ */
+static void pfe_eth_set_multi(struct net_device *ndev)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+ struct pfe_mac_addr hash_addr; /* hash register structure */
+ /* specific mac address register structure */
+ struct pfe_mac_addr spec_addr;
+ int result; /* index into hash register to set */
+ int uc_count = 0;
+ struct netdev_hw_addr *ha;
+
+ if (ndev->flags & IFF_PROMISC) {
+ netif_info(priv, drv, ndev, "entering promiscuous mode\n");
+
+ priv->promisc = 1;
+ gemac_enable_copy_all(priv->EMAC_baseaddr);
+ } else {
+ priv->promisc = 0;
+ gemac_disable_copy_all(priv->EMAC_baseaddr);
+ }
+
+ /* Enable broadcast frame reception if required. */
+ if (ndev->flags & IFF_BROADCAST) {
+ gemac_allow_broadcast(priv->EMAC_baseaddr);
+ } else {
+ netif_info(priv, drv, ndev,
+ "disabling broadcast frame reception\n");
+
+ gemac_no_broadcast(priv->EMAC_baseaddr);
+ }
+
+ if (ndev->flags & IFF_ALLMULTI) {
+ /* Set the hash to rx all multicast frames */
+ hash_addr.bottom = 0xFFFFFFFF;
+ hash_addr.top = 0xFFFFFFFF;
+ gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
+ netdev_for_each_uc_addr(ha, ndev) {
+ if (uc_count >= MAX_UC_SPEC_ADDR_REG)
+ break;
+ pfe_eth_enet_addr_byte_mac(ha->addr, &spec_addr);
+ gemac_set_laddrN(priv->EMAC_baseaddr, &spec_addr,
+ uc_count + 2);
+ uc_count++;
+ }
+ } else if ((netdev_mc_count(ndev) > 0) || (netdev_uc_count(ndev))) {
+ u8 *addr;
+
+ hash_addr.bottom = 0;
+ hash_addr.top = 0;
+
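+ /*
+ * Each multicast address hashes to one of
+ * EMAC_HASH_REG_BITS bits: bits 0-31 select the bottom
+ * hash register, the remainder the top one.
+ */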
+ netdev_for_each_mc_addr(ha, ndev) {
+ addr = ha->addr;
+
+ netif_info(priv, drv, ndev,
+ "adding multicast address %X:%X:%X:%X:%X:%X to gem filter\n",
+ addr[0], addr[1], addr[2],
+ addr[3], addr[4], addr[5]);
+
+ result = pfe_eth_get_hash(addr);
+
+ if (result < EMAC_HASH_REG_BITS) {
+ if (result < 32)
+ hash_addr.bottom |= (1 << result);
+ else
+ hash_addr.top |= (1 << (result - 32));
+ } else {
+ break;
+ }
+ }
+
+ uc_count = -1;
+ netdev_for_each_uc_addr(ha, ndev) {
+ addr = ha->addr;
+
+ if (++uc_count < MAX_UC_SPEC_ADDR_REG) {
+ netdev_info(ndev,
+ "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem filter\n",
+ addr[0], addr[1], addr[2],
+ addr[3], addr[4], addr[5]);
+ pfe_eth_enet_addr_byte_mac(addr, &spec_addr);
+ gemac_set_laddrN(priv->EMAC_baseaddr,
+ &spec_addr, uc_count + 2);
+ } else {
+ netif_info(priv, drv, ndev,
+ "adding unicast address %02x:%02x:%02x:%02x:%02x:%02x to gem hash\n",
+ addr[0], addr[1], addr[2],
+ addr[3], addr[4], addr[5]);
+
+ result = pfe_eth_get_hash(addr);
+ if (result >= EMAC_HASH_REG_BITS) {
+ break;
+
+ } else {
+ if (result < 32)
+ hash_addr.bottom |= (1 <<
+ result);
+ else
+ hash_addr.top |= (1 <<
+ (result - 32));
+ }
+ }
+ }
+
+ gemac_set_hash(priv->EMAC_baseaddr, &hash_addr);
+ }
+
+ if (netdev_uc_count(ndev) < MAX_UC_SPEC_ADDR_REG) {
+ /*
+ * Check if there are any specific address HW registers that
+ * need to be flushed
+ */
+ for (uc_count = netdev_uc_count(ndev); uc_count <
+ MAX_UC_SPEC_ADDR_REG; uc_count++)
+ gemac_clear_laddrN(priv->EMAC_baseaddr, uc_count + 2);
+ }
+
+ if (ndev->flags & IFF_LOOPBACK)
+ gemac_set_loop(priv->EMAC_baseaddr, LB_LOCAL);
+}
+
+/* pfe_eth_set_features
+ */
+static int pfe_eth_set_features(struct net_device *ndev, netdev_features_t
+ features)
+{
+ struct pfe_eth_priv_s *priv = netdev_priv(ndev);
+ int rc = 0;
+
+ if (features & NETIF_F_RXCSUM)
+ gemac_enable_rx_checksum_offload(priv->EMAC_baseaddr);
+ else
+ gemac_disable_rx_checksum_offload(priv->EMAC_baseaddr);
+ return rc;
+}
+
+/* pfe_eth_fast_tx_timeout
+ */
+static enum hrtimer_restart pfe_eth_fast_tx_timeout(struct hrtimer *timer)
+{
+ struct pfe_eth_fast_timer *fast_tx_timeout = container_of(timer, struct
+ pfe_eth_fast_timer,
+ timer);
+ struct pfe_eth_priv_s *priv = container_of(fast_tx_timeout->base,
+ struct pfe_eth_priv_s,
+ fast_tx_timeout);
+ struct netdev_queue *tx_queue = netdev_get_tx_queue(priv->ndev,
+ fast_tx_timeout->queuenum);
+
+ if (netif_tx_queue_stopped(tx_queue)) {
+#ifdef PFE_ETH_TX_STATS
+ priv->was_stopped[fast_tx_timeout->queuenum] = 1;
+#endif
+ netif_tx_wake_queue(tx_queue);
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+/* pfe_eth_fast_tx_timeout_init
+ */
+static void pfe_eth_fast_tx_timeout_init(struct pfe_eth_priv_s *priv)
+{
+ int i;
+
+ for (i = 0; i < emac_txq_cnt; i++) {
+ priv->fast_tx_timeout[i].queuenum = i;
+ hrtimer_init(&priv->fast_tx_timeout[i].timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ priv->fast_tx_timeout[i].timer.function =
+ pfe_eth_fast_tx_timeout;
+ priv->fast_tx_timeout[i].base = priv->fast_tx_timeout;
+ }
+}
+
+static struct sk_buff *pfe_eth_rx_skb(struct net_device *ndev,
+ struct pfe_eth_priv_s *priv,
+ unsigned int qno)
+{
+ void *buf_addr;
+ unsigned int rx_ctrl;
+ unsigned int desc_ctrl = 0;
+ struct hif_ipsec_hdr *ipsec_hdr = NULL;
+ struct sk_buff *skb;
+ struct sk_buff *skb_frag, *skb_frag_last = NULL;
+ int length = 0, offset;
+
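+ /*
+ * A multi-buffer packet may span several poll calls; resume from
+ * any partially assembled skb left in flight for this queue and
+ * keep appending to the tail of its frag_list.
+ */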
+ skb = priv->skb_inflight[qno];
+
+ if (skb) {
+ skb_frag_last = skb_shinfo(skb)->frag_list;
+ if (skb_frag_last) {
+ while (skb_frag_last->next)
+ skb_frag_last = skb_frag_last->next;
+ }
+ }
+
+ while (!(desc_ctrl & CL_DESC_LAST)) {
+ buf_addr = hif_lib_receive_pkt(&priv->client, qno, &length,
+ &offset, &rx_ctrl, &desc_ctrl,
+ (void **)&ipsec_hdr);
+ if (!buf_addr)
+ goto incomplete;
+
+#ifdef PFE_ETH_NAPI_STATS
+ priv->napi_counters[NAPI_DESC_COUNT]++;
+#endif
+
+ /* First frag */
+ if (desc_ctrl & CL_DESC_FIRST) {
+ skb = build_skb(buf_addr, 0);
+ if (unlikely(!skb))
+ goto pkt_drop;
+
+ skb_reserve(skb, offset);
+ skb_put(skb, length);
+ skb->dev = ndev;
+
+ if ((ndev->features & NETIF_F_RXCSUM) && (rx_ctrl &
+ HIF_CTRL_RX_CHECKSUMMED))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+
+ } else {
+ /* Next frags */
+ if (unlikely(!skb)) {
+ pr_err("%s: NULL skb_inflight\n",
+ __func__);
+ goto pkt_drop;
+ }
+
+ skb_frag = build_skb(buf_addr, 0);
+
+ if (unlikely(!skb_frag)) {
+ kfree(buf_addr);
+ goto pkt_drop;
+ }
+
+ skb_reserve(skb_frag, offset);
+ skb_put(skb_frag, length);
+
+ skb_frag->dev = ndev;
+
+ if (skb_shinfo(skb)->frag_list)
+ skb_frag_last->next = skb_frag;
+ else
+ skb_shinfo(skb)->frag_list = skb_frag;
+
+ skb->truesize += skb_frag->truesize;
+ skb->data_len += length;
+ skb->len += length;
+ skb_frag_last = skb_frag;
+ }
+ }
+
+ priv->skb_inflight[qno] = NULL;
+ return skb;
+
+incomplete:
+ priv->skb_inflight[qno] = skb;
+ return NULL;
+
+pkt_drop:
+ priv->skb_inflight[qno] = NULL;
+
+ if (skb)
+ kfree_skb(skb);
+ else
+ kfree(buf_addr);
+
+ priv->stats.rx_errors++;
+
+ return NULL;
+}
+
+/* pfe_eth_poll
+ */
+static int pfe_eth_poll(struct pfe_eth_priv_s *priv, struct napi_struct *napi,
+ unsigned int qno, int budget)
+{
+ struct net_device *ndev = priv->ndev;
+ struct sk_buff *skb;
+ int work_done = 0;
+ unsigned int len;
+
+ netif_info(priv, intr, priv->ndev, "%s\n", __func__);
+
+#ifdef PFE_ETH_NAPI_STATS
+ priv->napi_counters[NAPI_POLL_COUNT]++;
+#endif
+
+ do {
+ skb = pfe_eth_rx_skb(ndev, priv, qno);
+
+ if (!skb)
+ break;
+
+ len = skb->len;
+
+ /* Packet will be processed */
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ netif_receive_skb(skb);
+
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += len;
+
+ work_done++;
+
+#ifdef PFE_ETH_NAPI_STATS
+ priv->napi_counters[NAPI_PACKET_COUNT]++;
+#endif
+
+ } while (work_done < budget);
+
+ /*
+ * If the budget was not exhausted, no more Rx work is pending:
+ * exit polling mode. No netif_running(dev) check is required
+ * here, as this is already checked in net/core/dev.c.
+ */
+ if (work_done < budget) {
+ napi_complete(napi);
+
+ hif_lib_event_handler_start(&priv->client, EVENT_RX_PKT_IND,
+ qno);
+ }
+#ifdef PFE_ETH_NAPI_STATS
+ else
+ priv->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
+#endif
+
+ return work_done;
+}
+
+/*
+ * pfe_eth_lro_poll
+ */
+static int pfe_eth_lro_poll(struct napi_struct *napi, int budget)
+{
+ struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
+ lro_napi);
+
+ netif_info(priv, intr, priv->ndev, "%s\n", __func__);
+
+ return pfe_eth_poll(priv, napi, 2, budget);
+}
+
+/* pfe_eth_low_poll
+ */
+static int pfe_eth_low_poll(struct napi_struct *napi, int budget)
+{
+ struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
+ low_napi);
+
+ netif_info(priv, intr, priv->ndev, "%s\n", __func__);
+
+ return pfe_eth_poll(priv, napi, 1, budget);
+}
+
+/* pfe_eth_high_poll
+ */
+static int pfe_eth_high_poll(struct napi_struct *napi, int budget)
+{
+ struct pfe_eth_priv_s *priv = container_of(napi, struct pfe_eth_priv_s,
+ high_napi);
+
+ netif_info(priv, intr, priv->ndev, "%s\n", __func__);
+
+ return pfe_eth_poll(priv, napi, 0, budget);
+}
+
+static const struct net_device_ops pfe_netdev_ops = {
+ .ndo_open = pfe_eth_open,
+ .ndo_stop = pfe_eth_close,
+ .ndo_start_xmit = pfe_eth_send_packet,
+ .ndo_select_queue = pfe_eth_select_queue,
+ .ndo_set_rx_mode = pfe_eth_set_multi,
+ .ndo_set_mac_address = pfe_eth_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = pfe_eth_change_mtu,
+ .ndo_get_stats = pfe_eth_get_stats,
+ .ndo_set_features = pfe_eth_set_features,
+};
+
+/* pfe_eth_init_one
+ */
+static int pfe_eth_init_one(struct pfe *pfe,
+ struct ls1012a_pfe_platform_data *pfe_info,
+ int id)
+{
+ struct net_device *ndev = NULL;
+ struct pfe_eth_priv_s *priv = NULL;
+ struct ls1012a_eth_platform_data *einfo;
+ int err;
+
+ einfo = (struct ls1012a_eth_platform_data *)
+ pfe_info->ls1012a_eth_pdata;
+
+ /* einfo should never be NULL, but there is no harm in checking */
+ if (!einfo) {
+ pr_err(
+ "%s: pfe missing additional gemacs platform data\n"
+ , __func__);
+ err = -ENODEV;
+ goto err0;
+ }
+
+ if (us)
+ emac_txq_cnt = EMAC_TXQ_CNT;
+ /* Create an ethernet device instance */
+ ndev = alloc_etherdev_mq(sizeof(*priv), emac_txq_cnt);
+
+ if (!ndev) {
+ pr_err("%s: gemac %d device allocation failed\n",
+ __func__, einfo[id].gem_id);
+ err = -ENOMEM;
+ goto err0;
+ }
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+ priv->id = einfo[id].gem_id;
+ priv->pfe = pfe;
+ priv->phy_node = einfo[id].phy_node;
+
+ SET_NETDEV_DEV(priv->ndev, priv->pfe->dev);
+
+ pfe->eth.eth_priv[id] = priv;
+
+ /* Set the info in the priv to the current info */
+ priv->einfo = &einfo[id];
+ priv->EMAC_baseaddr = cbus_emac_base[id];
+ priv->GPI_baseaddr = cbus_gpi_base[id];
+
+ spin_lock_init(&priv->lock);
+
+ pfe_eth_fast_tx_timeout_init(priv);
+
+ /* Copy the station address into the dev structure */
+ memcpy(ndev->dev_addr, einfo[id].mac_addr, ETH_ALEN);
+
+ if (us)
+ goto phy_init;
+
+ ndev->mtu = 1500;
+
+ /* Set MTU limits */
+ ndev->min_mtu = ETH_MIN_MTU;
+
+ /*
+ * Jumbo frames are not supported on LS1012A rev 1.0, so the
+ * maximum MTU must be restricted to the supported frame length.
+ */
+ if (pfe_errata_a010897)
+ ndev->max_mtu = JUMBO_FRAME_SIZE_V1 - ETH_HLEN - ETH_FCS_LEN;
+ else
+ ndev->max_mtu = JUMBO_FRAME_SIZE_V2 - ETH_HLEN - ETH_FCS_LEN;
+
+ /* Enable after checksum offload is validated */
+ ndev->hw_features = NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_SG;
+
+ /* enabled by default */
+ ndev->features = ndev->hw_features;
+
+ priv->usr_features = ndev->features;
+
+ ndev->netdev_ops = &pfe_netdev_ops;
+
+ ndev->ethtool_ops = &pfe_ethtool_ops;
+
+ /* Enable basic messages by default */
+ priv->msg_enable = NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_LINK |
+ NETIF_MSG_PROBE;
+
+ netif_napi_add(ndev, &priv->low_napi, pfe_eth_low_poll,
+ HIF_RX_POLL_WEIGHT - 16);
+ netif_napi_add(ndev, &priv->high_napi, pfe_eth_high_poll,
+ HIF_RX_POLL_WEIGHT - 16);
+ netif_napi_add(ndev, &priv->lro_napi, pfe_eth_lro_poll,
+ HIF_RX_POLL_WEIGHT - 16);
+
+ err = register_netdev(ndev);
+ if (err) {
+ netdev_err(ndev, "register_netdev() failed\n");
+ goto err1;
+ }
+
+ if ((!(pfe_use_old_dts_phy) && !(priv->phy_node)) ||
+ ((pfe_use_old_dts_phy) &&
+ (priv->einfo->phy_flags & GEMAC_NO_PHY))) {
+ pr_info("%s: No PHY or fixed-link\n", __func__);
+ goto skip_phy_init;
+ }
+
+phy_init:
+ device_init_wakeup(&ndev->dev, WAKE_MAGIC);
+
+ err = pfe_phy_init(ndev);
+ if (err) {
+ netdev_err(ndev, "%s: pfe_phy_init() failed\n",
+ __func__);
+ goto err2;
+ }
+
+ if (us) {
+ if (priv->phydev)
+ phy_start(priv->phydev);
+ return 0;
+ }
+
+ netif_carrier_on(ndev);
+
+skip_phy_init:
+ /* Create all the sysfs files */
+ if (pfe_eth_sysfs_init(ndev))
+ goto err3;
+
+ netif_info(priv, probe, ndev, "%s: created interface, baseaddr: %p\n",
+ __func__, priv->EMAC_baseaddr);
+
+ return 0;
+
+err3:
+ pfe_phy_exit(priv->ndev);
+err2:
+ if (us)
+ goto err1;
+ unregister_netdev(ndev);
+err1:
+ free_netdev(priv->ndev);
+err0:
+ return err;
+}
+
+/* pfe_eth_init
+ */
+int pfe_eth_init(struct pfe *pfe)
+{
+ int ii = 0;
+ int err;
+ struct ls1012a_pfe_platform_data *pfe_info;
+
+ pr_info("%s\n", __func__);
+
+ cbus_emac_base[0] = EMAC1_BASE_ADDR;
+ cbus_emac_base[1] = EMAC2_BASE_ADDR;
+
+ cbus_gpi_base[0] = EGPI1_BASE_ADDR;
+ cbus_gpi_base[1] = EGPI2_BASE_ADDR;
+
+ pfe_info = (struct ls1012a_pfe_platform_data *)
+ pfe->dev->platform_data;
+ if (!pfe_info) {
+ pr_err("%s: pfe missing additional platform data\n", __func__);
+ err = -ENODEV;
+ goto err_pdata;
+ }
+
+ for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
+ err = pfe_eth_mdio_init(pfe, pfe_info, ii);
+ if (err) {
+ pr_err("%s: pfe_eth_mdio_init() failed\n", __func__);
+ goto err_mdio_init;
+ }
+ }
+
+ if (soc_device_match(ls1012a_rev1_soc_attr))
+ pfe_errata_a010897 = true;
+ else
+ pfe_errata_a010897 = false;
+
+ for (ii = 0; ii < NUM_GEMAC_SUPPORT; ii++) {
+ err = pfe_eth_init_one(pfe, pfe_info, ii);
+ if (err)
+ goto err_eth_init;
+ }
+
+ return 0;
+
+err_eth_init:
+ while (ii--) {
+ pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
+ pfe_eth_mdio_exit(pfe, ii);
+ }
+
+err_mdio_init:
+err_pdata:
+ return err;
+}
+
+/* pfe_eth_exit_one
+ */
+static void pfe_eth_exit_one(struct pfe_eth_priv_s *priv)
+{
+ netif_info(priv, probe, priv->ndev, "%s\n", __func__);
+
+ if (!us)
+ pfe_eth_sysfs_exit(priv->ndev);
+
+ if ((!(pfe_use_old_dts_phy) && !(priv->phy_node)) ||
+ ((pfe_use_old_dts_phy) &&
+ (priv->einfo->phy_flags & GEMAC_NO_PHY))) {
+ pr_info("%s: No PHY or fixed-link\n", __func__);
+ goto skip_phy_exit;
+ }
+
+ pfe_phy_exit(priv->ndev);
+
+skip_phy_exit:
+ if (!us)
+ unregister_netdev(priv->ndev);
+
+ free_netdev(priv->ndev);
+}
+
+/* pfe_eth_exit
+ */
+void pfe_eth_exit(struct pfe *pfe)
+{
+ int ii;
+
+ pr_info("%s\n", __func__);
+
+ for (ii = NUM_GEMAC_SUPPORT - 1; ii >= 0; ii--)
+ pfe_eth_exit_one(pfe->eth.eth_priv[ii]);
+
+ for (ii = NUM_GEMAC_SUPPORT - 1; ii >= 0; ii--)
+ pfe_eth_mdio_exit(pfe, ii);
+}
diff --git a/drivers/staging/fsl_ppfe/pfe_eth.h b/drivers/staging/fsl_ppfe/pfe_eth.h
new file mode 100644
index 000000000000..26e28a29f8d6
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_eth.h
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#ifndef _PFE_ETH_H_
+#define _PFE_ETH_H_
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/time.h>
+
+#define PFE_ETH_NAPI_STATS
+#define PFE_ETH_TX_STATS
+
+#define PFE_ETH_FRAGS_MAX (65536 / HIF_RX_PKT_MIN_SIZE)
+#define LRO_LEN_COUNT_MAX 32
+#define LRO_NB_COUNT_MAX 32
+
+#define PFE_PAUSE_FLAG_ENABLE 1
+#define PFE_PAUSE_FLAG_AUTONEG 2
+
+/* GEMAC configured by SW */
+/* GEMAC configured by phy lines (not for MII/GMII) */
+
+#define GEMAC_SW_FULL_DUPLEX BIT(9)
+#define GEMAC_SW_SPEED_10M (0 << 12)
+#define GEMAC_SW_SPEED_100M BIT(12)
+#define GEMAC_SW_SPEED_1G (2 << 12)
+
+#define GEMAC_NO_PHY BIT(0)
+
+struct ls1012a_eth_platform_data {
+ /* board specific information */
+ u32 mii_config;
+ u32 phy_flags;
+ u32 gem_id;
+ u32 phy_id;
+ u32 mdio_muxval;
+ u8 mac_addr[ETH_ALEN];
+ struct device_node *phy_node;
+};
+
+struct ls1012a_mdio_platform_data {
+ int id;
+ int irq[32];
+ u32 phy_mask;
+ int mdc_div;
+};
+
+struct ls1012a_pfe_platform_data {
+ struct ls1012a_eth_platform_data ls1012a_eth_pdata[3];
+ struct ls1012a_mdio_platform_data ls1012a_mdio_pdata[3];
+};
+
+#define NUM_GEMAC_SUPPORT 2
+#define DRV_NAME "pfe-eth"
+#define DRV_VERSION "1.0"
+
+#define LS1012A_TX_FAST_RECOVERY_TIMEOUT_MS 3
+#define TX_POLL_TIMEOUT_MS 1000
+
+#define EMAC_TXQ_CNT 16
+#define EMAC_TXQ_DEPTH (HIF_TX_DESC_NT)
+
+#define JUMBO_FRAME_SIZE_V1 1900
+#define JUMBO_FRAME_SIZE_V2 10258
+/*
+ * Client Tx queue threshold, for txQ flush condition.
+ * It must be smaller than the queue size (in case we ever change it in the
+ * future).
+ */
+#define HIF_CL_TX_FLUSH_MARK 32
+
+/*
+ * Max number of TX resources (HIF descriptors or skbs) that will be released
+ * in a single go during batch recycling.
+ * Should be lower than the flush mark so the SW can provide the HW with a
+ * continuous stream of packets instead of bursts.
+ */
+#define TX_FREE_MAX_COUNT 16
+#define EMAC_RXQ_CNT 3
+#define EMAC_RXQ_DEPTH HIF_RX_DESC_NT
+/* make sure clients can receive a full burst of packets */
+#define EMAC_RMON_TXBYTES_POS 0x00
+#define EMAC_RMON_RXBYTES_POS 0x14
+
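+/* The mask form assumes emac_txq_cnt is a power of two (EMAC_TXQ_CNT is 16) */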
+#define EMAC_QUEUENUM_MASK (emac_txq_cnt - 1)
+#define EMAC_MDIO_TIMEOUT 1000
+#define MAX_UC_SPEC_ADDR_REG 31
+
+struct pfe_eth_fast_timer {
+ int queuenum;
+ struct hrtimer timer;
+ void *base;
+};
+
+struct pfe_eth_priv_s {
+ struct pfe *pfe;
+ struct hif_client_s client;
+ struct napi_struct lro_napi;
+ struct napi_struct low_napi;
+ struct napi_struct high_napi;
+ int low_tmu_q;
+ int high_tmu_q;
+ struct net_device_stats stats;
+ struct net_device *ndev;
+ int id;
+ int promisc;
+ unsigned int msg_enable;
+ unsigned int usr_features;
+
+ spinlock_t lock; /* protect member variables */
+ unsigned int event_status;
+ int irq;
+ void *EMAC_baseaddr;
+ void *GPI_baseaddr;
+ /* PHY stuff */
+ struct phy_device *phydev;
+ int oldspeed;
+ int oldduplex;
+ int oldlink;
+ struct device_node *phy_node;
+ struct clk *gemtx_clk;
+ int wol;
+ int pause_flag;
+
+ int default_priority;
+ struct pfe_eth_fast_timer fast_tx_timeout[EMAC_TXQ_CNT];
+
+ struct ls1012a_eth_platform_data *einfo;
+ struct sk_buff *skb_inflight[EMAC_RXQ_CNT + 6];
+
+#ifdef PFE_ETH_TX_STATS
+ unsigned int stop_queue_total[EMAC_TXQ_CNT];
+ unsigned int stop_queue_hif[EMAC_TXQ_CNT];
+ unsigned int stop_queue_hif_client[EMAC_TXQ_CNT];
+ unsigned int stop_queue_credit[EMAC_TXQ_CNT];
+ unsigned int clean_fail[EMAC_TXQ_CNT];
+ unsigned int was_stopped[EMAC_TXQ_CNT];
+#endif
+
+#ifdef PFE_ETH_NAPI_STATS
+ unsigned int napi_counters[NAPI_MAX_COUNT];
+#endif
+ unsigned int frags_inflight[EMAC_RXQ_CNT + 6];
+};
+
+struct pfe_eth {
+ struct pfe_eth_priv_s *eth_priv[3];
+};
+
+struct pfe_mdio_priv_s {
+ void __iomem *mdio_base;
+ int mdc_div;
+ struct mii_bus *mii_bus;
+};
+
+struct pfe_mdio {
+ struct pfe_mdio_priv_s *mdio_priv[3];
+};
+
+int pfe_eth_init(struct pfe *pfe);
+void pfe_eth_exit(struct pfe *pfe);
+int pfe_eth_suspend(struct net_device *dev);
+int pfe_eth_resume(struct net_device *dev);
+int pfe_eth_mdio_reset(struct mii_bus *bus);
+
+#endif /* _PFE_ETH_H_ */
diff --git a/drivers/staging/fsl_ppfe/pfe_firmware.c b/drivers/staging/fsl_ppfe/pfe_firmware.c
new file mode 100644
index 000000000000..51cd44dc322c
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_firmware.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+/*
+ * @file
+ * Contains all the functions to handle parsing and loading of PE firmware
+ * files.
+ */
+#include <linux/firmware.h>
+
+#include "pfe_mod.h"
+#include "pfe_firmware.h"
+#include "pfe/pfe.h"
+
+static struct elf32_shdr *get_elf_section_header(const struct firmware *fw,
+ const char *section)
+{
+ struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
+ struct elf32_shdr *shdr;
+ struct elf32_shdr *shdr_shstr;
+ Elf32_Off e_shoff = be32_to_cpu(elf_hdr->e_shoff);
+ Elf32_Half e_shentsize = be16_to_cpu(elf_hdr->e_shentsize);
+ Elf32_Half e_shnum = be16_to_cpu(elf_hdr->e_shnum);
+ Elf32_Half e_shstrndx = be16_to_cpu(elf_hdr->e_shstrndx);
+ Elf32_Off shstr_offset;
+ Elf32_Word sh_name;
+ const char *name;
+ int i;
+
+ /* Section header strings */
+ shdr_shstr = (struct elf32_shdr *)(fw->data + e_shoff + e_shstrndx *
+ e_shentsize);
+ shstr_offset = be32_to_cpu(shdr_shstr->sh_offset);
+
+ for (i = 0; i < e_shnum; i++) {
+ shdr = (struct elf32_shdr *)(fw->data + e_shoff
+ + i * e_shentsize);
+
+ sh_name = be32_to_cpu(shdr->sh_name);
+
+ name = (const char *)(fw->data + shstr_offset + sh_name);
+
+ if (!strcmp(name, section))
+ return shdr;
+ }
+
+ pr_err("%s: didn't find section %s\n", __func__, section);
+
+ return NULL;
+}
+
+#if defined(CFG_DIAGS)
+static int pfe_get_diags_info(const struct firmware *fw, struct pfe_diags_info
+ *diags_info)
+{
+ struct elf32_shdr *shdr;
+ unsigned long offset, size;
+
+ shdr = get_elf_section_header(fw, ".pfe_diags_str");
+ if (shdr) {
+ offset = be32_to_cpu(shdr->sh_offset);
+ size = be32_to_cpu(shdr->sh_size);
+ diags_info->diags_str_base = be32_to_cpu(shdr->sh_addr);
+ diags_info->diags_str_size = size;
+ diags_info->diags_str_array = kmalloc(size, GFP_KERNEL);
+ memcpy(diags_info->diags_str_array, fw->data + offset, size);
+
+ return 0;
+ } else {
+ return -1;
+ }
+}
+#endif
+
+static void pfe_check_version_info(const struct firmware *fw)
+{
+ static char *version;
+
+ struct elf32_shdr *shdr = get_elf_section_header(fw, ".version");
+
+ if (shdr) {
+ if (!version) {
+ /*
+ * this is the first fw we load, use its version
+ * string as reference (whatever it is)
+ */
+ version = (char *)(fw->data +
+ be32_to_cpu(shdr->sh_offset));
+
+ pr_info("PFE binary version: %s\n", version);
+ } else {
+ /*
+ * At least one firmware has already been loaded;
+ * compare this one's version string against it.
+ */
+ if (strcmp(version, (char *)(fw->data +
+ be32_to_cpu(shdr->sh_offset)))) {
+ pr_info(
+ "WARNING: PFE firmware binaries from incompatible version\n");
+ }
+ }
+ } else {
+ /*
+ * version cannot be verified, a potential issue that
+ * should be reported
+ */
+ pr_info(
+ "WARNING: PFE firmware binary version cannot be verified\n");
+ }
+}
+
+/* PFE elf firmware loader.
+ * Loads an elf firmware image into a list of PE's (specified using a bitmask)
+ *
+ * @param pe_mask Mask of PE id's to load firmware to
+ * @param fw Pointer to the firmware image
+ *
+ * @return 0 on success, a negative value on error
+ *
+ */
+int pfe_load_elf(int pe_mask, const struct firmware *fw, struct pfe *pfe)
+{
+ struct elf32_hdr *elf_hdr = (struct elf32_hdr *)fw->data;
+ Elf32_Half sections = be16_to_cpu(elf_hdr->e_shnum);
+ struct elf32_shdr *shdr = (struct elf32_shdr *)(fw->data +
+ be32_to_cpu(elf_hdr->e_shoff));
+ int id, section;
+ int rc;
+
+ pr_info("%s\n", __func__);
+
+ /* Some sanity checks */
+ if (strncmp(&elf_hdr->e_ident[EI_MAG0], ELFMAG, SELFMAG)) {
+ pr_err("%s: incorrect elf magic number\n", __func__);
+ return -EINVAL;
+ }
+
+ if (elf_hdr->e_ident[EI_CLASS] != ELFCLASS32) {
+ pr_err("%s: incorrect elf class(%x)\n", __func__,
+ elf_hdr->e_ident[EI_CLASS]);
+ return -EINVAL;
+ }
+
+ if (elf_hdr->e_ident[EI_DATA] != ELFDATA2MSB) {
+ pr_err("%s: incorrect elf data(%x)\n", __func__,
+ elf_hdr->e_ident[EI_DATA]);
+ return -EINVAL;
+ }
+
+ if (be16_to_cpu(elf_hdr->e_type) != ET_EXEC) {
+ pr_err("%s: incorrect elf file type(%x)\n", __func__,
+ be16_to_cpu(elf_hdr->e_type));
+ return -EINVAL;
+ }
+
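+ /*
+ * Only sections flagged writable, allocatable or executable are
+ * loaded; everything else (symbol tables, strings, ...) is
+ * skipped.
+ */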
+ for (section = 0; section < sections; section++, shdr++) {
+ if (!(be32_to_cpu(shdr->sh_flags) & (SHF_WRITE | SHF_ALLOC |
+ SHF_EXECINSTR)))
+ continue;
+
+ for (id = 0; id < MAX_PE; id++)
+ if (pe_mask & (1 << id)) {
+ rc = pe_load_elf_section(id, fw->data, shdr,
+ pfe->dev);
+ if (rc < 0)
+ goto err;
+ }
+ }
+
+ pfe_check_version_info(fw);
+
+ return 0;
+
+err:
+ return rc;
+}
+
+/* PFE firmware initialization.
+ * Loads the different firmware files from the filesystem,
+ * initializes the PE IMEM/DMEM and UTIL-PE DDR,
+ * initializes the control path symbol addresses (by looking them up in the
+ * elf firmware files), and
+ * takes the PEs out of reset.
+ *
+ * @return 0 on success, a negative value on error
+ *
+ */
+int pfe_firmware_init(struct pfe *pfe)
+{
+ const struct firmware *class_fw, *tmu_fw;
+ int rc = 0;
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+ const char *util_fw_name;
+ const struct firmware *util_fw;
+#endif
+
+ pr_info("%s\n", __func__);
+
+ if (request_firmware(&class_fw, CLASS_FIRMWARE_FILENAME, pfe->dev)) {
+ pr_err("%s: request firmware %s failed\n", __func__,
+ CLASS_FIRMWARE_FILENAME);
+ rc = -ETIMEDOUT;
+ goto err0;
+ }
+
+ if (request_firmware(&tmu_fw, TMU_FIRMWARE_FILENAME, pfe->dev)) {
+ pr_err("%s: request firmware %s failed\n", __func__,
+ TMU_FIRMWARE_FILENAME);
+ rc = -ETIMEDOUT;
+ goto err1;
+ }
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+ util_fw_name = UTIL_FIRMWARE_FILENAME;
+
+ if (request_firmware(&util_fw, util_fw_name, pfe->dev)) {
+ pr_err("%s: request firmware %s failed\n", __func__,
+ util_fw_name);
+ rc = -ETIMEDOUT;
+ goto err2;
+ }
+#endif
+ rc = pfe_load_elf(CLASS_MASK, class_fw, pfe);
+ if (rc < 0) {
+ pr_err("%s: class firmware load failed\n", __func__);
+ goto err3;
+ }
+
+#if defined(CFG_DIAGS)
+ rc = pfe_get_diags_info(class_fw, &pfe->diags.class_diags_info);
+ if (rc < 0) {
+ pr_warn(
+ "PFE diags won't be available for class PEs\n");
+ rc = 0;
+ }
+#endif
+
+ rc = pfe_load_elf(TMU_MASK, tmu_fw, pfe);
+ if (rc < 0) {
+ pr_err("%s: tmu firmware load failed\n", __func__);
+ goto err3;
+ }
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+ rc = pfe_load_elf(UTIL_MASK, util_fw, pfe);
+ if (rc < 0) {
+ pr_err("%s: util firmware load failed\n", __func__);
+ goto err3;
+ }
+
+#if defined(CFG_DIAGS)
+ rc = pfe_get_diags_info(util_fw, &pfe->diags.util_diags_info);
+ if (rc < 0) {
+ pr_warn(
+ "PFE diags won't be available for util PE\n");
+ rc = 0;
+ }
+#endif
+
+ util_enable();
+#endif
+
+ tmu_enable(0xf);
+ class_enable();
+
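+ /*
+ * On success we fall through deliberately: the firmware images
+ * can be released once they have been loaded into the PEs, and
+ * rc is still 0 here.
+ */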
+err3:
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+ release_firmware(util_fw);
+
+err2:
+#endif
+ release_firmware(tmu_fw);
+
+err1:
+ release_firmware(class_fw);
+
+err0:
+ return rc;
+}
+
+/* PFE firmware cleanup
+ * Puts the PEs back in reset
+ */
+void pfe_firmware_exit(struct pfe *pfe)
+{
+ pr_info("%s\n", __func__);
+
+ if (pe_reset_all(&pfe->ctrl) != 0)
+ pr_err("Error: Failed to stop PEs, PFE reload may not work correctly\n");
+
+ class_disable();
+ tmu_disable(0xf);
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+ util_disable();
+#endif
+}
diff --git a/drivers/staging/fsl_ppfe/pfe_firmware.h b/drivers/staging/fsl_ppfe/pfe_firmware.h
new file mode 100644
index 000000000000..afb24facbca9
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_firmware.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#ifndef _PFE_FIRMWARE_H_
+#define _PFE_FIRMWARE_H_
+
+#define CLASS_FIRMWARE_FILENAME "ppfe_class_ls1012a.elf"
+#define TMU_FIRMWARE_FILENAME "ppfe_tmu_ls1012a.elf"
+
+#define PFE_FW_CHECK_PASS 0
+#define PFE_FW_CHECK_FAIL 1
+#define NUM_PFE_FW 3
+
+int pfe_firmware_init(struct pfe *pfe);
+void pfe_firmware_exit(struct pfe *pfe);
+
+#endif /* _PFE_FIRMWARE_H_ */
diff --git a/drivers/staging/fsl_ppfe/pfe_hal.c b/drivers/staging/fsl_ppfe/pfe_hal.c
new file mode 100644
index 000000000000..df71079ee5b1
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_hal.c
@@ -0,0 +1,1517 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#include "pfe_mod.h"
+#include "pfe/pfe.h"
+
+/* A-010897: Jumbo frame is not supported */
+extern bool pfe_errata_a010897;
+
+#define PFE_RCR_MAX_FL_MASK 0xC000FFFF
+
+void *cbus_base_addr;
+void *ddr_base_addr;
+unsigned long ddr_phys_base_addr;
+unsigned int ddr_size;
+
+static struct pe_info pe[MAX_PE];
+
+/* Initializes the PFE library.
+ * Must be called before using any of the library functions.
+ *
+ * @param[in] cbus_base CBUS virtual base address (as mapped in
+ * the host CPU address space)
+ * @param[in] ddr_base PFE DDR range virtual base address (as
+ * mapped in the host CPU address space)
+ * @param[in] ddr_phys_base PFE DDR range physical base address (as
+ * mapped in platform)
+ * @param[in] size PFE DDR range size (as defined by the host
+ * software)
+ */
+void pfe_lib_init(void *cbus_base, void *ddr_base, unsigned long ddr_phys_base,
+ unsigned int size)
+{
+ cbus_base_addr = cbus_base;
+ ddr_base_addr = ddr_base;
+ ddr_phys_base_addr = ddr_phys_base;
+ ddr_size = size;
+
+ pe[CLASS0_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(0);
+ pe[CLASS0_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(0);
+ pe[CLASS0_ID].pmem_size = CLASS_IMEM_SIZE;
+ pe[CLASS0_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
+ pe[CLASS0_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
+ pe[CLASS0_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
+
+ pe[CLASS1_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(1);
+ pe[CLASS1_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(1);
+ pe[CLASS1_ID].pmem_size = CLASS_IMEM_SIZE;
+ pe[CLASS1_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
+ pe[CLASS1_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
+ pe[CLASS1_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
+
+ pe[CLASS2_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(2);
+ pe[CLASS2_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(2);
+ pe[CLASS2_ID].pmem_size = CLASS_IMEM_SIZE;
+ pe[CLASS2_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
+ pe[CLASS2_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
+ pe[CLASS2_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
+
+ pe[CLASS3_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(3);
+ pe[CLASS3_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(3);
+ pe[CLASS3_ID].pmem_size = CLASS_IMEM_SIZE;
+ pe[CLASS3_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
+ pe[CLASS3_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
+ pe[CLASS3_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
+
+ pe[CLASS4_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(4);
+ pe[CLASS4_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(4);
+ pe[CLASS4_ID].pmem_size = CLASS_IMEM_SIZE;
+ pe[CLASS4_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
+ pe[CLASS4_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
+ pe[CLASS4_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
+
+ pe[CLASS5_ID].dmem_base_addr = CLASS_DMEM_BASE_ADDR(5);
+ pe[CLASS5_ID].pmem_base_addr = CLASS_IMEM_BASE_ADDR(5);
+ pe[CLASS5_ID].pmem_size = CLASS_IMEM_SIZE;
+ pe[CLASS5_ID].mem_access_wdata = CLASS_MEM_ACCESS_WDATA;
+ pe[CLASS5_ID].mem_access_addr = CLASS_MEM_ACCESS_ADDR;
+ pe[CLASS5_ID].mem_access_rdata = CLASS_MEM_ACCESS_RDATA;
+
+ pe[TMU0_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(0);
+ pe[TMU0_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(0);
+ pe[TMU0_ID].pmem_size = TMU_IMEM_SIZE;
+ pe[TMU0_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
+ pe[TMU0_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
+ pe[TMU0_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
+
+ pe[TMU1_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(1);
+ pe[TMU1_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(1);
+ pe[TMU1_ID].pmem_size = TMU_IMEM_SIZE;
+ pe[TMU1_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
+ pe[TMU1_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
+ pe[TMU1_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
+
+ pe[TMU3_ID].dmem_base_addr = TMU_DMEM_BASE_ADDR(3);
+ pe[TMU3_ID].pmem_base_addr = TMU_IMEM_BASE_ADDR(3);
+ pe[TMU3_ID].pmem_size = TMU_IMEM_SIZE;
+ pe[TMU3_ID].mem_access_wdata = TMU_MEM_ACCESS_WDATA;
+ pe[TMU3_ID].mem_access_addr = TMU_MEM_ACCESS_ADDR;
+ pe[TMU3_ID].mem_access_rdata = TMU_MEM_ACCESS_RDATA;
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+ pe[UTIL_ID].dmem_base_addr = UTIL_DMEM_BASE_ADDR;
+ pe[UTIL_ID].mem_access_wdata = UTIL_MEM_ACCESS_WDATA;
+ pe[UTIL_ID].mem_access_addr = UTIL_MEM_ACCESS_ADDR;
+ pe[UTIL_ID].mem_access_rdata = UTIL_MEM_ACCESS_RDATA;
+#endif
+}
+
+/* Writes a buffer to PE internal memory from the host
+ * through indirect access registers.
+ *
+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
+ * ..., UTIL_ID)
+ * @param[in] mem_access_addr PE memory destination address (must be
+ * 32bit aligned)
+ * @param[in] src Buffer source address
+ * @param[in] len Number of bytes to copy
+ */
+void pe_mem_memcpy_to32(int id, u32 mem_access_addr, const void *src, unsigned
+int len)
+{
+ u32 offset = 0, val, addr;
+ unsigned int len32 = len >> 2;
+ int i;
+
+ addr = mem_access_addr | PE_MEM_ACCESS_WRITE |
+ PE_MEM_ACCESS_BYTE_ENABLE(0, 4);
+
+ for (i = 0; i < len32; i++, offset += 4, src += 4) {
+ val = *(u32 *)src;
+ writel(cpu_to_be32(val), pe[id].mem_access_wdata);
+ writel(addr + offset, pe[id].mem_access_addr);
+ }
+
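+ /*
+ * Any trailing 1-3 bytes are packed into a single word and
+ * written with a partial byte-enable so adjacent memory is left
+ * untouched.
+ */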
+ len = (len & 0x3);
+ if (len) {
+ val = 0;
+
+ addr = (mem_access_addr | PE_MEM_ACCESS_WRITE |
+ PE_MEM_ACCESS_BYTE_ENABLE(0, len)) + offset;
+
+ for (i = 0; i < len; i++, src++)
+ val |= (*(u8 *)src) << (8 * i);
+
+ writel(cpu_to_be32(val), pe[id].mem_access_wdata);
+ writel(addr, pe[id].mem_access_addr);
+ }
+}
+
+/* Writes a buffer to PE internal data memory (DMEM) from the host
+ * through indirect access registers.
+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
+ * ..., UTIL_ID)
+ * @param[in] src Buffer source address
+ * @param[in] dst DMEM destination address (must be 32bit
+ * aligned)
+ * @param[in] len Number of bytes to copy
+ */
+void pe_dmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
+{
+ pe_mem_memcpy_to32(id, pe[id].dmem_base_addr | dst |
+ PE_MEM_ACCESS_DMEM, src, len);
+}
+
+/* Writes a buffer to PE internal program memory (PMEM) from the host
+ * through indirect access registers.
+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
+ * ..., TMU3_ID)
+ * @param[in] src Buffer source address
+ * @param[in] dst PMEM destination address (must be 32bit
+ * aligned)
+ * @param[in] len Number of bytes to copy
+ */
+void pe_pmem_memcpy_to32(int id, u32 dst, const void *src, unsigned int len)
+{
+ pe_mem_memcpy_to32(id, pe[id].pmem_base_addr | (dst & (pe[id].pmem_size
+ - 1)) | PE_MEM_ACCESS_IMEM, src, len);
+}
+
+/* Reads PE internal program memory (IMEM) from the host
+ * through indirect access registers.
+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
+ * ..., TMU3_ID)
+ * @param[in] addr PMEM read address (must be aligned on size)
+ * @param[in] size Number of bytes to read (maximum 4, must not
+ * cross 32bit boundaries)
+ * @return the data read (in PE endianness, i.e BE).
+ */
+u32 pe_pmem_read(int id, u32 addr, u8 size)
+{
+ u32 offset = addr & 0x3;
+ u32 mask = 0xffffffff >> ((4 - size) << 3);
+ u32 val;
+
+ addr = pe[id].pmem_base_addr | ((addr & ~0x3) & (pe[id].pmem_size - 1))
+ | PE_MEM_ACCESS_IMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
+
+ writel(addr, pe[id].mem_access_addr);
+ val = be32_to_cpu(readl(pe[id].mem_access_rdata));
+
+ return (val >> (offset << 3)) & mask;
+}
+
+/* Writes PE internal data memory (DMEM) from the host
+ * through indirect access registers.
+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
+ * ..., UTIL_ID)
+ * @param[in] addr DMEM write address (must be aligned on size)
+ * @param[in] val Value to write (in PE endianness, i.e BE)
+ * @param[in] size Number of bytes to write (maximum 4, must not
+ * cross 32bit boundaries)
+ */
+void pe_dmem_write(int id, u32 val, u32 addr, u8 size)
+{
+ u32 offset = addr & 0x3;
+
+ addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_WRITE |
+ PE_MEM_ACCESS_DMEM | PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
+
+ /* Indirect access interface is byte swapping data being written */
+ writel(cpu_to_be32(val << (offset << 3)), pe[id].mem_access_wdata);
+ writel(addr, pe[id].mem_access_addr);
+}
+
+/* Reads PE internal data memory (DMEM) from the host
+ * through indirect access registers.
+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
+ * ..., UTIL_ID)
+ * @param[in] addr DMEM read address (must be aligned on size)
+ * @param[in] size Number of bytes to read (maximum 4, must not
+ * cross 32bit boundaries)
+ * @return the data read (in PE endianness, i.e BE).
+ */
+u32 pe_dmem_read(int id, u32 addr, u8 size)
+{
+ u32 offset = addr & 0x3;
+ u32 mask = 0xffffffff >> ((4 - size) << 3);
+ u32 val;
+
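+ /*
+ * Example: a 2-byte read at an address with (addr & 3) == 2
+ * yields mask = 0xffff, and the value is taken from bits 31:16
+ * of the word returned by the indirect interface.
+ */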
+ addr = pe[id].dmem_base_addr | (addr & ~0x3) | PE_MEM_ACCESS_DMEM |
+ PE_MEM_ACCESS_BYTE_ENABLE(offset, size);
+
+ writel(addr, pe[id].mem_access_addr);
+
+ /* Indirect access interface is byte swapping data being read */
+ val = be32_to_cpu(readl(pe[id].mem_access_rdata));
+
+ return (val >> (offset << 3)) & mask;
+}
+
+/* Writes to CLASS internal bus peripherals (ccu, pe-lem) from the host
+ * through indirect access registers.
+ * @param[in] val value to write
+ * @param[in] addr Address to write to (must be aligned on size)
+ * @param[in] size Number of bytes to write (1, 2 or 4)
+ *
+ */
+void class_bus_write(u32 val, u32 addr, u8 size)
+{
+ u32 offset = addr & 0x3;
+
+ writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
+
+ addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | PE_MEM_ACCESS_WRITE |
+ (size << 24);
+
+ writel(cpu_to_be32(val << (offset << 3)), CLASS_BUS_ACCESS_WDATA);
+ writel(addr, CLASS_BUS_ACCESS_ADDR);
+}
+
+/* Reads from CLASS internal bus peripherals (ccu, pe-lem) from the host
+ * through indirect access registers.
+ * @param[in] addr Address to read from (must be aligned on size)
+ * @param[in] size Number of bytes to read (1, 2 or 4)
+ * @return the read data
+ *
+ */
+u32 class_bus_read(u32 addr, u8 size)
+{
+ u32 offset = addr & 0x3;
+ u32 mask = 0xffffffff >> ((4 - size) << 3);
+ u32 val;
+
+ writel((addr & CLASS_BUS_ACCESS_BASE_MASK), CLASS_BUS_ACCESS_BASE);
+
+ addr = (addr & ~CLASS_BUS_ACCESS_BASE_MASK) | (size << 24);
+
+ writel(addr, CLASS_BUS_ACCESS_ADDR);
+ val = be32_to_cpu(readl(CLASS_BUS_ACCESS_RDATA));
+
+ return (val >> (offset << 3)) & mask;
+}
+
+/* Writes data to the cluster memory (PE_LMEM)
+ * @param[in] dst PE LMEM destination address (must be 32bit aligned)
+ * @param[in] src Buffer source address
+ * @param[in] len Number of bytes to copy
+ */
+void class_pe_lmem_memcpy_to32(u32 dst, const void *src, unsigned int len)
+{
+ u32 len32 = len >> 2;
+ int i;
+
+ for (i = 0; i < len32; i++, src += 4, dst += 4)
+ class_bus_write(*(u32 *)src, dst, 4);
+
+ if (len & 0x2) {
+ class_bus_write(*(u16 *)src, dst, 2);
+ src += 2;
+ dst += 2;
+ }
+
+ if (len & 0x1) {
+ class_bus_write(*(u8 *)src, dst, 1);
+ src++;
+ dst++;
+ }
+}
+
+/* Writes value to the cluster memory (PE_LMEM)
+ * @param[in] dst PE LMEM destination address (must be 32bit aligned)
+ * @param[in] val Value to write
+ * @param[in] len Number of bytes to write
+ */
+void class_pe_lmem_memset(u32 dst, int val, unsigned int len)
+{
+ u32 len32 = len >> 2;
+ int i;
+
+ val = val | (val << 8) | (val << 16) | (val << 24);
+
+ for (i = 0; i < len32; i++, dst += 4)
+ class_bus_write(val, dst, 4);
+
+ if (len & 0x2) {
+ class_bus_write(val, dst, 2);
+ dst += 2;
+ }
+
+ if (len & 0x1) {
+ class_bus_write(val, dst, 1);
+ dst++;
+ }
+}
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+
+/* Writes UTIL program memory (DDR) from the host.
+ *
+ * @param[in] addr Address to write (virtual, must be aligned on size)
+ * @param[in] val Value to write (in PE endianness, i.e BE)
+ * @param[in] size Number of bytes to write (2 or 4)
+ */
+static void util_pmem_write(u32 val, void *addr, u8 size)
+{
+ void *addr64 = (void *)((unsigned long)addr & ~0x7);
+ unsigned long off = 8 - ((unsigned long)addr & 0x7) - size;
+
+ /*
+ * IMEM should be loaded as a 64bit swapped value in a 64bit aligned
+ * location
+ */
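+ /*
+ * e.g. with size == 4, a write at (addr & 7) == 4 lands at
+ * offset 0 of the aligned 64-bit word (off = 8 - 4 - 4), and a
+ * write at offset 0 lands at offset 4: the two halves are
+ * swapped.
+ */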
+ if (size == 4)
+ writel(be32_to_cpu(val), addr64 + off);
+ else
+ writew(be16_to_cpu((u16)val), addr64 + off);
+}
+
+/* Writes a buffer to UTIL program memory (DDR) from the host.
+ *
+ * @param[in] dst Address to write (virtual, must be at least 16bit
+ * aligned)
+ * @param[in] src Buffer to write (in PE endianness, i.e BE, must have
+ * same alignment as dst)
+ * @param[in] len Number of bytes to write (must be at least 16bit
+ * aligned)
+ */
+static void util_pmem_memcpy(void *dst, const void *src, unsigned int len)
+{
+ unsigned int len32;
+ int i;
+
+ if ((unsigned long)src & 0x2) {
+ util_pmem_write(*(u16 *)src, dst, 2);
+ src += 2;
+ dst += 2;
+ len -= 2;
+ }
+
+ len32 = len >> 2;
+
+ for (i = 0; i < len32; i++, dst += 4, src += 4)
+ util_pmem_write(*(u32 *)src, dst, 4);
+
+ if (len & 0x2)
+ util_pmem_write(*(u16 *)src, dst, len & 0x2);
+}
+#endif
+
+/* Loads an elf section into pmem
+ * Code needs to be at least 16bit aligned and only PROGBITS sections are
+ * supported
+ *
+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID, ...,
+ * TMU3_ID)
+ * @param[in] data pointer to the elf firmware
+ * @param[in] shdr pointer to the elf section header
+ *
+ */
+static int pe_load_pmem_section(int id, const void *data,
+ struct elf32_shdr *shdr)
+{
+ u32 offset = be32_to_cpu(shdr->sh_offset);
+ u32 addr = be32_to_cpu(shdr->sh_addr);
+ u32 size = be32_to_cpu(shdr->sh_size);
+ u32 type = be32_to_cpu(shdr->sh_type);
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+ if (id == UTIL_ID) {
+ pr_err("%s: unsupported pmem section for UTIL\n",
+ __func__);
+ return -EINVAL;
+ }
+#endif
+
+ if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
+ pr_err(
+ "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
+ , __func__, addr, (unsigned long)data + offset);
+
+ return -EINVAL;
+ }
+
+ if (addr & 0x1) {
+ pr_err("%s: load address(%x) is not 16bit aligned\n",
+ __func__, addr);
+ return -EINVAL;
+ }
+
+ if (size & 0x1) {
+ pr_err("%s: load size(%x) is not 16bit aligned\n",
+ __func__, size);
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case SHT_PROGBITS:
+ pe_pmem_memcpy_to32(id, addr, data + offset, size);
+
+ break;
+
+ default:
+ pr_err("%s: unsupported section type(%x)\n", __func__,
+ type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Loads an elf section into dmem
+ * Data needs to be at least 32bit aligned, NOBITS sections are correctly
+ * initialized to 0
+ *
+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
+ * ..., UTIL_ID)
+ * @param[in] data pointer to the elf firmware
+ * @param[in] shdr pointer to the elf section header
+ *
+ */
+static int pe_load_dmem_section(int id, const void *data,
+ struct elf32_shdr *shdr)
+{
+ u32 offset = be32_to_cpu(shdr->sh_offset);
+ u32 addr = be32_to_cpu(shdr->sh_addr);
+ u32 size = be32_to_cpu(shdr->sh_size);
+ u32 type = be32_to_cpu(shdr->sh_type);
+ u32 size32 = size >> 2;
+ int i;
+
+ if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
+ pr_err(
+ "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
+ __func__, addr, (unsigned long)data + offset);
+
+ return -EINVAL;
+ }
+
+ if (addr & 0x3) {
+ pr_err("%s: load address(%x) is not 32bit aligned\n",
+ __func__, addr);
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case SHT_PROGBITS:
+ pe_dmem_memcpy_to32(id, addr, data + offset, size);
+ break;
+
+ case SHT_NOBITS:
+ for (i = 0; i < size32; i++, addr += 4)
+ pe_dmem_write(id, 0, addr, 4);
+
+ if (size & 0x3)
+ pe_dmem_write(id, 0, addr, size & 0x3);
+
+ break;
+
+ default:
+ pr_err("%s: unsupported section type(%x)\n", __func__,
+ type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Loads an elf section into DDR
+ * Data needs to be at least 32bit aligned, NOBITS sections are correctly
+ * initialized to 0
+ *
+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
+ * ..., UTIL_ID)
+ * @param[in] data pointer to the elf firmware
+ * @param[in] shdr pointer to the elf section header
+ *
+ */
+static int pe_load_ddr_section(int id, const void *data,
+ struct elf32_shdr *shdr,
+ struct device *dev) {
+ u32 offset = be32_to_cpu(shdr->sh_offset);
+ u32 addr = be32_to_cpu(shdr->sh_addr);
+ u32 size = be32_to_cpu(shdr->sh_size);
+ u32 type = be32_to_cpu(shdr->sh_type);
+ u32 flags = be32_to_cpu(shdr->sh_flags);
+
+ switch (type) {
+ case SHT_PROGBITS:
+ if (flags & SHF_EXECINSTR) {
+ if (id <= CLASS_MAX_ID) {
+ /* Do the load into DDR only once; all class PEs share it */
+ if (id == CLASS0_ID) {
+ pr_err(
+ "%s: load address(%x) and elf file address(%lx) rcvd\n",
+ __func__, addr,
+ (unsigned long)data + offset);
+ if (((unsigned long)(data + offset)
+ & 0x3) != (addr & 0x3)) {
+ pr_err(
+ "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
+ , __func__, addr,
+ (unsigned long)data + offset);
+
+ return -EINVAL;
+ }
+
+ if (addr & 0x1) {
+ pr_err(
+ "%s: load address(%x) is not 16bit aligned\n"
+ , __func__, addr);
+ return -EINVAL;
+ }
+
+ if (size & 0x1) {
+ pr_err(
+ "%s: load length(%x) is not 16bit aligned\n"
+ , __func__, size);
+ return -EINVAL;
+ }
+ memcpy(DDR_PHYS_TO_VIRT(
+ DDR_PFE_TO_PHYS(addr)),
+ data + offset, size);
+ }
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+ } else if (id == UTIL_ID) {
+ if (((unsigned long)(data + offset) & 0x3)
+ != (addr & 0x3)) {
+ pr_err(
+ "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n"
+ , __func__, addr,
+ (unsigned long)data + offset);
+
+ return -EINVAL;
+ }
+
+ if (addr & 0x1) {
+ pr_err(
+ "%s: load address(%x) is not 16bit aligned\n"
+ , __func__, addr);
+ return -EINVAL;
+ }
+
+ if (size & 0x1) {
+ pr_err(
+ "%s: load length(%x) is not 16bit aligned\n"
+ , __func__, size);
+ return -EINVAL;
+ }
+
+ util_pmem_memcpy(DDR_PHYS_TO_VIRT(
+ DDR_PFE_TO_PHYS(addr)),
+ data + offset, size);
+#endif
+ } else {
+ pr_err(
+ "%s: unsupported ddr section type(%x) for PE(%d)\n"
+ , __func__, type, id);
+ return -EINVAL;
+ }
+
+ } else {
+ memcpy(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), data
+ + offset, size);
+ }
+
+ break;
+
+ case SHT_NOBITS:
+ memset(DDR_PHYS_TO_VIRT(DDR_PFE_TO_PHYS(addr)), 0, size);
+
+ break;
+
+ default:
+ pr_err("%s: unsupported section type(%x)\n", __func__,
+ type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Loads an elf section into pe lmem
+ * Data needs to be at least 32bit aligned, NOBITS sections are correctly
+ * initialized to 0
+ *
+ * @param[in] id PE identification (CLASS0_ID,..., CLASS5_ID)
+ * @param[in] data pointer to the elf firmware
+ * @param[in] shdr pointer to the elf section header
+ *
+ */
+static int pe_load_pe_lmem_section(int id, const void *data,
+ struct elf32_shdr *shdr)
+{
+ u32 offset = be32_to_cpu(shdr->sh_offset);
+ u32 addr = be32_to_cpu(shdr->sh_addr);
+ u32 size = be32_to_cpu(shdr->sh_size);
+ u32 type = be32_to_cpu(shdr->sh_type);
+
+ if (id > CLASS_MAX_ID) {
+ pr_err(
+ "%s: unsupported pe-lmem section type(%x) for PE(%d)\n",
+ __func__, type, id);
+ return -EINVAL;
+ }
+
+ if (((unsigned long)(data + offset) & 0x3) != (addr & 0x3)) {
+ pr_err(
+ "%s: load address(%x) and elf file address(%lx) don't have the same alignment\n",
+ __func__, addr, (unsigned long)data + offset);
+
+ return -EINVAL;
+ }
+
+ if (addr & 0x3) {
+ pr_err("%s: load address(%x) is not 32bit aligned\n",
+ __func__, addr);
+ return -EINVAL;
+ }
+
+ switch (type) {
+ case SHT_PROGBITS:
+ class_pe_lmem_memcpy_to32(addr, data + offset, size);
+ break;
+
+ case SHT_NOBITS:
+ class_pe_lmem_memset(addr, 0, size);
+ break;
+
+ default:
+ pr_err("%s: unsupported section type(%x)\n", __func__,
+ type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Loads an elf section into a PE
+ * For now only supports loading a section to dmem (all PEs), pmem (class and
+ * tmu PEs), or DDR (util PE code)
+ *
+ * @param[in] id PE identification (CLASS0_ID, ..., TMU0_ID,
+ * ..., UTIL_ID)
+ * @param[in] data pointer to the elf firmware
+ * @param[in] shdr pointer to the elf section header
+ *
+ */
+int pe_load_elf_section(int id, const void *data, struct elf32_shdr *shdr,
+			struct device *dev)
+{
+ u32 addr = be32_to_cpu(shdr->sh_addr);
+ u32 size = be32_to_cpu(shdr->sh_size);
+
+ if (IS_DMEM(addr, size))
+ return pe_load_dmem_section(id, data, shdr);
+ else if (IS_PMEM(addr, size))
+ return pe_load_pmem_section(id, data, shdr);
+ else if (IS_PFE_LMEM(addr, size))
+ return 0;
+ else if (IS_PHYS_DDR(addr, size))
+ return pe_load_ddr_section(id, data, shdr, dev);
+ else if (IS_PE_LMEM(addr, size))
+ return pe_load_pe_lmem_section(id, data, shdr);
+
+ pr_err("%s: unsupported memory range(%x)\n", __func__,
+ addr);
+ return 0;
+}
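+
+/*
+ * Example (illustrative sketch only, not part of this patch): how a firmware
+ * loader might walk the big-endian ELF section headers and feed each section
+ * that occupies PE memory to pe_load_elf_section().  The helper name and the
+ * section-flag filter are assumptions, not the driver's actual loader.
+ */
+static int __maybe_unused example_load_all_sections(int id, const void *fw,
+						    struct device *dev)
+{
+	const struct elf32_hdr *ehdr = fw;
+	struct elf32_shdr *shdr = (struct elf32_shdr *)(fw +
+					be32_to_cpu(ehdr->e_shoff));
+	int i, ret;
+
+	for (i = 0; i < be16_to_cpu(ehdr->e_shnum); i++, shdr++) {
+		/* Skip sections that don't occupy memory on the PE */
+		if (!(be32_to_cpu(shdr->sh_flags) &
+		      (SHF_WRITE | SHF_ALLOC | SHF_EXECINSTR)))
+			continue;
+
+		ret = pe_load_elf_section(id, fw, shdr, dev);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}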
+
+/**************************** BMU ***************************/
+
+/* Initializes a BMU block.
+ * @param[in] base BMU block base address
+ * @param[in] cfg BMU configuration
+ */
+void bmu_init(void *base, struct BMU_CFG *cfg)
+{
+ bmu_disable(base);
+
+ bmu_set_config(base, cfg);
+
+ bmu_reset(base);
+}
+
+/* Resets a BMU block.
+ * @param[in] base BMU block base address
+ */
+void bmu_reset(void *base)
+{
+ writel(CORE_SW_RESET, base + BMU_CTRL);
+
+ /* Wait for self clear */
+ while (readl(base + BMU_CTRL) & CORE_SW_RESET)
+ ;
+}
+
+/* Enables a BMU block.
+ * @param[in] base BMU block base address
+ */
+void bmu_enable(void *base)
+{
+ writel(CORE_ENABLE, base + BMU_CTRL);
+}
+
+/* Disables a BMU block.
+ * @param[in] base BMU block base address
+ */
+void bmu_disable(void *base)
+{
+ writel(CORE_DISABLE, base + BMU_CTRL);
+}
+
+/* Sets the configuration of a BMU block.
+ * @param[in] base BMU block base address
+ * @param[in] cfg BMU configuration
+ */
+void bmu_set_config(void *base, struct BMU_CFG *cfg)
+{
+ writel(cfg->baseaddr, base + BMU_UCAST_BASE_ADDR);
+ writel(cfg->count & 0xffff, base + BMU_UCAST_CONFIG);
+ writel(cfg->size & 0xffff, base + BMU_BUF_SIZE);
+
+ /* Interrupts are never used */
+ writel(cfg->low_watermark, base + BMU_LOW_WATERMARK);
+ writel(cfg->high_watermark, base + BMU_HIGH_WATERMARK);
+ writel(0x0, base + BMU_INT_ENABLE);
+}
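+
+/*
+ * Example (illustrative sketch only, not part of this patch): bringing up a
+ * BMU instance.  Every value in the configuration below is a placeholder;
+ * real values come from the platform initialization code.
+ */
+static void __maybe_unused example_bmu_bringup(void *bmu_base)
+{
+	struct BMU_CFG cfg = {
+		.baseaddr = 0x840000,	/* hypothetical buffer pool base */
+		.count = 1024,		/* hypothetical number of buffers */
+		.size = 128,		/* hypothetical buffer size value */
+		.low_watermark = 512,	/* hypothetical thresholds; BMU */
+		.high_watermark = 768,	/* interrupts stay disabled anyway */
+	};
+
+	bmu_init(bmu_base, &cfg);	/* disable + configure + soft reset */
+	bmu_enable(bmu_base);		/* start serving buffer allocations */
+}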
+
+/**************************** MTIP GEMAC ***************************/
+
+/* Enables the Rx Checksum Engine. With this enabled, frames with bad IP,
+ * TCP or UDP checksums are discarded
+ *
+ * @param[in] base GEMAC base address.
+ */
+void gemac_enable_rx_checksum_offload(void *base)
+{
+	/* No register configuration found to do this */
+}
+
+/* Disable Rx Checksum Engine.
+ *
+ * @param[in] base GEMAC base address.
+ */
+void gemac_disable_rx_checksum_offload(void *base)
+{
+	/* No register configuration found to do this */
+}
+
+/* GEMAC set speed.
+ * @param[in] base GEMAC base address
+ * @param[in] speed GEMAC speed (10, 100 or 1000 Mbps)
+ */
+void gemac_set_speed(void *base, enum mac_speed gem_speed)
+{
+ u32 ecr = readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_SPEED;
+ u32 rcr = readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_RMII_10T;
+
+ switch (gem_speed) {
+ case SPEED_10M:
+ rcr |= EMAC_RCNTRL_RMII_10T;
+ break;
+
+ case SPEED_1000M:
+ ecr |= EMAC_ECNTRL_SPEED;
+ break;
+
+ case SPEED_100M:
+ default:
+		/* Already in 100M mode */
+ break;
+ }
+ writel(ecr, (base + EMAC_ECNTRL_REG));
+ writel(rcr, (base + EMAC_RCNTRL_REG));
+}
+
+/* GEMAC set duplex.
+ * @param[in] base GEMAC base address
+ * @param[in] duplex GEMAC duplex mode (Full, Half)
+ */
+void gemac_set_duplex(void *base, int duplex)
+{
+ if (duplex == DUPLEX_HALF) {
+ writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_FDEN, base
+ + EMAC_TCNTRL_REG);
+ writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_DRT, (base
+ + EMAC_RCNTRL_REG));
+ } else{
+ writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_FDEN, base
+ + EMAC_TCNTRL_REG);
+ writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_DRT, (base
+ + EMAC_RCNTRL_REG));
+ }
+}
+
+/* GEMAC set mode.
+ * @param[in] base GEMAC base address
+ * @param[in] mode GEMAC operation mode (MII, RMII, RGMII, SGMII)
+ */
+void gemac_set_mode(void *base, int mode)
+{
+ u32 val = readl(base + EMAC_RCNTRL_REG);
+
+	/* Remove loopback */
+	val &= ~EMAC_RCNTRL_LOOP;
+
+	/*
+	 * Enable flow control and MII mode. PFE firmware always expects the
+	 * MAC to forward the CRC so that it can be validated in software.
+	 */
+ val |= (EMAC_RCNTRL_FCE | EMAC_RCNTRL_MII_MODE);
+
+ writel(val, base + EMAC_RCNTRL_REG);
+}
+
+/* GEMAC enable function.
+ * @param[in] base GEMAC base address
+ */
+void gemac_enable(void *base)
+{
+ writel(readl(base + EMAC_ECNTRL_REG) | EMAC_ECNTRL_ETHER_EN, base +
+ EMAC_ECNTRL_REG);
+}
+
+/* GEMAC disable function.
+ * @param[in] base GEMAC base address
+ */
+void gemac_disable(void *base)
+{
+ writel(readl(base + EMAC_ECNTRL_REG) & ~EMAC_ECNTRL_ETHER_EN, base +
+ EMAC_ECNTRL_REG);
+}
+
+/* GEMAC TX disable function.
+ * @param[in] base GEMAC base address
+ */
+void gemac_tx_disable(void *base)
+{
+ writel(readl(base + EMAC_TCNTRL_REG) | EMAC_TCNTRL_GTS, base +
+ EMAC_TCNTRL_REG);
+}
+
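+/* GEMAC TX enable function.
+ * @param[in] base	GEMAC base address
+ */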
+void gemac_tx_enable(void *base)
+{
+ writel(readl(base + EMAC_TCNTRL_REG) & ~EMAC_TCNTRL_GTS, base +
+ EMAC_TCNTRL_REG);
+}
+
+/* Sets the hash register of the MAC.
+ * This register is used for matching unicast and multicast frames.
+ *
+ * @param[in] base GEMAC base address.
+ * @param[in] hash 64-bit hash to be configured.
+ */
+void gemac_set_hash(void *base, struct pfe_mac_addr *hash)
+{
+ writel(hash->bottom, base + EMAC_GALR);
+ writel(hash->top, base + EMAC_GAUR);
+}
+
+void gemac_set_laddrN(void *base, struct pfe_mac_addr *address,
+ unsigned int entry_index)
+{
+ if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
+ return;
+
+ entry_index = entry_index - 1;
+ if (entry_index < 1) {
+ writel(htonl(address->bottom), base + EMAC_PHY_ADDR_LOW);
+ writel((htonl(address->top) | 0x8808), base +
+ EMAC_PHY_ADDR_HIGH);
+ } else {
+ writel(htonl(address->bottom), base + ((entry_index - 1) * 8)
+ + EMAC_SMAC_0_0);
+ writel((htonl(address->top) | 0x8808), base + ((entry_index -
+ 1) * 8) + EMAC_SMAC_0_1);
+ }
+}
+
+void gemac_clear_laddrN(void *base, unsigned int entry_index)
+{
+ if ((entry_index < 1) || (entry_index > EMAC_SPEC_ADDR_MAX))
+ return;
+
+ entry_index = entry_index - 1;
+ if (entry_index < 1) {
+ writel(0, base + EMAC_PHY_ADDR_LOW);
+ writel(0, base + EMAC_PHY_ADDR_HIGH);
+ } else {
+ writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_0);
+ writel(0, base + ((entry_index - 1) * 8) + EMAC_SMAC_0_1);
+ }
+}
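+
+/*
+ * Example (illustrative sketch only): programming a station MAC address into
+ * special-address entry 1.  The address value and the byte packing of
+ * struct pfe_mac_addr below are assumptions for illustration.
+ */
+static void __maybe_unused example_set_station_addr(void *gemac_base)
+{
+	const u8 mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
+	struct pfe_mac_addr addr = { 0 };
+
+	memcpy(&addr.bottom, mac, 4);		/* first four octets */
+	memcpy(&addr.top, mac + 4, 2);		/* last two octets */
+
+	gemac_set_laddrN(gemac_base, &addr, 1);	/* entry 1: station address */
+}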
+
+/* Set the loopback mode of the MAC. This can be either no loopback for
+ * normal operation, local loopback through MAC internal loopback module or PHY
+ * loopback for external loopback through a PHY. This asserts the external
+ * loop pin.
+ *
+ * @param[in] base GEMAC base address.
+ * @param[in] gem_loop Loopback mode to be enabled. LB_LOCAL - MAC
+ * Loopback,
+ * LB_EXT - PHY Loopback.
+ */
+void gemac_set_loop(void *base, enum mac_loop gem_loop)
+{
+ pr_info("%s()\n", __func__);
+ writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_LOOP, (base +
+ EMAC_RCNTRL_REG));
+}
+
+/* GEMAC allow frames
+ * @param[in] base GEMAC base address
+ */
+void gemac_enable_copy_all(void *base)
+{
+ writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_PROM, (base +
+ EMAC_RCNTRL_REG));
+}
+
+/* GEMAC do not allow frames
+ * @param[in] base GEMAC base address
+ */
+void gemac_disable_copy_all(void *base)
+{
+ writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_PROM, (base +
+ EMAC_RCNTRL_REG));
+}
+
+/* GEMAC allow broadcast function.
+ * @param[in] base GEMAC base address
+ */
+void gemac_allow_broadcast(void *base)
+{
+ writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_BC_REJ, base +
+ EMAC_RCNTRL_REG);
+}
+
+/* GEMAC no broadcast function.
+ * @param[in] base GEMAC base address
+ */
+void gemac_no_broadcast(void *base)
+{
+ writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_BC_REJ, base +
+ EMAC_RCNTRL_REG);
+}
+
+/* GEMAC enable 1536 rx function.
+ * @param[in] base GEMAC base address
+ */
+void gemac_enable_1536_rx(void *base)
+{
+ /* Set 1536 as Maximum frame length */
+ writel((readl(base + EMAC_RCNTRL_REG) & PFE_RCR_MAX_FL_MASK)
+ | (1536 << 16), base + EMAC_RCNTRL_REG);
+}
+
+/* GEMAC set rx Max frame length.
+ * @param[in] base GEMAC base address
+ * @param[in] mtu new mtu
+ */
+void gemac_set_rx_max_fl(void *base, int mtu)
+{
+ /* Set mtu as Maximum frame length */
+ writel((readl(base + EMAC_RCNTRL_REG) & PFE_RCR_MAX_FL_MASK)
+ | (mtu << 16), base + EMAC_RCNTRL_REG);
+}
+
+/* GEMAC enable stacked vlan function.
+ * @param[in] base GEMAC base address
+ */
+void gemac_enable_stacked_vlan(void *base)
+{
+ /* MTIP doesn't support stacked vlan */
+}
+
+/* GEMAC enable pause rx function.
+ * @param[in] base GEMAC base address
+ */
+void gemac_enable_pause_rx(void *base)
+{
+ writel(readl(base + EMAC_RCNTRL_REG) | EMAC_RCNTRL_FCE,
+ base + EMAC_RCNTRL_REG);
+}
+
+/* GEMAC disable pause rx function.
+ * @param[in] base GEMAC base address
+ */
+void gemac_disable_pause_rx(void *base)
+{
+ writel(readl(base + EMAC_RCNTRL_REG) & ~EMAC_RCNTRL_FCE,
+ base + EMAC_RCNTRL_REG);
+}
+
+/* GEMAC enable pause tx function.
+ * @param[in] base GEMAC base address
+ */
+void gemac_enable_pause_tx(void *base)
+{
+ writel(EMAC_RX_SECTION_EMPTY_V, base + EMAC_RX_SECTION_EMPTY);
+}
+
+/* GEMAC disable pause tx function.
+ * @param[in] base GEMAC base address
+ */
+void gemac_disable_pause_tx(void *base)
+{
+ writel(0x0, base + EMAC_RX_SECTION_EMPTY);
+}
+
+/* GEMAC wol configuration
+ * @param[in] base GEMAC base address
+ * @param[in] wol_conf WoL register configuration
+ */
+void gemac_set_wol(void *base, u32 wol_conf)
+{
+ u32 val = readl(base + EMAC_ECNTRL_REG);
+
+ if (wol_conf)
+ val |= (EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
+ else
+ val &= ~(EMAC_ECNTRL_MAGIC_ENA | EMAC_ECNTRL_SLEEP);
+ writel(val, base + EMAC_ECNTRL_REG);
+}
+
+/* Sets the GEMAC bus width. This is a no-op on this GEMAC, where the bus
+ * width is fixed in hardware.
+ * @param[in] base	GEMAC base address
+ * @param[in] width	GEMAC bus width to be set; possible values are 32/64/128
+ */
+void gemac_set_bus_width(void *base, int width)
+{
+}
+
+/* Sets Gemac configuration.
+ * @param[in] base GEMAC base address
+ * @param[in] cfg GEMAC configuration
+ */
+void gemac_set_config(void *base, struct gemac_cfg *cfg)
+{
+ /*GEMAC config taken from VLSI */
+ writel(0x00000004, base + EMAC_TFWR_STR_FWD);
+ writel(0x00000005, base + EMAC_RX_SECTION_FULL);
+
+ if (pfe_errata_a010897)
+ writel(0x0000076c, base + EMAC_TRUNC_FL);
+ else
+ writel(0x00003fff, base + EMAC_TRUNC_FL);
+
+ writel(0x00000030, base + EMAC_TX_SECTION_EMPTY);
+ writel(0x00000000, base + EMAC_MIB_CTRL_STS_REG);
+
+ gemac_set_mode(base, cfg->mode);
+
+ gemac_set_speed(base, cfg->speed);
+
+ gemac_set_duplex(base, cfg->duplex);
+}
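+
+/*
+ * Example (illustrative sketch only, not part of this patch): a typical
+ * GEMAC bring-up once the link parameters are known.  The speed and duplex
+ * values are placeholders; real drivers take them from the PHY.
+ */
+static void __maybe_unused example_gemac_bringup(void *gemac_base)
+{
+	struct gemac_cfg cfg = {
+		.mode = 0,		/* value unused by gemac_set_mode() */
+		.speed = SPEED_1000M,	/* hypothetical link speed */
+		.duplex = DUPLEX_FULL,	/* hypothetical duplex mode */
+	};
+
+	gemac_set_config(gemac_base, &cfg);
+	gemac_allow_broadcast(gemac_base);
+	gemac_enable_pause_rx(gemac_base);
+	gemac_enable(gemac_base);	/* finally enable the MAC */
+}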
+
+/**************************** GPI ***************************/
+
+/* Initializes a GPI block.
+ * @param[in] base GPI base address
+ * @param[in] cfg GPI configuration
+ */
+void gpi_init(void *base, struct gpi_cfg *cfg)
+{
+ gpi_reset(base);
+
+ gpi_disable(base);
+
+ gpi_set_config(base, cfg);
+}
+
+/* Resets a GPI block.
+ * @param[in] base GPI base address
+ */
+void gpi_reset(void *base)
+{
+ writel(CORE_SW_RESET, base + GPI_CTRL);
+}
+
+/* Enables a GPI block.
+ * @param[in] base GPI base address
+ */
+void gpi_enable(void *base)
+{
+ writel(CORE_ENABLE, base + GPI_CTRL);
+}
+
+/* Disables a GPI block.
+ * @param[in] base GPI base address
+ */
+void gpi_disable(void *base)
+{
+ writel(CORE_DISABLE, base + GPI_CTRL);
+}
+
+/* Sets the configuration of a GPI block.
+ * @param[in] base GPI base address
+ * @param[in] cfg GPI configuration
+ */
+void gpi_set_config(void *base, struct gpi_cfg *cfg)
+{
+ writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_ALLOC_CTRL), base
+ + GPI_LMEM_ALLOC_ADDR);
+ writel(CBUS_VIRT_TO_PFE(BMU1_BASE_ADDR + BMU_FREE_CTRL), base
+ + GPI_LMEM_FREE_ADDR);
+ writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_ALLOC_CTRL), base
+ + GPI_DDR_ALLOC_ADDR);
+ writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL), base
+ + GPI_DDR_FREE_ADDR);
+ writel(CBUS_VIRT_TO_PFE(CLASS_INQ_PKTPTR), base + GPI_CLASS_ADDR);
+ writel(DDR_HDR_SIZE, base + GPI_DDR_DATA_OFFSET);
+ writel(LMEM_HDR_SIZE, base + GPI_LMEM_DATA_OFFSET);
+ writel(0, base + GPI_LMEM_SEC_BUF_DATA_OFFSET);
+ writel(0, base + GPI_DDR_SEC_BUF_DATA_OFFSET);
+ writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, base + GPI_HDR_SIZE);
+ writel((DDR_BUF_SIZE << 16) | LMEM_BUF_SIZE, base + GPI_BUF_SIZE);
+
+ writel(((cfg->lmem_rtry_cnt << 16) | (GPI_DDR_BUF_EN << 1) |
+ GPI_LMEM_BUF_EN), base + GPI_RX_CONFIG);
+ writel(cfg->tmlf_txthres, base + GPI_TMLF_TX);
+ writel(cfg->aseq_len, base + GPI_DTX_ASEQ);
+ writel(1, base + GPI_TOE_CHKSUM_EN);
+
+ if (cfg->mtip_pause_reg) {
+ writel(cfg->mtip_pause_reg, base + GPI_CSR_MTIP_PAUSE_REG);
+ writel(EGPI_PAUSE_TIME, base + GPI_TX_PAUSE_TIME);
+ }
+}
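+
+/*
+ * Example (illustrative sketch only, not part of this patch): bringing up a
+ * GPI block.  All configuration values below are placeholders; real values
+ * come from the per-interface platform configuration.
+ */
+static void __maybe_unused example_gpi_bringup(void *gpi_base)
+{
+	struct gpi_cfg cfg = {
+		.lmem_rtry_cnt = 16,	/* hypothetical LMEM retry count */
+		.tmlf_txthres = 0x100,	/* hypothetical TMLF threshold */
+		.aseq_len = 0x40,	/* hypothetical DTX sequence length */
+		.mtip_pause_reg = 0,	/* no MTIP pause register wired up */
+	};
+
+	gpi_init(gpi_base, &cfg);	/* reset + disable + configure */
+	gpi_enable(gpi_base);
+}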
+
+/**************************** CLASSIFIER ***************************/
+
+/* Initializes CLASSIFIER block.
+ * @param[in] cfg CLASSIFIER configuration
+ */
+void class_init(struct class_cfg *cfg)
+{
+ class_reset();
+
+ class_disable();
+
+ class_set_config(cfg);
+}
+
+/* Resets CLASSIFIER block.
+ *
+ */
+void class_reset(void)
+{
+ writel(CORE_SW_RESET, CLASS_TX_CTRL);
+}
+
+/* Enables all CLASS-PE's cores.
+ *
+ */
+void class_enable(void)
+{
+ writel(CORE_ENABLE, CLASS_TX_CTRL);
+}
+
+/* Disables all CLASS-PE's cores.
+ *
+ */
+void class_disable(void)
+{
+ writel(CORE_DISABLE, CLASS_TX_CTRL);
+}
+
+/*
+ * Sets the configuration of the CLASSIFIER block.
+ * @param[in] cfg CLASSIFIER configuration
+ */
+void class_set_config(struct class_cfg *cfg)
+{
+ u32 val;
+
+ /* Initialize route table */
+ if (!cfg->resume)
+ memset(DDR_PHYS_TO_VIRT(cfg->route_table_baseaddr), 0, (1 <<
+ cfg->route_table_hash_bits) * CLASS_ROUTE_SIZE);
+
+#if !defined(LS1012A_PFE_RESET_WA)
+ writel(cfg->pe_sys_clk_ratio, CLASS_PE_SYS_CLK_RATIO);
+#endif
+
+ writel((DDR_HDR_SIZE << 16) | LMEM_HDR_SIZE, CLASS_HDR_SIZE);
+ writel(LMEM_BUF_SIZE, CLASS_LMEM_BUF_SIZE);
+ writel(CLASS_ROUTE_ENTRY_SIZE(CLASS_ROUTE_SIZE) |
+ CLASS_ROUTE_HASH_SIZE(cfg->route_table_hash_bits),
+ CLASS_ROUTE_HASH_ENTRY_SIZE);
+ writel(HIF_PKT_CLASS_EN | HIF_PKT_OFFSET(sizeof(struct hif_hdr)),
+ CLASS_HIF_PARSE);
+
+ val = HASH_CRC_PORT_IP | QB2BUS_LE;
+
+#if defined(CONFIG_IP_ALIGNED)
+ val |= IP_ALIGNED;
+#endif
+
+ /*
+ * Class PE packet steering will only work if TOE mode, bridge fetch or
+ * route fetch are enabled (see class/qb_fet.v). Route fetch would
+ * trigger additional memory copies (likely from DDR because of hash
+ * table size, which cannot be reduced because PE software still
+ * relies on hash value computed in HW), so when not in TOE mode we
+ * simply enable HW bridge fetch even though we don't use it.
+ */
+ if (cfg->toe_mode)
+ val |= CLASS_TOE;
+ else
+ val |= HW_BRIDGE_FETCH;
+
+ writel(val, CLASS_ROUTE_MULTI);
+
+ writel(DDR_PHYS_TO_PFE(cfg->route_table_baseaddr),
+ CLASS_ROUTE_TABLE_BASE);
+ writel(CLASS_PE0_RO_DM_ADDR0_VAL, CLASS_PE0_RO_DM_ADDR0);
+ writel(CLASS_PE0_RO_DM_ADDR1_VAL, CLASS_PE0_RO_DM_ADDR1);
+ writel(CLASS_PE0_QB_DM_ADDR0_VAL, CLASS_PE0_QB_DM_ADDR0);
+ writel(CLASS_PE0_QB_DM_ADDR1_VAL, CLASS_PE0_QB_DM_ADDR1);
+ writel(CBUS_VIRT_TO_PFE(TMU_PHY_INQ_PKTPTR), CLASS_TM_INQ_ADDR);
+
+ writel(23, CLASS_AFULL_THRES);
+ writel(23, CLASS_TSQ_FIFO_THRES);
+
+ writel(24, CLASS_MAX_BUF_CNT);
+ writel(24, CLASS_TSQ_MAX_CNT);
+}
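+
+/*
+ * Example (illustrative sketch only, not part of this patch): a minimal
+ * classifier bring-up.  The route table address, hash width and clock ratio
+ * are placeholders; real values come from the platform setup code.
+ */
+static void __maybe_unused example_class_bringup(unsigned long route_tbl_pa)
+{
+	struct class_cfg cfg = {
+		.resume = 0,			/* zero the route table */
+		.route_table_baseaddr = route_tbl_pa,
+		.route_table_hash_bits = 15,	/* hypothetical hash width */
+		.pe_sys_clk_ratio = 1,		/* hypothetical clock ratio */
+		.toe_mode = 0,			/* use HW bridge fetch path */
+	};
+
+	class_init(&cfg);	/* reset + disable + configure */
+	class_enable();
+}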
+
+/**************************** TMU ***************************/
+
+void tmu_reset(void)
+{
+ writel(SW_RESET, TMU_CTRL);
+}
+
+/* Initializes TMU block.
+ * @param[in] cfg TMU configuration
+ */
+void tmu_init(struct tmu_cfg *cfg)
+{
+ int q, phyno;
+
+ tmu_disable(0xF);
+ mdelay(10);
+
+#if !defined(LS1012A_PFE_RESET_WA)
+ /* keep in soft reset */
+ writel(SW_RESET, TMU_CTRL);
+#endif
+ writel(0x3, TMU_SYS_GENERIC_CONTROL);
+ writel(750, TMU_INQ_WATERMARK);
+ writel(CBUS_VIRT_TO_PFE(EGPI1_BASE_ADDR +
+ GPI_INQ_PKTPTR), TMU_PHY0_INQ_ADDR);
+ writel(CBUS_VIRT_TO_PFE(EGPI2_BASE_ADDR +
+ GPI_INQ_PKTPTR), TMU_PHY1_INQ_ADDR);
+ writel(CBUS_VIRT_TO_PFE(HGPI_BASE_ADDR +
+ GPI_INQ_PKTPTR), TMU_PHY3_INQ_ADDR);
+ writel(CBUS_VIRT_TO_PFE(HIF_NOCPY_RX_INQ0_PKTPTR), TMU_PHY4_INQ_ADDR);
+ writel(CBUS_VIRT_TO_PFE(UTIL_INQ_PKTPTR), TMU_PHY5_INQ_ADDR);
+ writel(CBUS_VIRT_TO_PFE(BMU2_BASE_ADDR + BMU_FREE_CTRL),
+ TMU_BMU_INQ_ADDR);
+
+	/* Enable all 10 schedulers [9:0] of each TDQ */
+	writel(0x3FF, TMU_TDQ0_SCH_CTRL);
+ writel(0x3FF, TMU_TDQ1_SCH_CTRL);
+ writel(0x3FF, TMU_TDQ3_SCH_CTRL);
+
+#if !defined(LS1012A_PFE_RESET_WA)
+ writel(cfg->pe_sys_clk_ratio, TMU_PE_SYS_CLK_RATIO);
+#endif
+
+#if !defined(LS1012A_PFE_RESET_WA)
+ writel(DDR_PHYS_TO_PFE(cfg->llm_base_addr), TMU_LLM_BASE_ADDR);
+ /* Extra packet pointers will be stored from this address onwards */
+
+ writel(cfg->llm_queue_len, TMU_LLM_QUE_LEN);
+ writel(5, TMU_TDQ_IIFG_CFG);
+ writel(DDR_BUF_SIZE, TMU_BMU_BUF_SIZE);
+
+ writel(0x0, TMU_CTRL);
+
+ /* MEM init */
+ pr_info("%s: mem init\n", __func__);
+ writel(MEM_INIT, TMU_CTRL);
+
+ while (!(readl(TMU_CTRL) & MEM_INIT_DONE))
+ ;
+
+ /* LLM init */
+ pr_info("%s: lmem init\n", __func__);
+ writel(LLM_INIT, TMU_CTRL);
+
+ while (!(readl(TMU_CTRL) & LLM_INIT_DONE))
+ ;
+#endif
+ /* set up each queue for tail drop */
+ for (phyno = 0; phyno < 4; phyno++) {
+ if (phyno == 2)
+ continue;
+ for (q = 0; q < 16; q++) {
+ u32 qdepth;
+
+ writel((phyno << 8) | q, TMU_TEQ_CTRL);
+ writel(1 << 22, TMU_TEQ_QCFG); /*Enable tail drop */
+
+ if (phyno == 3)
+ qdepth = DEFAULT_TMU3_QDEPTH;
+ else
+ qdepth = (q == 0) ? DEFAULT_Q0_QDEPTH :
+ DEFAULT_MAX_QDEPTH;
+
+ /* LOG: 68855 */
+ /*
+ * The following is a workaround for the reordered
+ * packet and BMU2 buffer leakage issue.
+ */
+ if (CHIP_REVISION() == 0)
+ qdepth = 31;
+
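+			/*
+			 * The tail-drop depth spans the CFG2/CFG3 register
+			 * pair: qdepth bits [13:0] go to CFG2[31:18], the
+			 * remaining high bits to CFG3.
+			 */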
+ writel(qdepth << 18, TMU_TEQ_HW_PROB_CFG2);
+ writel(qdepth >> 14, TMU_TEQ_HW_PROB_CFG3);
+ }
+ }
+
+#ifdef CFG_LRO
+ /* Set TMU-3 queue 5 (LRO) in no-drop mode */
+ writel((3 << 8) | TMU_QUEUE_LRO, TMU_TEQ_CTRL);
+ writel(0, TMU_TEQ_QCFG);
+#endif
+
+ writel(0x05, TMU_TEQ_DISABLE_DROPCHK);
+
+ writel(0x0, TMU_CTRL);
+}
+
+/* Enables TMU-PE cores.
+ * @param[in] pe_mask TMU PE mask
+ */
+void tmu_enable(u32 pe_mask)
+{
+ writel(readl(TMU_TX_CTRL) | (pe_mask & 0xF), TMU_TX_CTRL);
+}
+
+/* Disables TMU cores.
+ * @param[in] pe_mask TMU PE mask
+ */
+void tmu_disable(u32 pe_mask)
+{
+ writel(readl(TMU_TX_CTRL) & ~(pe_mask & 0xF), TMU_TX_CTRL);
+}
+
+/* Returns the TMU queue status
+ * @param[in] if_id	gem interface id or TMU index
+ * @return	bit mask of busy queues; zero means all
+ *		queues are empty
+ */
+u32 tmu_qstatus(u32 if_id)
+{
+ return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
+ offsetof(struct pe_status, tmu_qstatus), 4));
+}
+
+u32 tmu_pkts_processed(u32 if_id)
+{
+ return cpu_to_be32(pe_dmem_read(TMU0_ID + if_id, TMU_DM_PESTATUS +
+ offsetof(struct pe_status, rx), 4));
+}
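+
+/*
+ * Example (illustrative sketch only, not part of this patch): waiting for a
+ * TMU to drain its queues before shutting an interface down, polling the
+ * busy-queue bitmask returned by tmu_qstatus().  The retry bound is an
+ * assumption.
+ */
+static int __maybe_unused example_tmu_drain(u32 if_id)
+{
+	int retries = 100;	/* hypothetical bound, ~100ms total */
+
+	while (tmu_qstatus(if_id) && --retries)
+		mdelay(1);
+
+	return retries ? 0 : -ETIMEDOUT;
+}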
+
+/**************************** UTIL ***************************/
+
+/* Resets UTIL block.
+ */
+void util_reset(void)
+{
+ writel(CORE_SW_RESET, UTIL_TX_CTRL);
+}
+
+/* Initializes UTIL block.
+ * @param[in] cfg UTIL configuration
+ */
+void util_init(struct util_cfg *cfg)
+{
+ writel(cfg->pe_sys_clk_ratio, UTIL_PE_SYS_CLK_RATIO);
+}
+
+/* Enables UTIL-PE core.
+ *
+ */
+void util_enable(void)
+{
+ writel(CORE_ENABLE, UTIL_TX_CTRL);
+}
+
+/* Disables UTIL-PE core.
+ *
+ */
+void util_disable(void)
+{
+ writel(CORE_DISABLE, UTIL_TX_CTRL);
+}
+
+/**************************** HIF ***************************/
+/* Initializes HIF copy block.
+ *
+ */
+void hif_init(void)
+{
+ /*Initialize HIF registers*/
+ writel((HIF_RX_POLL_CTRL_CYCLE << 16) | HIF_TX_POLL_CTRL_CYCLE,
+ HIF_POLL_CTRL);
+}
+
+/* Enable hif tx DMA and interrupt
+ *
+ */
+void hif_tx_enable(void)
+{
+ writel(HIF_CTRL_DMA_EN, HIF_TX_CTRL);
+ writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_TXPKT_INT_EN),
+ HIF_INT_ENABLE);
+}
+
+/* Disable hif tx DMA and interrupt
+ *
+ */
+void hif_tx_disable(void)
+{
+ u32 hif_int;
+
+ writel(0, HIF_TX_CTRL);
+
+ hif_int = readl(HIF_INT_ENABLE);
+	hif_int &= ~HIF_TXPKT_INT_EN;
+ writel(hif_int, HIF_INT_ENABLE);
+}
+
+/* Enable hif rx DMA and interrupt
+ *
+ */
+void hif_rx_enable(void)
+{
+ hif_rx_dma_start();
+ writel((readl(HIF_INT_ENABLE) | HIF_INT_EN | HIF_RXPKT_INT_EN),
+ HIF_INT_ENABLE);
+}
+
+/* Disable hif rx DMA and interrupt
+ *
+ */
+void hif_rx_disable(void)
+{
+ u32 hif_int;
+
+ writel(0, HIF_RX_CTRL);
+
+ hif_int = readl(HIF_INT_ENABLE);
+	hif_int &= ~HIF_RXPKT_INT_EN;
+ writel(hif_int, HIF_INT_ENABLE);
+}
diff --git a/drivers/staging/fsl_ppfe/pfe_hif.c b/drivers/staging/fsl_ppfe/pfe_hif.c
new file mode 100644
index 000000000000..966f9686bde2
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_hif.c
@@ -0,0 +1,1060 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+
+#include <linux/io.h>
+#include <asm/irq.h>
+
+#include "pfe_mod.h"
+
+#define HIF_INT_MASK (HIF_INT | HIF_RXPKT_INT | HIF_TXPKT_INT)
+
+unsigned char napi_first_batch;
+
+static void pfe_tx_do_cleanup(unsigned long data);
+
+static int pfe_hif_alloc_descr(struct pfe_hif *hif)
+{
+ void *addr;
+ dma_addr_t dma_addr;
+ int err = 0;
+
+ pr_info("%s\n", __func__);
+ addr = dma_alloc_coherent(pfe->dev,
+ HIF_RX_DESC_NT * sizeof(struct hif_desc) +
+ HIF_TX_DESC_NT * sizeof(struct hif_desc),
+ &dma_addr, GFP_KERNEL);
+
+ if (!addr) {
+ pr_err("%s: Could not allocate buffer descriptors!\n"
+ , __func__);
+ err = -ENOMEM;
+ goto err0;
+ }
+
+ hif->descr_baseaddr_p = dma_addr;
+ hif->descr_baseaddr_v = addr;
+ hif->rx_ring_size = HIF_RX_DESC_NT;
+ hif->tx_ring_size = HIF_TX_DESC_NT;
+
+ return 0;
+
+err0:
+ return err;
+}
+
+#if defined(LS1012A_PFE_RESET_WA)
+static void pfe_hif_disable_rx_desc(struct pfe_hif *hif)
+{
+ int ii;
+ struct hif_desc *desc = hif->rx_base;
+
+ /*Mark all descriptors as LAST_BD */
+ for (ii = 0; ii < hif->rx_ring_size; ii++) {
+ desc->ctrl |= BD_CTRL_LAST_BD;
+ desc++;
+ }
+}
+
+struct class_rx_hdr_t {
+ u32 next_ptr; /* ptr to the start of the first DDR buffer */
+ u16 length; /* total packet length */
+ u16 phyno; /* input physical port number */
+ u32 status; /* gemac status bits */
+ u32 status2; /* reserved for software usage */
+};
+
+/* STATUS_BAD_FRAME_ERR is set for all errors (including checksums if enabled)
+ * except overflow
+ */
+#define STATUS_BAD_FRAME_ERR BIT(16)
+#define STATUS_LENGTH_ERR BIT(17)
+#define STATUS_CRC_ERR BIT(18)
+#define STATUS_TOO_SHORT_ERR BIT(19)
+#define STATUS_TOO_LONG_ERR BIT(20)
+#define STATUS_CODE_ERR BIT(21)
+#define STATUS_MC_HASH_MATCH BIT(22)
+#define STATUS_CUMULATIVE_ARC_HIT BIT(23)
+#define STATUS_UNICAST_HASH_MATCH BIT(24)
+#define STATUS_IP_CHECKSUM_CORRECT BIT(25)
+#define STATUS_TCP_CHECKSUM_CORRECT BIT(26)
+#define STATUS_UDP_CHECKSUM_CORRECT BIT(27)
+#define STATUS_OVERFLOW_ERR BIT(28) /* GPI error */
+#define MIN_PKT_SIZE 64
+
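+/* Copy to LMEM word by word, converting each 32-bit word to big-endian
+ * (network) byte order on the way in.
+ */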
+static inline void copy_to_lmem(u32 *dst, u32 *src, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i += sizeof(u32)) {
+ *dst = htonl(*src);
+ dst++; src++;
+ }
+}
+
+static void send_dummy_pkt_to_hif(void)
+{
+ void *lmem_ptr, *ddr_ptr, *lmem_virt_addr;
+ u32 physaddr;
+ struct class_rx_hdr_t local_hdr;
+ static u32 dummy_pkt[] = {
+ 0x33221100, 0x2b785544, 0xd73093cb, 0x01000608,
+ 0x04060008, 0x2b780200, 0xd73093cb, 0x0a01a8c0,
+ 0x33221100, 0xa8c05544, 0x00000301, 0x00000000,
+ 0x00000000, 0x00000000, 0x00000000, 0xbe86c51f };
+
+ ddr_ptr = (void *)((u64)readl(BMU2_BASE_ADDR + BMU_ALLOC_CTRL));
+ if (!ddr_ptr)
+ return;
+
+ lmem_ptr = (void *)((u64)readl(BMU1_BASE_ADDR + BMU_ALLOC_CTRL));
+ if (!lmem_ptr)
+ return;
+
+ pr_info("Sending a dummy pkt to HIF %p %p\n", ddr_ptr, lmem_ptr);
+ physaddr = (u32)DDR_VIRT_TO_PFE(ddr_ptr);
+
+ lmem_virt_addr = (void *)CBUS_PFE_TO_VIRT((unsigned long int)lmem_ptr);
+
+ local_hdr.phyno = htons(0); /* RX_PHY_0 */
+ local_hdr.length = htons(MIN_PKT_SIZE);
+
+ local_hdr.next_ptr = htonl((u32)physaddr);
+ /*Mark checksum is correct */
+ local_hdr.status = htonl((STATUS_IP_CHECKSUM_CORRECT |
+ STATUS_UDP_CHECKSUM_CORRECT |
+ STATUS_TCP_CHECKSUM_CORRECT |
+ STATUS_UNICAST_HASH_MATCH |
+ STATUS_CUMULATIVE_ARC_HIT));
+ copy_to_lmem((u32 *)lmem_virt_addr, (u32 *)&local_hdr,
+ sizeof(local_hdr));
+
+ copy_to_lmem((u32 *)(lmem_virt_addr + LMEM_HDR_SIZE), (u32 *)dummy_pkt,
+ 0x40);
+
+ writel((unsigned long int)lmem_ptr, CLASS_INQ_PKTPTR);
+}
+
+void pfe_hif_rx_idle(struct pfe_hif *hif)
+{
+ int hif_stop_loop = 10;
+ u32 rx_status;
+
+ pfe_hif_disable_rx_desc(hif);
+ pr_info("Bringing hif to idle state...");
+ writel(0, HIF_INT_ENABLE);
+ /*If HIF Rx BDP is busy send a dummy packet */
+ do {
+ rx_status = readl(HIF_RX_STATUS);
+ if (rx_status & BDP_CSR_RX_DMA_ACTV)
+ send_dummy_pkt_to_hif();
+
+ usleep_range(100, 150);
+ } while (--hif_stop_loop);
+
+ if (readl(HIF_RX_STATUS) & BDP_CSR_RX_DMA_ACTV)
+ pr_info("Failed\n");
+ else
+ pr_info("Done\n");
+}
+#endif
+
+static void pfe_hif_free_descr(struct pfe_hif *hif)
+{
+ pr_info("%s\n", __func__);
+
+ dma_free_coherent(pfe->dev,
+ hif->rx_ring_size * sizeof(struct hif_desc) +
+ hif->tx_ring_size * sizeof(struct hif_desc),
+ hif->descr_baseaddr_v, hif->descr_baseaddr_p);
+}
+
+void pfe_hif_desc_dump(struct pfe_hif *hif)
+{
+ struct hif_desc *desc;
+ unsigned long desc_p;
+ int ii = 0;
+
+ pr_info("%s\n", __func__);
+
+ desc = hif->rx_base;
+ desc_p = (u32)((u64)desc - (u64)hif->descr_baseaddr_v +
+ hif->descr_baseaddr_p);
+
+ pr_info("HIF Rx desc base %p physical %x\n", desc, (u32)desc_p);
+ for (ii = 0; ii < hif->rx_ring_size; ii++) {
+ pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
+ readl(&desc->status), readl(&desc->ctrl),
+ readl(&desc->data), readl(&desc->next));
+ desc++;
+ }
+
+ desc = hif->tx_base;
+ desc_p = ((u64)desc - (u64)hif->descr_baseaddr_v +
+ hif->descr_baseaddr_p);
+
+ pr_info("HIF Tx desc base %p physical %x\n", desc, (u32)desc_p);
+ for (ii = 0; ii < hif->tx_ring_size; ii++) {
+ pr_info("status: %08x, ctrl: %08x, data: %08x, next: %x\n",
+ readl(&desc->status), readl(&desc->ctrl),
+ readl(&desc->data), readl(&desc->next));
+ desc++;
+ }
+}
+
+/* pfe_hif_release_buffers */
+static void pfe_hif_release_buffers(struct pfe_hif *hif)
+{
+ struct hif_desc *desc;
+ int i = 0;
+
+ hif->rx_base = hif->descr_baseaddr_v;
+
+ pr_info("%s\n", __func__);
+
+ /*Free Rx buffers */
+ desc = hif->rx_base;
+ for (i = 0; i < hif->rx_ring_size; i++) {
+ if (readl(&desc->data)) {
+ if ((i < hif->shm->rx_buf_pool_cnt) &&
+ (!hif->shm->rx_buf_pool[i])) {
+ dma_unmap_single(hif->dev,
+ DDR_PFE_TO_PHYS(
+ readl(&desc->data)),
+ hif->rx_buf_len[i],
+ DMA_FROM_DEVICE);
+ hif->shm->rx_buf_pool[i] = hif->rx_buf_addr[i];
+ } else {
+ pr_err("%s: buffer pool already full\n"
+ , __func__);
+ }
+ }
+
+ writel(0, &desc->data);
+ writel(0, &desc->status);
+ writel(0, &desc->ctrl);
+ desc++;
+ }
+}
+
+/*
+ * pfe_hif_init_buffers
+ * This function initializes the HIF Rx/Tx ring descriptors and
+ * initializes the Rx queue with buffers.
+ */
+static int pfe_hif_init_buffers(struct pfe_hif *hif)
+{
+ struct hif_desc *desc, *first_desc_p;
+ u32 data;
+ int i = 0;
+
+ pr_info("%s\n", __func__);
+
+ /* Check enough Rx buffers available in the shared memory */
+ if (hif->shm->rx_buf_pool_cnt < hif->rx_ring_size)
+ return -ENOMEM;
+
+ hif->rx_base = hif->descr_baseaddr_v;
+ memset(hif->rx_base, 0, hif->rx_ring_size * sizeof(struct hif_desc));
+
+ /*Initialize Rx descriptors */
+ desc = hif->rx_base;
+ first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p;
+
+ for (i = 0; i < hif->rx_ring_size; i++) {
+ /* Initialize Rx buffers from the shared memory */
+
+ data = (u32)dma_map_single(hif->dev, hif->shm->rx_buf_pool[i],
+ pfe_pkt_size, DMA_FROM_DEVICE);
+ hif->rx_buf_addr[i] = hif->shm->rx_buf_pool[i];
+ hif->rx_buf_len[i] = pfe_pkt_size;
+ hif->shm->rx_buf_pool[i] = NULL;
+
+ if (likely(dma_mapping_error(hif->dev, data) == 0)) {
+ writel(DDR_PHYS_TO_PFE(data), &desc->data);
+ } else {
+ pr_err("%s : low on mem\n", __func__);
+
+ goto err;
+ }
+
+ writel(0, &desc->status);
+
+ /*
+ * Ensure everything else is written to DDR before
+ * writing bd->ctrl
+ */
+ wmb();
+
+ writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM
+ | BD_CTRL_DIR | BD_CTRL_DESC_EN
+ | BD_BUF_LEN(pfe_pkt_size)), &desc->ctrl);
+
+ /* Chain descriptors */
+ writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
+ desc++;
+ }
+
+ /* Overwrite last descriptor to chain it to first one*/
+ desc--;
+ writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
+
+ hif->rxtoclean_index = 0;
+
+ /*Initialize Rx buffer descriptor ring base address */
+ writel(DDR_PHYS_TO_PFE(hif->descr_baseaddr_p), HIF_RX_BDP_ADDR);
+
+ hif->tx_base = hif->rx_base + hif->rx_ring_size;
+ first_desc_p = (struct hif_desc *)hif->descr_baseaddr_p +
+ hif->rx_ring_size;
+ memset(hif->tx_base, 0, hif->tx_ring_size * sizeof(struct hif_desc));
+
+ /*Initialize tx descriptors */
+ desc = hif->tx_base;
+
+ for (i = 0; i < hif->tx_ring_size; i++) {
+ /* Chain descriptors */
+ writel((u32)DDR_PHYS_TO_PFE(first_desc_p + i + 1), &desc->next);
+ writel(0, &desc->ctrl);
+ desc++;
+ }
+
+ /* Overwrite last descriptor to chain it to first one */
+ desc--;
+ writel((u32)DDR_PHYS_TO_PFE(first_desc_p), &desc->next);
+ hif->txavail = hif->tx_ring_size;
+ hif->txtosend = 0;
+ hif->txtoclean = 0;
+ hif->txtoflush = 0;
+
+ /*Initialize Tx buffer descriptor ring base address */
+ writel((u32)DDR_PHYS_TO_PFE(first_desc_p), HIF_TX_BDP_ADDR);
+
+ return 0;
+
+err:
+ pfe_hif_release_buffers(hif);
+ return -ENOMEM;
+}
+
+/*
+ * pfe_hif_client_register
+ *
+ * This function is used to register a client driver with the HIF driver.
+ *
+ * Return value:
+ * 0 - on successful registration
+ */
+static int pfe_hif_client_register(struct pfe_hif *hif, u32 client_id,
+ struct hif_client_shm *client_shm)
+{
+ struct hif_client *client = &hif->client[client_id];
+ u32 i, cnt;
+ struct rx_queue_desc *rx_qbase;
+ struct tx_queue_desc *tx_qbase;
+ struct hif_rx_queue *rx_queue;
+ struct hif_tx_queue *tx_queue;
+ int err = 0;
+
+ pr_info("%s\n", __func__);
+
+ spin_lock_bh(&hif->tx_lock);
+
+ if (test_bit(client_id, &hif->shm->g_client_status[0])) {
+ pr_err("%s: client %d already registered\n",
+ __func__, client_id);
+ err = -1;
+ goto unlock;
+ }
+
+ memset(client, 0, sizeof(struct hif_client));
+
+ /* Initialize client Rx queues baseaddr, size */
+
+ cnt = CLIENT_CTRL_RX_Q_CNT(client_shm->ctrl);
+ /* Check if client is requesting for more queues than supported */
+ if (cnt > HIF_CLIENT_QUEUES_MAX)
+ cnt = HIF_CLIENT_QUEUES_MAX;
+
+ client->rx_qn = cnt;
+ rx_qbase = (struct rx_queue_desc *)client_shm->rx_qbase;
+ for (i = 0; i < cnt; i++) {
+ rx_queue = &client->rx_q[i];
+ rx_queue->base = rx_qbase + i * client_shm->rx_qsize;
+ rx_queue->size = client_shm->rx_qsize;
+ rx_queue->write_idx = 0;
+ }
+
+ /* Initialize client Tx queues baseaddr, size */
+ cnt = CLIENT_CTRL_TX_Q_CNT(client_shm->ctrl);
+
+ /* Check if client is requesting for more queues than supported */
+ if (cnt > HIF_CLIENT_QUEUES_MAX)
+ cnt = HIF_CLIENT_QUEUES_MAX;
+
+ client->tx_qn = cnt;
+ tx_qbase = (struct tx_queue_desc *)client_shm->tx_qbase;
+ for (i = 0; i < cnt; i++) {
+ tx_queue = &client->tx_q[i];
+ tx_queue->base = tx_qbase + i * client_shm->tx_qsize;
+ tx_queue->size = client_shm->tx_qsize;
+ tx_queue->ack_idx = 0;
+ }
+
+ set_bit(client_id, &hif->shm->g_client_status[0]);
+
+unlock:
+ spin_unlock_bh(&hif->tx_lock);
+
+ return err;
+}
+
+/*
+ * pfe_hif_client_unregister
+ *
+ * This function is used to unregister a client from the HIF driver.
+ *
+ */
+static void pfe_hif_client_unregister(struct pfe_hif *hif, u32 client_id)
+{
+ pr_info("%s\n", __func__);
+
+ /*
+ * Mark client as no longer available (which prevents further packet
+ * receive for this client)
+ */
+ spin_lock_bh(&hif->tx_lock);
+
+ if (!test_bit(client_id, &hif->shm->g_client_status[0])) {
+ pr_err("%s: client %d not registered\n", __func__,
+ client_id);
+
+ spin_unlock_bh(&hif->tx_lock);
+ return;
+ }
+
+ clear_bit(client_id, &hif->shm->g_client_status[0]);
+
+ spin_unlock_bh(&hif->tx_lock);
+}
+
+/*
+ * client_put_rxpacket-
+ * This function puts the Rx pkt in the given client Rx queue.
+ * It actually swaps the Rx pkt into the client Rx descriptor buffer
+ * and returns the free buffer from it.
+ *
+ * A NULL return value means the client Rx queue is full and the
+ * packet could not be sent to the client queue.
+ */
+static void *client_put_rxpacket(struct hif_rx_queue *queue, void *pkt, u32 len,
+ u32 flags, u32 client_ctrl, u32 *rem_len)
+{
+ void *free_pkt = NULL;
+ struct rx_queue_desc *desc = queue->base + queue->write_idx;
+
+ if (readl(&desc->ctrl) & CL_DESC_OWN) {
+ if (page_mode) {
+ int rem_page_size = PAGE_SIZE -
+ PRESENT_OFST_IN_PAGE(pkt);
+ int cur_pkt_size = ROUND_MIN_RX_SIZE(len +
+ pfe_pkt_headroom);
+ *rem_len = (rem_page_size - cur_pkt_size);
+ if (*rem_len) {
+ free_pkt = pkt + cur_pkt_size;
+ get_page(virt_to_page(free_pkt));
+ } else {
+ free_pkt = (void
+ *)__get_free_page(GFP_ATOMIC | GFP_DMA_PFE);
+ *rem_len = pfe_pkt_size;
+ }
+ } else {
+ free_pkt = kmalloc(PFE_BUF_SIZE, GFP_ATOMIC |
+ GFP_DMA_PFE);
+ *rem_len = PFE_BUF_SIZE - pfe_pkt_headroom;
+ }
+
+ if (free_pkt) {
+ desc->data = pkt;
+ desc->client_ctrl = client_ctrl;
+ /*
+ * Ensure everything else is written to DDR before
+ * writing bd->ctrl
+ */
+ smp_wmb();
+ writel(CL_DESC_BUF_LEN(len) | flags, &desc->ctrl);
+ queue->write_idx = (queue->write_idx + 1)
+ & (queue->size - 1);
+
+ free_pkt += pfe_pkt_headroom;
+ }
+ }
+
+ return free_pkt;
+}
+
+/*
+ * pfe_hif_rx_process-
+ * This function does the PFE HIF Rx queue processing.
+ * It dequeues packets from the Rx queue and sends them to the
+ * corresponding client queue.
+ */
+static int pfe_hif_rx_process(struct pfe_hif *hif, int budget)
+{
+ struct hif_desc *desc;
+ struct hif_hdr *pkt_hdr;
+ struct __hif_hdr hif_hdr;
+ void *free_buf;
+ int rtc, len, rx_processed = 0;
+ struct __hif_desc local_desc;
+ int flags;
+ unsigned int desc_p;
+ unsigned int buf_size = 0;
+
+ spin_lock_bh(&hif->lock);
+
+ rtc = hif->rxtoclean_index;
+
+ while (rx_processed < budget) {
+ desc = hif->rx_base + rtc;
+
+ __memcpy12(&local_desc, desc);
+
+ /* ACK pending Rx interrupt */
+ if (local_desc.ctrl & BD_CTRL_DESC_EN) {
+ writel(HIF_INT | HIF_RXPKT_INT, HIF_INT_SRC);
+
+ if (rx_processed == 0) {
+ if (napi_first_batch == 1) {
+ desc_p = hif->descr_baseaddr_p +
+ ((unsigned long int)(desc) -
+ (unsigned long
+ int)hif->descr_baseaddr_v);
+ napi_first_batch = 0;
+ }
+ }
+
+ __memcpy12(&local_desc, desc);
+
+ if (local_desc.ctrl & BD_CTRL_DESC_EN)
+ break;
+ }
+
+ napi_first_batch = 0;
+
+#ifdef HIF_NAPI_STATS
+ hif->napi_counters[NAPI_DESC_COUNT]++;
+#endif
+ len = BD_BUF_LEN(local_desc.ctrl);
+ dma_unmap_single(hif->dev, DDR_PFE_TO_PHYS(local_desc.data),
+ hif->rx_buf_len[rtc], DMA_FROM_DEVICE);
+
+ pkt_hdr = (struct hif_hdr *)hif->rx_buf_addr[rtc];
+
+ /* Track last HIF header received */
+ if (!hif->started) {
+ hif->started = 1;
+
+ __memcpy8(&hif_hdr, pkt_hdr);
+
+ hif->qno = hif_hdr.hdr.q_num;
+ hif->client_id = hif_hdr.hdr.client_id;
+ hif->client_ctrl = (hif_hdr.hdr.client_ctrl1 << 16) |
+ hif_hdr.hdr.client_ctrl;
+ flags = CL_DESC_FIRST;
+
+ } else {
+ flags = 0;
+ }
+
+ if (local_desc.ctrl & BD_CTRL_LIFM)
+ flags |= CL_DESC_LAST;
+
+ /* Check for valid client id and still registered */
+ if ((hif->client_id >= HIF_CLIENTS_MAX) ||
+ !(test_bit(hif->client_id,
+ &hif->shm->g_client_status[0]))) {
+ printk_ratelimited("%s: packet with invalid client id %d q_num %d\n",
+ __func__,
+ hif->client_id,
+ hif->qno);
+
+ free_buf = pkt_hdr;
+
+ goto pkt_drop;
+ }
+
+		/* Check for a valid queue number */
+ if (hif->client[hif->client_id].rx_qn <= hif->qno) {
+ pr_info("%s: packet with invalid queue: %d\n"
+ , __func__, hif->qno);
+ hif->qno = 0;
+ }
+
+ free_buf =
+ client_put_rxpacket(&hif->client[hif->client_id].rx_q[hif->qno],
+ (void *)pkt_hdr, len, flags,
+ hif->client_ctrl, &buf_size);
+
+ hif_lib_indicate_client(hif->client_id, EVENT_RX_PKT_IND,
+ hif->qno);
+
+ if (unlikely(!free_buf)) {
+#ifdef HIF_NAPI_STATS
+ hif->napi_counters[NAPI_CLIENT_FULL_COUNT]++;
+#endif
+ /*
+ * If we want to keep in polling mode to retry later,
+ * we need to tell napi that we consumed
+ * the full budget or we will hit a livelock scenario.
+ * The core code keeps this napi instance
+ * at the head of the list and none of the other
+ * instances get to run
+ */
+ rx_processed = budget;
+
+ if (flags & CL_DESC_FIRST)
+ hif->started = 0;
+
+ break;
+ }
+
+pkt_drop:
+ /*Fill free buffer in the descriptor */
+ hif->rx_buf_addr[rtc] = free_buf;
+ hif->rx_buf_len[rtc] = min(pfe_pkt_size, buf_size);
+ writel((DDR_PHYS_TO_PFE
+ ((u32)dma_map_single(hif->dev,
+ free_buf, hif->rx_buf_len[rtc], DMA_FROM_DEVICE))),
+ &desc->data);
+ /*
+ * Ensure everything else is written to DDR before
+ * writing bd->ctrl
+ */
+ wmb();
+ writel((BD_CTRL_PKT_INT_EN | BD_CTRL_LIFM | BD_CTRL_DIR |
+ BD_CTRL_DESC_EN | BD_BUF_LEN(hif->rx_buf_len[rtc])),
+ &desc->ctrl);
+
+ rtc = (rtc + 1) & (hif->rx_ring_size - 1);
+
+ if (local_desc.ctrl & BD_CTRL_LIFM) {
+ if (!(hif->client_ctrl & HIF_CTRL_RX_CONTINUED)) {
+ rx_processed++;
+
+#ifdef HIF_NAPI_STATS
+ hif->napi_counters[NAPI_PACKET_COUNT]++;
+#endif
+ }
+ hif->started = 0;
+ }
+ }
+
+ hif->rxtoclean_index = rtc;
+ spin_unlock_bh(&hif->lock);
+
+ /* we made some progress, re-start rx dma in case it stopped */
+ hif_rx_dma_start();
+
+ return rx_processed;
+}
+
+/*
+ * client_ack_txpacket-
+ * This function acks the Tx packet in the given client Tx queue by resetting
+ * the ownership bit in the descriptor.
+ */
+static int client_ack_txpacket(struct pfe_hif *hif, unsigned int client_id,
+ unsigned int q_no)
+{
+ struct hif_tx_queue *queue = &hif->client[client_id].tx_q[q_no];
+ struct tx_queue_desc *desc = queue->base + queue->ack_idx;
+
+ if (readl(&desc->ctrl) & CL_DESC_OWN) {
+ writel((readl(&desc->ctrl) & ~CL_DESC_OWN), &desc->ctrl);
+ queue->ack_idx = (queue->ack_idx + 1) & (queue->size - 1);
+
+ return 0;
+
+ } else {
+ /*This should not happen */
+ pr_err("%s: %d %d %d %d %d %p %d\n", __func__,
+ hif->txtosend, hif->txtoclean, hif->txavail,
+ client_id, q_no, queue, queue->ack_idx);
+ WARN(1, "%s: doesn't own this descriptor", __func__);
+ return 1;
+ }
+}
+
+void __hif_tx_done_process(struct pfe_hif *hif, int count)
+{
+ struct hif_desc *desc;
+ struct hif_desc_sw *desc_sw;
+ int ttc, tx_avl;
+ int pkts_done[HIF_CLIENTS_MAX] = {0, 0};
+
+ ttc = hif->txtoclean;
+ tx_avl = hif->txavail;
+
+ while ((tx_avl < hif->tx_ring_size) && count--) {
+ desc = hif->tx_base + ttc;
+
+ if (readl(&desc->ctrl) & BD_CTRL_DESC_EN)
+ break;
+
+ desc_sw = &hif->tx_sw_queue[ttc];
+
+ if (desc_sw->data) {
+ dma_unmap_single(hif->dev, desc_sw->data,
+ desc_sw->len, DMA_TO_DEVICE);
+ }
+
+		if (desc_sw->client_id >= HIF_CLIENTS_MAX)
+ pr_err("Invalid cl id %d\n", desc_sw->client_id);
+
+ pkts_done[desc_sw->client_id]++;
+
+ client_ack_txpacket(hif, desc_sw->client_id, desc_sw->q_no);
+
+ ttc = (ttc + 1) & (hif->tx_ring_size - 1);
+ tx_avl++;
+ }
+
+ if (pkts_done[0])
+ hif_lib_indicate_client(0, EVENT_TXDONE_IND, 0);
+ if (pkts_done[1])
+ hif_lib_indicate_client(1, EVENT_TXDONE_IND, 0);
+
+ hif->txtoclean = ttc;
+ hif->txavail = tx_avl;
+
+ if (!count) {
+ tasklet_schedule(&hif->tx_cleanup_tasklet);
+ } else {
+ /*Enable Tx done interrupt */
+ writel(readl_relaxed(HIF_INT_ENABLE) | HIF_TXPKT_INT,
+ HIF_INT_ENABLE);
+ }
+}
+
+static void pfe_tx_do_cleanup(unsigned long data)
+{
+ struct pfe_hif *hif = (struct pfe_hif *)data;
+
+ writel(HIF_INT | HIF_TXPKT_INT, HIF_INT_SRC);
+
+ hif_tx_done_process(hif, 64);
+}
+
+/*
+ * __hif_xmit_pkt -
+ * This function puts one packet in the HIF Tx queue
+ */
+void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
+ q_no, void *data, u32 len, unsigned int flags)
+{
+ struct hif_desc *desc;
+ struct hif_desc_sw *desc_sw;
+
+ desc = hif->tx_base + hif->txtosend;
+ desc_sw = &hif->tx_sw_queue[hif->txtosend];
+
+ desc_sw->len = len;
+ desc_sw->client_id = client_id;
+ desc_sw->q_no = q_no;
+ desc_sw->flags = flags;
+
+ if (flags & HIF_DONT_DMA_MAP) {
+ desc_sw->data = 0;
+ writel((u32)DDR_PHYS_TO_PFE(data), &desc->data);
+ } else {
+ desc_sw->data = dma_map_single(hif->dev, data, len,
+ DMA_TO_DEVICE);
+ writel((u32)DDR_PHYS_TO_PFE(desc_sw->data), &desc->data);
+ }
+
+ hif->txtosend = (hif->txtosend + 1) & (hif->tx_ring_size - 1);
+ hif->txavail--;
+
+ if ((!((flags & HIF_DATA_VALID) && (flags &
+ HIF_LAST_BUFFER))))
+ goto skip_tx;
+
+ /*
+ * Ensure everything else is written to DDR before
+ * writing bd->ctrl
+ */
+ wmb();
+
+ do {
+ desc_sw = &hif->tx_sw_queue[hif->txtoflush];
+ desc = hif->tx_base + hif->txtoflush;
+
+ if (desc_sw->flags & HIF_LAST_BUFFER) {
+ writel((BD_CTRL_LIFM |
+ BD_CTRL_BRFETCH_DISABLE | BD_CTRL_RTFETCH_DISABLE
+ | BD_CTRL_PARSE_DISABLE | BD_CTRL_DESC_EN |
+ BD_CTRL_PKT_INT_EN | BD_BUF_LEN(desc_sw->len)),
+ &desc->ctrl);
+ } else {
+ writel((BD_CTRL_DESC_EN |
+ BD_BUF_LEN(desc_sw->len)), &desc->ctrl);
+ }
+ hif->txtoflush = (hif->txtoflush + 1) & (hif->tx_ring_size - 1);
+	} while (hif->txtoflush != hif->txtosend);
+
+skip_tx:
+ return;
+}
+
+static irqreturn_t wol_isr(int irq, void *dev_id)
+{
+ pr_info("WoL\n");
+ gemac_set_wol(EMAC1_BASE_ADDR, 0);
+ gemac_set_wol(EMAC2_BASE_ADDR, 0);
+ return IRQ_HANDLED;
+}
+
+/*
+ * hif_isr-
+ * This ISR routine processes Rx/Tx done interrupts from the HIF hardware block
+ */
+static irqreturn_t hif_isr(int irq, void *dev_id)
+{
+ struct pfe_hif *hif = (struct pfe_hif *)dev_id;
+ int int_status;
+ int int_enable_mask;
+
+ /*Read hif interrupt source register */
+ int_status = readl_relaxed(HIF_INT_SRC);
+ int_enable_mask = readl_relaxed(HIF_INT_ENABLE);
+
+ if ((int_status & HIF_INT) == 0)
+ return IRQ_NONE;
+
+ int_status &= ~(HIF_INT);
+
+ if (int_status & HIF_RXPKT_INT) {
+ int_status &= ~(HIF_RXPKT_INT);
+ int_enable_mask &= ~(HIF_RXPKT_INT);
+
+ napi_first_batch = 1;
+
+ if (napi_schedule_prep(&hif->napi)) {
+#ifdef HIF_NAPI_STATS
+ hif->napi_counters[NAPI_SCHED_COUNT]++;
+#endif
+ __napi_schedule(&hif->napi);
+ }
+ }
+
+ if (int_status & HIF_TXPKT_INT) {
+ int_status &= ~(HIF_TXPKT_INT);
+ int_enable_mask &= ~(HIF_TXPKT_INT);
+		/* Schedule tx cleanup tasklet */
+ tasklet_schedule(&hif->tx_cleanup_tasklet);
+ }
+
+ /*Disable interrupts, they will be enabled after they are serviced */
+ writel_relaxed(int_enable_mask, HIF_INT_ENABLE);
+
+ if (int_status) {
+ pr_info("%s : Invalid interrupt : %d\n", __func__,
+ int_status);
+ writel(int_status, HIF_INT_SRC);
+ }
+
+ return IRQ_HANDLED;
+}
+
+void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int data2)
+{
+ unsigned int client_id = data1;
+
+ if (client_id >= HIF_CLIENTS_MAX) {
+ pr_err("%s: client id %d out of bounds\n", __func__,
+ client_id);
+ return;
+ }
+
+ switch (req) {
+ case REQUEST_CL_REGISTER:
+ /* Request for register a client */
+ pr_info("%s: register client_id %d\n",
+ __func__, client_id);
+ pfe_hif_client_register(hif, client_id, (struct
+ hif_client_shm *)&hif->shm->client[client_id]);
+ break;
+
+ case REQUEST_CL_UNREGISTER:
+ pr_info("%s: unregister client_id %d\n",
+ __func__, client_id);
+
+ /* Request for unregister a client */
+ pfe_hif_client_unregister(hif, client_id);
+
+ break;
+
+ default:
+ pr_err("%s: unsupported request %d\n",
+ __func__, req);
+ break;
+ }
+
+ /*
+ * Process client Tx queues
+	 * Currently we don't check for pending Tx
+ */
+}
+
+/*
+ * pfe_hif_rx_poll
+ * This function is the NAPI poll function that processes the HIF Rx queue.
+ */
+static int pfe_hif_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct pfe_hif *hif = container_of(napi, struct pfe_hif, napi);
+ int work_done;
+
+#ifdef HIF_NAPI_STATS
+ hif->napi_counters[NAPI_POLL_COUNT]++;
+#endif
+
+ work_done = pfe_hif_rx_process(hif, budget);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ writel(readl_relaxed(HIF_INT_ENABLE) | HIF_RXPKT_INT,
+ HIF_INT_ENABLE);
+ }
+#ifdef HIF_NAPI_STATS
+ else
+ hif->napi_counters[NAPI_FULL_BUDGET_COUNT]++;
+#endif
+
+ return work_done;
+}
+
+/*
+ * pfe_hif_init
+ * This function initializes the base addresses, IRQ, etc.
+ */
+int pfe_hif_init(struct pfe *pfe)
+{
+ struct pfe_hif *hif = &pfe->hif;
+ int err;
+
+ pr_info("%s\n", __func__);
+
+ hif->dev = pfe->dev;
+ hif->irq = pfe->hif_irq;
+
+ err = pfe_hif_alloc_descr(hif);
+ if (err)
+ goto err0;
+
+ if (pfe_hif_init_buffers(hif)) {
+ pr_err("%s: Could not initialize buffer descriptors\n"
+ , __func__);
+ err = -ENOMEM;
+ goto err1;
+ }
+
+ /* Initialize NAPI for Rx processing */
+ init_dummy_netdev(&hif->dummy_dev);
+ netif_napi_add(&hif->dummy_dev, &hif->napi, pfe_hif_rx_poll,
+ HIF_RX_POLL_WEIGHT);
+ napi_enable(&hif->napi);
+
+ spin_lock_init(&hif->tx_lock);
+ spin_lock_init(&hif->lock);
+
+ hif_init();
+ hif_rx_enable();
+ hif_tx_enable();
+
+ /* Disable tx done interrupt */
+ writel(HIF_INT_MASK, HIF_INT_ENABLE);
+
+ gpi_enable(HGPI_BASE_ADDR);
+
+ err = request_irq(hif->irq, hif_isr, 0, "pfe_hif", hif);
+ if (err) {
+ pr_err("%s: failed to get the hif IRQ = %d\n",
+ __func__, hif->irq);
+ goto err1;
+ }
+
+ err = request_irq(pfe->wol_irq, wol_isr, 0, "pfe_wol", pfe);
+ if (err) {
+ pr_err("%s: failed to get the wol IRQ = %d\n",
+ __func__, pfe->wol_irq);
+ goto err1;
+ }
+
+ tasklet_init(&hif->tx_cleanup_tasklet,
+ (void(*)(unsigned long))pfe_tx_do_cleanup,
+ (unsigned long)hif);
+
+ return 0;
+err1:
+ pfe_hif_free_descr(hif);
+err0:
+ return err;
+}
+
+/* pfe_hif_exit- */
+void pfe_hif_exit(struct pfe *pfe)
+{
+ struct pfe_hif *hif = &pfe->hif;
+
+ pr_info("%s\n", __func__);
+
+ tasklet_kill(&hif->tx_cleanup_tasklet);
+
+ spin_lock_bh(&hif->lock);
+ hif->shm->g_client_status[0] = 0;
+ /* Make sure all clients are disabled*/
+ hif->shm->g_client_status[1] = 0;
+
+ spin_unlock_bh(&hif->lock);
+
+ /*Disable Rx/Tx */
+ gpi_disable(HGPI_BASE_ADDR);
+ hif_rx_disable();
+ hif_tx_disable();
+
+ napi_disable(&hif->napi);
+ netif_napi_del(&hif->napi);
+
+ free_irq(pfe->wol_irq, pfe);
+ free_irq(hif->irq, hif);
+
+ pfe_hif_release_buffers(hif);
+ pfe_hif_free_descr(hif);
+}
diff --git a/drivers/staging/fsl_ppfe/pfe_hif.h b/drivers/staging/fsl_ppfe/pfe_hif.h
new file mode 100644
index 000000000000..fc4c6c08f094
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_hif.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#ifndef _PFE_HIF_H_
+#define _PFE_HIF_H_
+
+#include <linux/netdevice.h>
+
+#define HIF_NAPI_STATS
+
+#define HIF_CLIENT_QUEUES_MAX 16
+#define HIF_RX_POLL_WEIGHT 64
+
+#define HIF_RX_PKT_MIN_SIZE 0x800 /* 2KB */
+#define HIF_RX_PKT_MIN_SIZE_MASK ~(HIF_RX_PKT_MIN_SIZE - 1)
+#define ROUND_MIN_RX_SIZE(_sz) (((_sz) + (HIF_RX_PKT_MIN_SIZE - 1)) \
+ & HIF_RX_PKT_MIN_SIZE_MASK)
+#define PRESENT_OFST_IN_PAGE(_buf) (((unsigned long int)(_buf) & (PAGE_SIZE \
+ - 1)) & HIF_RX_PKT_MIN_SIZE_MASK)
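+
+/*
+ * Example (illustrative): with HIF_RX_PKT_MIN_SIZE = 2KB,
+ * ROUND_MIN_RX_SIZE(1600) = 2048 and ROUND_MIN_RX_SIZE(2100) = 4096,
+ * i.e. buffer lengths are rounded up to the next 2KB slot.
+ */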
+
+enum {
+ NAPI_SCHED_COUNT = 0,
+ NAPI_POLL_COUNT,
+ NAPI_PACKET_COUNT,
+ NAPI_DESC_COUNT,
+ NAPI_FULL_BUDGET_COUNT,
+ NAPI_CLIENT_FULL_COUNT,
+ NAPI_MAX_COUNT
+};
+
+/*
+ * The HIF_TX_DESC_NT value should always be greater than 4,
+ * otherwise HIF_TX_POLL_MARK will become zero.
+ */
+#define HIF_RX_DESC_NT 256
+#define HIF_TX_DESC_NT 2048
+
+#define HIF_FIRST_BUFFER BIT(0)
+#define HIF_LAST_BUFFER BIT(1)
+#define HIF_DONT_DMA_MAP BIT(2)
+#define HIF_DATA_VALID BIT(3)
+#define HIF_TSO BIT(4)
+
+enum {
+ PFE_CL_GEM0 = 0,
+ PFE_CL_GEM1,
+ HIF_CLIENTS_MAX
+};
+
+/*structure to store client queue info */
+struct hif_rx_queue {
+ struct rx_queue_desc *base;
+ u32 size;
+ u32 write_idx;
+};
+
+struct hif_tx_queue {
+ struct tx_queue_desc *base;
+ u32 size;
+ u32 ack_idx;
+};
+
+/*Structure to store the client info */
+struct hif_client {
+ int rx_qn;
+ struct hif_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
+ int tx_qn;
+ struct hif_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
+};
+
+/*HIF hardware buffer descriptor */
+struct hif_desc {
+ u32 ctrl;
+ u32 status;
+ u32 data;
+ u32 next;
+};
+
+struct __hif_desc {
+ u32 ctrl;
+ u32 status;
+ u32 data;
+};
+
+struct hif_desc_sw {
+ dma_addr_t data;
+ u16 len;
+ u8 client_id;
+ u8 q_no;
+ u16 flags;
+};
+
+struct hif_hdr {
+ u8 client_id;
+ u8 q_num;
+ u16 client_ctrl;
+ u16 client_ctrl1;
+};
+
+struct __hif_hdr {
+ union {
+ struct hif_hdr hdr;
+ u32 word[2];
+ };
+};
+
+struct hif_ipsec_hdr {
+ u16 sa_handle[2];
+} __packed;
+
+/* HIF_CTRL_TX... defines */
+#define HIF_CTRL_TX_CHECKSUM BIT(2)
+
+/* HIF_CTRL_RX... defines */
+#define HIF_CTRL_RX_OFFSET_OFST (24)
+#define HIF_CTRL_RX_CHECKSUMMED BIT(2)
+#define HIF_CTRL_RX_CONTINUED BIT(1)
+
+struct pfe_hif {
+ /* To store registered clients in hif layer */
+ struct hif_client client[HIF_CLIENTS_MAX];
+ struct hif_shm *shm;
+ int irq;
+
+ void *descr_baseaddr_v;
+ unsigned long descr_baseaddr_p;
+
+ struct hif_desc *rx_base;
+ u32 rx_ring_size;
+ u32 rxtoclean_index;
+ void *rx_buf_addr[HIF_RX_DESC_NT];
+ int rx_buf_len[HIF_RX_DESC_NT];
+ unsigned int qno;
+ unsigned int client_id;
+ unsigned int client_ctrl;
+ unsigned int started;
+
+ struct hif_desc *tx_base;
+ u32 tx_ring_size;
+ u32 txtosend;
+ u32 txtoclean;
+ u32 txavail;
+ u32 txtoflush;
+ struct hif_desc_sw tx_sw_queue[HIF_TX_DESC_NT];
+
+/* tx_lock synchronizes hif packet tx as well as pfe_hif structure access */
+ spinlock_t tx_lock;
+/* lock synchronizes hif rx queue processing */
+ spinlock_t lock;
+ struct net_device dummy_dev;
+ struct napi_struct napi;
+ struct device *dev;
+
+#ifdef HIF_NAPI_STATS
+ unsigned int napi_counters[NAPI_MAX_COUNT];
+#endif
+ struct tasklet_struct tx_cleanup_tasklet;
+};
+
+void __hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int
+ q_no, void *data, u32 len, unsigned int flags);
+int hif_xmit_pkt(struct pfe_hif *hif, unsigned int client_id, unsigned int q_no,
+ void *data, unsigned int len);
+void __hif_tx_done_process(struct pfe_hif *hif, int count);
+void hif_process_client_req(struct pfe_hif *hif, int req, int data1, int
+ data2);
+int pfe_hif_init(struct pfe *pfe);
+void pfe_hif_exit(struct pfe *pfe);
+void pfe_hif_rx_idle(struct pfe_hif *hif);
+static inline void hif_tx_done_process(struct pfe_hif *hif, int count)
+{
+ spin_lock_bh(&hif->tx_lock);
+ __hif_tx_done_process(hif, count);
+ spin_unlock_bh(&hif->tx_lock);
+}
+
+static inline void hif_tx_lock(struct pfe_hif *hif)
+{
+ spin_lock_bh(&hif->tx_lock);
+}
+
+static inline void hif_tx_unlock(struct pfe_hif *hif)
+{
+ spin_unlock_bh(&hif->tx_lock);
+}
+
+static inline int __hif_tx_avail(struct pfe_hif *hif)
+{
+ return hif->txavail;
+}
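+
+/*
+ * Example (illustrative sketch only, not part of this patch): the locking
+ * pattern a client transmit path is expected to follow around
+ * __hif_xmit_pkt().  The client id, queue number and flags below are
+ * placeholders.
+ */
+static inline int example_hif_send(struct pfe_hif *hif, void *data, u32 len)
+{
+	int ret = 0;
+
+	hif_tx_lock(hif);
+	if (__hif_tx_avail(hif))
+		__hif_xmit_pkt(hif, PFE_CL_GEM0, 0, data, len,
+			       HIF_FIRST_BUFFER | HIF_LAST_BUFFER |
+			       HIF_DATA_VALID);
+	else
+		ret = -EBUSY;
+	hif_tx_unlock(hif);
+
+	return ret;
+}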
+
+#define __memcpy8(dst, src) memcpy(dst, src, 8)
+#define __memcpy12(dst, src) memcpy(dst, src, 12)
+#define __memcpy(dst, src, len) memcpy(dst, src, len)
+
+#endif /* _PFE_HIF_H_ */
diff --git a/drivers/staging/fsl_ppfe/pfe_hif_lib.c b/drivers/staging/fsl_ppfe/pfe_hif_lib.c
new file mode 100644
index 000000000000..ac730e043f8d
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.c
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/sched.h>
+#include <linux/skbuff.h>
+#include <linux/moduleparam.h>
+#include <linux/cpu.h>
+
+#include "pfe_mod.h"
+#include "pfe_hif.h"
+#include "pfe_hif_lib.h"
+
+unsigned int lro_mode;
+unsigned int page_mode;
+unsigned int tx_qos = 1;
+module_param(tx_qos, uint, 0444);
+MODULE_PARM_DESC(tx_qos, "0: disable,\n"
+	"1: enable (default), guarantee no packet drop at TMU level\n");
+unsigned int pfe_pkt_size;
+unsigned int pfe_pkt_headroom;
+unsigned int emac_txq_cnt;
+
+/*
+ * pfe_hif_lib.c
+ * Common functions used by HIF client drivers
+ */
+
+/*HIF shared memory Global variable */
+struct hif_shm ghif_shm;
+
+/* Cleanup the HIF shared memory, release HIF rx_buffer_pool.
+ * This function should be called after pfe_hif_exit
+ *
+ * @param[in] hif_shm Shared memory address location in DDR
+ */
+static void pfe_hif_shm_clean(struct hif_shm *hif_shm)
+{
+ int i;
+ void *pkt;
+
+ for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
+ pkt = hif_shm->rx_buf_pool[i];
+ if (pkt) {
+ hif_shm->rx_buf_pool[i] = NULL;
+ pkt -= pfe_pkt_headroom;
+
+ if (page_mode)
+ put_page(virt_to_page(pkt));
+ else
+ kfree(pkt);
+ }
+ }
+}
+
+/* Initialize shared memory used between HIF driver and clients,
+ * allocate rx_buffer_pool required for HIF Rx descriptors.
+ * This function should be called before initializing HIF driver.
+ *
+ * @param[in] hif_shm Shared memory address location in DDR
+ * @return	0 - on success, <0 on failure to initialize
+ */
+static int pfe_hif_shm_init(struct hif_shm *hif_shm)
+{
+ int i;
+ void *pkt;
+
+ memset(hif_shm, 0, sizeof(struct hif_shm));
+ hif_shm->rx_buf_pool_cnt = HIF_RX_DESC_NT;
+
+ for (i = 0; i < hif_shm->rx_buf_pool_cnt; i++) {
+ if (page_mode) {
+ pkt = (void *)__get_free_page(GFP_KERNEL |
+ GFP_DMA_PFE);
+ } else {
+ pkt = kmalloc(PFE_BUF_SIZE, GFP_KERNEL | GFP_DMA_PFE);
+ }
+
+ if (pkt)
+ hif_shm->rx_buf_pool[i] = pkt + pfe_pkt_headroom;
+ else
+ goto err0;
+ }
+
+ return 0;
+
+err0:
+ pr_err("%s Low memory\n", __func__);
+ pfe_hif_shm_clean(hif_shm);
+ return -ENOMEM;
+}
+
+/* This function sends an indication to the HIF driver
+ *
+ * @param[in] hif hif context
+ */
+static void hif_lib_indicate_hif(struct pfe_hif *hif, int req, int data1, int
+ data2)
+{
+ hif_process_client_req(hif, req, data1, data2);
+}
+
+void hif_lib_indicate_client(int client_id, int event_type, int qno)
+{
+ struct hif_client_s *client = pfe->hif_client[client_id];
+
+ if (!client || (event_type >= HIF_EVENT_MAX) || (qno >=
+ HIF_CLIENT_QUEUES_MAX))
+ return;
+
+ if (!test_and_set_bit(qno, &client->queue_mask[event_type]))
+ client->event_handler(client->priv, event_type, qno);
+}
+
+/* This function releases the Rx queue descriptors' memory and pre-filled buffers
+ *
+ * @param[in] client hif_client context
+ */
+static void hif_lib_client_release_rx_buffers(struct hif_client_s *client)
+{
+ struct rx_queue_desc *desc;
+ int qno, ii;
+ void *buf;
+
+ for (qno = 0; qno < client->rx_qn; qno++) {
+ desc = client->rx_q[qno].base;
+
+ for (ii = 0; ii < client->rx_q[qno].size; ii++) {
+ buf = (void *)desc->data;
+ if (buf) {
+ buf -= pfe_pkt_headroom;
+
+ if (page_mode)
+ free_page((unsigned long)buf);
+ else
+ kfree(buf);
+
+ desc->ctrl = 0;
+ }
+
+ desc++;
+ }
+ }
+
+ kfree(client->rx_qbase);
+}
+
+/* This function allocates memory for the rxq descriptors and pre-fills the
+ * rx queues with buffers.
+ * @param[in] client client context
+ * @param[in] q_size size of the rxQ; all queues are of the same size and
+ * must be a power of two (ring indices are masked with size - 1)
+ */
+static int hif_lib_client_init_rx_buffers(struct hif_client_s *client, int
+ q_size)
+{
+ struct rx_queue_desc *desc;
+ struct hif_client_rx_queue *queue;
+ int ii, qno;
+
+ /*Allocate memory for the client queues */
+ client->rx_qbase = kzalloc(client->rx_qn * q_size * sizeof(struct
+ rx_queue_desc), GFP_KERNEL);
+ if (!client->rx_qbase)
+ goto err;
+
+ for (qno = 0; qno < client->rx_qn; qno++) {
+ queue = &client->rx_q[qno];
+
+ queue->base = client->rx_qbase + qno * q_size * sizeof(struct
+ rx_queue_desc);
+ queue->size = q_size;
+ queue->read_idx = 0;
+ queue->write_idx = 0;
+
+ pr_debug("rx queue: %d, base: %p, size: %d\n", qno,
+ queue->base, queue->size);
+ }
+
+ for (qno = 0; qno < client->rx_qn; qno++) {
+ queue = &client->rx_q[qno];
+ desc = queue->base;
+
+ for (ii = 0; ii < queue->size; ii++) {
+ desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) |
+ CL_DESC_OWN;
+ desc++;
+ }
+ }
+
+ return 0;
+
+err:
+ return 1;
+}
+
+static void hif_lib_client_cleanup_tx_queue(struct hif_client_tx_queue *queue)
+{
+ pr_debug("%s\n", __func__);
+
+ /*
+ * Check if there are any pending packets. The client must flush the tx
+ * queues before unregistering, by calling
+ * hif_lib_tx_get_next_complete().
+ *
+ * HIF no longer delivers TX-done events once we are unregistered.
+ */
+ if (queue->tx_pending)
+ pr_err("%s: pending transmit packets\n", __func__);
+}
+
+static void hif_lib_client_release_tx_buffers(struct hif_client_s *client)
+{
+ int qno;
+
+ pr_debug("%s\n", __func__);
+
+ for (qno = 0; qno < client->tx_qn; qno++)
+ hif_lib_client_cleanup_tx_queue(&client->tx_q[qno]);
+
+ kfree(client->tx_qbase);
+}
+
+static int hif_lib_client_init_tx_buffers(struct hif_client_s *client, int
+ q_size)
+{
+ struct hif_client_tx_queue *queue;
+ int qno;
+
+ client->tx_qbase = kzalloc(client->tx_qn * q_size * sizeof(struct
+ tx_queue_desc), GFP_KERNEL);
+ if (!client->tx_qbase)
+ return 1;
+
+ for (qno = 0; qno < client->tx_qn; qno++) {
+ queue = &client->tx_q[qno];
+
+ queue->base = client->tx_qbase + qno * q_size * sizeof(struct
+ tx_queue_desc);
+ queue->size = q_size;
+ queue->read_idx = 0;
+ queue->write_idx = 0;
+ queue->tx_pending = 0;
+ queue->nocpy_flag = 0;
+ queue->prev_tmu_tx_pkts = 0;
+ queue->done_tmu_tx_pkts = 0;
+
+ pr_debug("tx queue: %d, base: %p, size: %d\n", qno,
+ queue->base, queue->size);
+ }
+
+ return 0;
+}
+
+static int hif_lib_event_dummy(void *priv, int event_type, int qno)
+{
+ return 0;
+}
+
+int hif_lib_client_register(struct hif_client_s *client)
+{
+ struct hif_shm *hif_shm;
+ struct hif_client_shm *client_shm;
+ int err, i;
+ /* int loop_cnt = 0; */
+
+ pr_debug("%s\n", __func__);
+
+ /*Allocate memory before spin_lock*/
+ if (hif_lib_client_init_rx_buffers(client, client->rx_qsize)) {
+ err = -ENOMEM;
+ goto err_rx;
+ }
+
+ if (hif_lib_client_init_tx_buffers(client, client->tx_qsize)) {
+ err = -ENOMEM;
+ goto err_tx;
+ }
+
+ spin_lock_bh(&pfe->hif.lock);
+ if (!(client->pfe) || (client->id >= HIF_CLIENTS_MAX) ||
+ (pfe->hif_client[client->id])) {
+ err = -EINVAL;
+ goto err;
+ }
+
+ hif_shm = client->pfe->hif.shm;
+
+ if (!client->event_handler)
+ client->event_handler = hif_lib_event_dummy;
+
+ /*Initialize client specific shared memory */
+ client_shm = (struct hif_client_shm *)&hif_shm->client[client->id];
+ client_shm->rx_qbase = (unsigned long int)client->rx_qbase;
+ client_shm->rx_qsize = client->rx_qsize;
+ client_shm->tx_qbase = (unsigned long int)client->tx_qbase;
+ client_shm->tx_qsize = client->tx_qsize;
+ client_shm->ctrl = (client->tx_qn << CLIENT_CTRL_TX_Q_CNT_OFST) |
+ (client->rx_qn << CLIENT_CTRL_RX_Q_CNT_OFST);
+ /* spin_lock_init(&client->rx_lock); */
+
+ /* By default all events are unmasked */
+ for (i = 0; i < HIF_EVENT_MAX; i++)
+ client->queue_mask[i] = 0;
+
+ /*Indicate to HIF driver*/
+ hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_REGISTER, client->id, 0);
+
+ pr_debug("%s: client: %p, client_id: %d, tx_qsize: %d, rx_qsize: %d\n",
+ __func__, client, client->id, client->tx_qsize,
+ client->rx_qsize);
+
+ client->cpu_id = -1;
+
+ pfe->hif_client[client->id] = client;
+ spin_unlock_bh(&pfe->hif.lock);
+
+ return 0;
+
+err:
+ spin_unlock_bh(&pfe->hif.lock);
+ hif_lib_client_release_tx_buffers(client);
+
+err_tx:
+ hif_lib_client_release_rx_buffers(client);
+
+err_rx:
+ return err;
+}
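+
+/*
+ * Illustrative registration sketch (not part of the driver): a client is
+ * expected to fill in its struct hif_client_s along these lines before
+ * calling hif_lib_client_register(). All values and the event handler
+ * name are examples only.
+ */
+#if 0
+ client->pfe = pfe;
+ client->id = 0; /* unique id, < HIF_CLIENTS_MAX */
+ client->tx_qn = emac_txq_cnt;
+ client->rx_qn = 2;
+ client->tx_qsize = 128; /* powers of two: ring indices */
+ client->rx_qsize = 256; /* are masked with (size - 1) */
+ client->event_handler = my_event_handler; /* example name */
+ client->priv = priv;
+ err = hif_lib_client_register(client);
+#endif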
+
+int hif_lib_client_unregister(struct hif_client_s *client)
+{
+ struct pfe *pfe = client->pfe;
+ u32 client_id = client->id;
+
+ pr_info(
+ "%s : client: %p, client_id: %d, txQ_depth: %d, rxQ_depth: %d\n"
+ , __func__, client, client->id, client->tx_qsize,
+ client->rx_qsize);
+
+ spin_lock_bh(&pfe->hif.lock);
+ hif_lib_indicate_hif(&pfe->hif, REQUEST_CL_UNREGISTER, client->id, 0);
+
+ hif_lib_client_release_tx_buffers(client);
+ hif_lib_client_release_rx_buffers(client);
+ pfe->hif_client[client_id] = NULL;
+ spin_unlock_bh(&pfe->hif.lock);
+
+ return 0;
+}
+
+int hif_lib_event_handler_start(struct hif_client_s *client, int event,
+ int qno)
+{
+ struct hif_client_rx_queue *queue = &client->rx_q[qno];
+ struct rx_queue_desc *desc = queue->base + queue->read_idx;
+
+ if ((event >= HIF_EVENT_MAX) || (qno >= HIF_CLIENT_QUEUES_MAX)) {
+ pr_debug("%s: Unsupported event : %d queue number : %d\n",
+ __func__, event, qno);
+ return -1;
+ }
+
+ test_and_clear_bit(qno, &client->queue_mask[event]);
+
+ switch (event) {
+ case EVENT_RX_PKT_IND:
+ if (!(desc->ctrl & CL_DESC_OWN))
+ hif_lib_indicate_client(client->id,
+ EVENT_RX_PKT_IND, qno);
+ break;
+
+ case EVENT_HIGH_RX_WM:
+ case EVENT_TXDONE_IND:
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * This function gets one packet from the specified client queue.
+ * It also refills the rx buffer.
+ */
+void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int
+ *ofst, unsigned int *rx_ctrl,
+ unsigned int *desc_ctrl, void **priv_data)
+{
+ struct hif_client_rx_queue *queue = &client->rx_q[qno];
+ struct rx_queue_desc *desc;
+ void *pkt = NULL;
+
+ /*
+ * The following lock protects rx queue access from
+ * hif_lib_event_handler_start.
+ * In general the lock is not required, because hif_lib_xmit_pkt and
+ * hif_lib_event_handler_start are called from the napi poll loop, which
+ * is not re-entrant. But if a client uses them differently, the lock is
+ * required.
+ */
+ /*spin_lock_irqsave(&client->rx_lock, flags); */
+ desc = queue->base + queue->read_idx;
+ if (!(desc->ctrl & CL_DESC_OWN)) {
+ pkt = desc->data - pfe_pkt_headroom;
+
+ *rx_ctrl = desc->client_ctrl;
+ *desc_ctrl = desc->ctrl;
+
+ if (desc->ctrl & CL_DESC_FIRST) {
+ u16 size = *rx_ctrl >> HIF_CTRL_RX_OFFSET_OFST;
+
+ if (size) {
+ size += PFE_PARSE_INFO_SIZE;
+ *len = CL_DESC_BUF_LEN(desc->ctrl) -
+ PFE_PKT_HEADER_SZ - size;
+ *ofst = pfe_pkt_headroom + PFE_PKT_HEADER_SZ
+ + size;
+ *priv_data = desc->data + PFE_PKT_HEADER_SZ;
+ } else {
+ *len = CL_DESC_BUF_LEN(desc->ctrl) -
+ PFE_PKT_HEADER_SZ - PFE_PARSE_INFO_SIZE;
+ *ofst = pfe_pkt_headroom
+ + PFE_PKT_HEADER_SZ
+ + PFE_PARSE_INFO_SIZE;
+ *priv_data = NULL;
+ }
+
+ } else {
+ *len = CL_DESC_BUF_LEN(desc->ctrl);
+ *ofst = pfe_pkt_headroom;
+ }
+
+ /*
+ * Needed so we don't free a buffer/page
+ * twice on module_exit
+ */
+ desc->data = NULL;
+
+ /*
+ * Ensure everything else is written to DDR before
+ * writing bd->ctrl
+ */
+ smp_wmb();
+
+ desc->ctrl = CL_DESC_BUF_LEN(pfe_pkt_size) | CL_DESC_OWN;
+ queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
+ }
+
+ /*spin_unlock_irqrestore(&client->rx_lock, flags); */
+ return pkt;
+}
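+
+/*
+ * Illustrative receive loop (not part of the driver): a client drains the
+ * queue until hif_lib_receive_pkt() returns NULL, then re-arms the RX
+ * event. The variable names are examples only.
+ */
+#if 0
+ while (budget--) {
+ unsigned int rx_ctrl, desc_ctrl;
+ void *priv_data, *buf;
+ int len, ofst;
+
+ buf = hif_lib_receive_pkt(client, qno, &len, &ofst,
+ &rx_ctrl, &desc_ctrl, &priv_data);
+ if (!buf)
+ break; /* ring empty, re-arm the event */
+ /* packet payload starts at buf + ofst and is len bytes long */
+ }
+ hif_lib_event_handler_start(client, EVENT_RX_PKT_IND, qno);
+#endif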
+
+static inline void hif_hdr_write(struct hif_hdr *pkt_hdr, unsigned int
+ client_id, unsigned int qno,
+ u32 client_ctrl)
+{
+ /* Optimize the write since the destination may be non-cacheable */
+ if (!((unsigned long)pkt_hdr & 0x3)) {
+ ((u32 *)pkt_hdr)[0] = (client_ctrl << 16) | (qno << 8) |
+ client_id;
+ } else {
+ ((u16 *)pkt_hdr)[0] = (qno << 8) | (client_id & 0xFF);
+ ((u16 *)pkt_hdr)[1] = (client_ctrl & 0xFFFF);
+ }
+}
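+
+/*
+ * Worked example: for client_id 0, qno 2 and client_ctrl 0xABCD, the
+ * aligned path stores the single word 0xABCD0200. On a little-endian CPU
+ * that is byte 0 = client_id, byte 1 = qno, bytes 2-3 = client_ctrl,
+ * exactly what the two 16-bit stores of the unaligned path produce.
+ */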
+
+/*This function puts the given packet in the specific client queue */
+void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void
+ *data, unsigned int len, u32 client_ctrl,
+ unsigned int flags, void *client_data)
+{
+ struct hif_client_tx_queue *queue = &client->tx_q[qno];
+ struct tx_queue_desc *desc = queue->base + queue->write_idx;
+
+ /* First buffer */
+ if (flags & HIF_FIRST_BUFFER) {
+ data -= sizeof(struct hif_hdr);
+ len += sizeof(struct hif_hdr);
+
+ hif_hdr_write(data, client->id, qno, client_ctrl);
+ }
+
+ desc->data = client_data;
+ desc->ctrl = CL_DESC_OWN | CL_DESC_FLAGS(flags);
+
+ __hif_xmit_pkt(&pfe->hif, client->id, qno, data, len, flags);
+
+ queue->write_idx = (queue->write_idx + 1) & (queue->size - 1);
+ queue->tx_pending++;
+ queue->jiffies_last_packet = jiffies;
+}
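+
+/*
+ * Illustrative single-buffer transmit (not part of the driver): the packet
+ * is queued with both FIRST and LAST flags and later reclaimed from the
+ * TX-done path. The caller must reserve sizeof(struct hif_hdr) bytes of
+ * headroom in front of data. Names are examples only.
+ */
+#if 0
+ if (hif_lib_tx_avail(client, qno))
+ __hif_lib_xmit_pkt(client, qno, skb->data, skb->len, 0,
+ HIF_FIRST_BUFFER | HIF_LAST_BUFFER, skb);
+
+ /* later, typically from the EVENT_TXDONE_IND handler: */
+ while ((data = hif_lib_tx_get_next_complete(client, qno, &flags, count)))
+ dev_kfree_skb_any(data);
+#endif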
+
+void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
+ unsigned int *flags, int count)
+{
+ struct hif_client_tx_queue *queue = &client->tx_q[qno];
+ struct tx_queue_desc *desc = queue->base + queue->read_idx;
+
+ pr_debug("%s: qno : %d rd_indx: %d pending:%d\n", __func__, qno,
+ queue->read_idx, queue->tx_pending);
+
+ if (!queue->tx_pending)
+ return NULL;
+
+ if (queue->nocpy_flag && !queue->done_tmu_tx_pkts) {
+ u32 tmu_tx_pkts = be32_to_cpu(pe_dmem_read(TMU0_ID +
+ client->id, TMU_DM_TX_TRANS, 4));
+
+ if (queue->prev_tmu_tx_pkts > tmu_tx_pkts)
+ queue->done_tmu_tx_pkts = UINT_MAX -
+ queue->prev_tmu_tx_pkts + tmu_tx_pkts;
+ else
+ queue->done_tmu_tx_pkts = tmu_tx_pkts -
+ queue->prev_tmu_tx_pkts;
+
+ queue->prev_tmu_tx_pkts = tmu_tx_pkts;
+
+ if (!queue->done_tmu_tx_pkts)
+ return NULL;
+ }
+
+ if (desc->ctrl & CL_DESC_OWN)
+ return NULL;
+
+ queue->read_idx = (queue->read_idx + 1) & (queue->size - 1);
+ queue->tx_pending--;
+
+ *flags = CL_DESC_GET_FLAGS(desc->ctrl);
+
+ if (queue->done_tmu_tx_pkts && (*flags & HIF_LAST_BUFFER))
+ queue->done_tmu_tx_pkts--;
+
+ return desc->data;
+}
+
+static void hif_lib_tmu_credit_init(struct pfe *pfe)
+{
+ int i, q;
+
+ for (i = 0; i < NUM_GEMAC_SUPPORT; i++)
+ for (q = 0; q < emac_txq_cnt; q++) {
+ pfe->tmu_credit.tx_credit_max[i][q] = (q == 0) ?
+ DEFAULT_Q0_QDEPTH : DEFAULT_MAX_QDEPTH;
+ pfe->tmu_credit.tx_credit[i][q] =
+ pfe->tmu_credit.tx_credit_max[i][q];
+ }
+}
+
+/* __hif_lib_update_credit
+ *
+ * @param[in] client hif client context
+ * @param[in] queue queue number in match with TMU
+ */
+void __hif_lib_update_credit(struct hif_client_s *client, unsigned int queue)
+{
+ unsigned int tmu_tx_packets, tmp;
+
+ if (tx_qos) {
+ tmu_tx_packets = be32_to_cpu(pe_dmem_read(TMU0_ID +
+ client->id, (TMU_DM_TX_TRANS + (queue * 4)), 4));
+
+ /* tx_packets counter overflowed */
+ if (tmu_tx_packets >
+ pfe->tmu_credit.tx_packets[client->id][queue]) {
+ tmp = UINT_MAX - tmu_tx_packets +
+ pfe->tmu_credit.tx_packets[client->id][queue];
+
+ pfe->tmu_credit.tx_credit[client->id][queue] =
+ pfe->tmu_credit.tx_credit_max[client->id][queue] - tmp;
+ } else {
+ /* TMU tx <= pfe_eth tx: the normal case, or both
+ * counters have overflowed since last time
+ */
+ pfe->tmu_credit.tx_credit[client->id][queue] =
+ pfe->tmu_credit.tx_credit_max[client->id][queue] -
+ (pfe->tmu_credit.tx_packets[client->id][queue] -
+ tmu_tx_packets);
+ }
+ }
+}
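+
+/*
+ * Worked example of the overflow branch above: if the local tx_packets
+ * counter has wrapped to 5 while the TMU counter reads UINT_MAX - 2, then
+ * tmp = UINT_MAX - (UINT_MAX - 2) + 5 = 7 packets are still unaccounted
+ * for, and the credit becomes tx_credit_max - 7.
+ */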
+
+int pfe_hif_lib_init(struct pfe *pfe)
+{
+ int rc;
+
+ pr_info("%s\n", __func__);
+
+ if (lro_mode) {
+ page_mode = 1;
+ pfe_pkt_size = min(PAGE_SIZE, MAX_PFE_PKT_SIZE);
+ pfe_pkt_headroom = 0;
+ } else {
+ page_mode = 0;
+ pfe_pkt_size = PFE_PKT_SIZE;
+ pfe_pkt_headroom = PFE_PKT_HEADROOM;
+ }
+
+ if (tx_qos)
+ emac_txq_cnt = EMAC_TXQ_CNT / 2;
+ else
+ emac_txq_cnt = EMAC_TXQ_CNT;
+
+ hif_lib_tmu_credit_init(pfe);
+ pfe->hif.shm = &ghif_shm;
+ rc = pfe_hif_shm_init(pfe->hif.shm);
+
+ return rc;
+}
+
+void pfe_hif_lib_exit(struct pfe *pfe)
+{
+ pr_info("%s\n", __func__);
+
+ pfe_hif_shm_clean(pfe->hif.shm);
+}
diff --git a/drivers/staging/fsl_ppfe/pfe_hif_lib.h b/drivers/staging/fsl_ppfe/pfe_hif_lib.h
new file mode 100644
index 000000000000..29a38baa2561
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_hif_lib.h
@@ -0,0 +1,229 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#ifndef _PFE_HIF_LIB_H_
+#define _PFE_HIF_LIB_H_
+
+#include "pfe_hif.h"
+
+#define HIF_CL_REQ_TIMEOUT 10
+#define GFP_DMA_PFE 0
+#define PFE_PARSE_INFO_SIZE 16
+
+enum {
+ REQUEST_CL_REGISTER = 0,
+ REQUEST_CL_UNREGISTER,
+ HIF_REQUEST_MAX
+};
+
+enum {
+ /* Event to indicate that the client rx queue has reached its watermark */
+ EVENT_HIGH_RX_WM = 0,
+ /* Event to indicate that a packet was received for the client */
+ EVENT_RX_PKT_IND,
+ /* Event to indicate that a packet tx is done for the client */
+ EVENT_TXDONE_IND,
+ HIF_EVENT_MAX
+};
+
+/* Structure to store client queue info */
+struct hif_client_rx_queue {
+ struct rx_queue_desc *base;
+ u32 size;
+ u32 read_idx;
+ u32 write_idx;
+};
+
+struct hif_client_tx_queue {
+ struct tx_queue_desc *base;
+ u32 size;
+ u32 read_idx;
+ u32 write_idx;
+ u32 tx_pending;
+ unsigned long jiffies_last_packet;
+ u32 nocpy_flag;
+ u32 prev_tmu_tx_pkts;
+ u32 done_tmu_tx_pkts;
+};
+
+struct hif_client_s {
+ int id;
+ int tx_qn;
+ int rx_qn;
+ void *rx_qbase;
+ void *tx_qbase;
+ int tx_qsize;
+ int rx_qsize;
+ int cpu_id;
+ struct hif_client_tx_queue tx_q[HIF_CLIENT_QUEUES_MAX];
+ struct hif_client_rx_queue rx_q[HIF_CLIENT_QUEUES_MAX];
+ int (*event_handler)(void *priv, int event, int data);
+ unsigned long queue_mask[HIF_EVENT_MAX];
+ struct pfe *pfe;
+ void *priv;
+};
+
+/*
+ * Client specific shared memory
+ * It contains number of Rx/Tx queues, base addresses and queue sizes
+ */
+struct hif_client_shm {
+ u32 ctrl; /*0-7: number of Rx queues, 8-15: number of tx queues */
+ unsigned long rx_qbase; /*Rx queue base address */
+ u32 rx_qsize; /*each Rx queue size, all Rx queues are of same size */
+ unsigned long tx_qbase; /* Tx queue base address */
+ u32 tx_qsize; /*each Tx queue size, all Tx queues are of same size */
+};
+
+/*Client shared memory ctrl bit description */
+#define CLIENT_CTRL_RX_Q_CNT_OFST 0
+#define CLIENT_CTRL_TX_Q_CNT_OFST 8
+#define CLIENT_CTRL_RX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_RX_Q_CNT_OFST) \
+ & 0xFF)
+#define CLIENT_CTRL_TX_Q_CNT(ctrl) (((ctrl) >> CLIENT_CTRL_TX_Q_CNT_OFST) \
+ & 0xFF)
+
+/*
+ * Shared memory used to communicate between the HIF driver and host/client
+ * drivers. Before starting the HIF driver, rx_buf_pool and rx_buf_pool_cnt
+ * should be initialized with host buffers and the buffer count in the pool.
+ * rx_buf_pool_cnt should be >= HIF_RX_DESC_NT.
+ */
+struct hif_shm {
+ u32 rx_buf_pool_cnt; /*Number of rx buffers available*/
+ /*Rx buffers required to initialize HIF rx descriptors */
+ void *rx_buf_pool[HIF_RX_DESC_NT];
+ unsigned long g_client_status[2]; /*Global client status bit mask */
+ /* Client specific shared memory */
+ struct hif_client_shm client[HIF_CLIENTS_MAX];
+};
+
+/* Descriptor ownership: when set, the descriptor belongs to the HIF driver */
+#define CL_DESC_OWN BIT(31)
+/* Indicates the last buffer of a multi-buffer packet */
+#define CL_DESC_LAST BIT(30)
+/* Indicates the first buffer of a multi-buffer packet */
+#define CL_DESC_FIRST BIT(29)
+
+#define CL_DESC_BUF_LEN(x) ((x) & 0xFFFF)
+#define CL_DESC_FLAGS(x) (((x) & 0xF) << 16)
+#define CL_DESC_GET_FLAGS(x) (((x) >> 16) & 0xF)
+
+struct rx_queue_desc {
+ void *data;
+ u32 ctrl; /*0-15bit len, 16-20bit flags, 31bit owner*/
+ u32 client_ctrl;
+};
+
+struct tx_queue_desc {
+ void *data;
+ u32 ctrl; /*0-15bit len, 16-20bit flags, 31bit owner*/
+};
+
+/* HIF Rx does not work properly for 2-byte aligned buffers and the
+ * ip header should be 4-byte aligned for better performance.
+ * "ip_header = 64 + 6(hif_header) + 14 (MAC Header)" will be 4-byte aligned.
+ */
+#define PFE_PKT_HEADER_SZ sizeof(struct hif_hdr)
+/* must be big enough for headroom, pkt size and skb shared info */
+#define PFE_BUF_SIZE 2048
+#define PFE_PKT_HEADROOM 128
+
+#define SKB_SHARED_INFO_SIZE SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+#define PFE_PKT_SIZE (PFE_BUF_SIZE - PFE_PKT_HEADROOM \
+ - SKB_SHARED_INFO_SIZE)
+#define MAX_L2_HDR_SIZE 14 /* Not correct for VLAN/PPPoE */
+#define MAX_L3_HDR_SIZE 20 /* Not correct for IPv6 */
+#define MAX_L4_HDR_SIZE 60 /* TCP with maximum options */
+#define MAX_HDR_SIZE (MAX_L2_HDR_SIZE + MAX_L3_HDR_SIZE \
+ + MAX_L4_HDR_SIZE)
+/* Used in page mode to clamp the packet size to the maximum supported
+ * by the HIF hw interface (<16KiB)
+ */
+#define MAX_PFE_PKT_SIZE 16380UL
+
+extern unsigned int pfe_pkt_size;
+extern unsigned int pfe_pkt_headroom;
+extern unsigned int page_mode;
+extern unsigned int lro_mode;
+extern unsigned int tx_qos;
+extern unsigned int emac_txq_cnt;
+
+int pfe_hif_lib_init(struct pfe *pfe);
+void pfe_hif_lib_exit(struct pfe *pfe);
+int hif_lib_client_register(struct hif_client_s *client);
+int hif_lib_client_unregister(struct hif_client_s *client);
+void __hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void
+ *data, unsigned int len, u32 client_ctrl,
+ unsigned int flags, void *client_data);
+int hif_lib_xmit_pkt(struct hif_client_s *client, unsigned int qno, void *data,
+ unsigned int len, u32 client_ctrl, void *client_data);
+void hif_lib_indicate_client(int cl_id, int event, int data);
+int hif_lib_event_handler_start(struct hif_client_s *client, int event, int
+ data);
+int hif_lib_tmu_queue_start(struct hif_client_s *client, int qno);
+int hif_lib_tmu_queue_stop(struct hif_client_s *client, int qno);
+void *hif_lib_tx_get_next_complete(struct hif_client_s *client, int qno,
+ unsigned int *flags, int count);
+void *hif_lib_receive_pkt(struct hif_client_s *client, int qno, int *len, int
+ *ofst, unsigned int *rx_ctrl,
+ unsigned int *desc_ctrl, void **priv_data);
+void __hif_lib_update_credit(struct hif_client_s *client, unsigned int queue);
+void hif_lib_set_rx_cpu_affinity(struct hif_client_s *client, int cpu_id);
+void hif_lib_set_tx_queue_nocpy(struct hif_client_s *client, int qno, int
+ enable);
+static inline int hif_lib_tx_avail(struct hif_client_s *client, unsigned int
+ qno)
+{
+ struct hif_client_tx_queue *queue = &client->tx_q[qno];
+
+ return (queue->size - queue->tx_pending);
+}
+
+static inline int hif_lib_get_tx_wr_index(struct hif_client_s *client, unsigned
+ int qno)
+{
+ struct hif_client_tx_queue *queue = &client->tx_q[qno];
+
+ return queue->write_idx;
+}
+
+static inline int hif_lib_tx_pending(struct hif_client_s *client, unsigned int
+ qno)
+{
+ struct hif_client_tx_queue *queue = &client->tx_q[qno];
+
+ return queue->tx_pending;
+}
+
+#define hif_lib_tx_credit_avail(pfe, id, qno) \
+ ((pfe)->tmu_credit.tx_credit[id][qno])
+
+#define hif_lib_tx_credit_max(pfe, id, qno) \
+ ((pfe)->tmu_credit.tx_credit_max[id][qno])
+
+/* Debit tx credit and account transmitted packets (no-op when tx_qos is
+ * disabled)
+ */
+#define hif_lib_tx_credit_use(pfe, id, qno, credit) \
+ do { \
+ typeof(pfe) pfe_ = (pfe); \
+ typeof(id) id_ = (id); \
+ typeof(qno) qno_ = (qno); \
+ typeof(credit) credit_ = (credit); \
+ if (tx_qos) { \
+ (pfe_)->tmu_credit.tx_credit[id_][qno_] \
+ -= credit_; \
+ (pfe_)->tmu_credit.tx_packets[id_][qno_] \
+ += credit_; \
+ } \
+ } while (0)
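+
+/*
+ * Illustrative credit-gated transmit (not part of the driver): with tx_qos
+ * enabled a sender checks the remaining credit before queueing and debits
+ * one credit per packet. Names are examples only.
+ */
+#if 0
+ if (hif_lib_tx_credit_avail(pfe, client->id, qno) > 0) {
+ __hif_lib_xmit_pkt(client, qno, data, len, 0,
+ HIF_FIRST_BUFFER | HIF_LAST_BUFFER, skb);
+ hif_lib_tx_credit_use(pfe, client->id, qno, 1);
+ }
+#endif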
+
+#endif /* _PFE_HIF_LIB_H_ */
diff --git a/drivers/staging/fsl_ppfe/pfe_hw.c b/drivers/staging/fsl_ppfe/pfe_hw.c
new file mode 100644
index 000000000000..a881b4389dc5
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_hw.c
@@ -0,0 +1,164 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#include "pfe_mod.h"
+#include "pfe_hw.h"
+
+/* Functions to handle most of pfe hw register initialization */
+int pfe_hw_init(struct pfe *pfe, int resume)
+{
+ struct class_cfg class_cfg = {
+ .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
+ .route_table_baseaddr = pfe->ddr_phys_baseaddr +
+ ROUTE_TABLE_BASEADDR,
+ .route_table_hash_bits = ROUTE_TABLE_HASH_BITS,
+ };
+
+ struct tmu_cfg tmu_cfg = {
+ .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
+ .llm_base_addr = pfe->ddr_phys_baseaddr + TMU_LLM_BASEADDR,
+ .llm_queue_len = TMU_LLM_QUEUE_LEN,
+ };
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+ struct util_cfg util_cfg = {
+ .pe_sys_clk_ratio = PE_SYS_CLK_RATIO,
+ };
+#endif
+
+ struct BMU_CFG bmu1_cfg = {
+ .baseaddr = CBUS_VIRT_TO_PFE(LMEM_BASE_ADDR +
+ BMU1_LMEM_BASEADDR),
+ .count = BMU1_BUF_COUNT,
+ .size = BMU1_BUF_SIZE,
+ .low_watermark = 10,
+ .high_watermark = 15,
+ };
+
+ struct BMU_CFG bmu2_cfg = {
+ .baseaddr = DDR_PHYS_TO_PFE(pfe->ddr_phys_baseaddr +
+ BMU2_DDR_BASEADDR),
+ .count = BMU2_BUF_COUNT,
+ .size = BMU2_BUF_SIZE,
+ .low_watermark = 250,
+ .high_watermark = 253,
+ };
+
+ struct gpi_cfg egpi1_cfg = {
+ .lmem_rtry_cnt = EGPI1_LMEM_RTRY_CNT,
+ .tmlf_txthres = EGPI1_TMLF_TXTHRES,
+ .aseq_len = EGPI1_ASEQ_LEN,
+ .mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC1_BASE_ADDR +
+ EMAC_TCNTRL_REG),
+ };
+
+ struct gpi_cfg egpi2_cfg = {
+ .lmem_rtry_cnt = EGPI2_LMEM_RTRY_CNT,
+ .tmlf_txthres = EGPI2_TMLF_TXTHRES,
+ .aseq_len = EGPI2_ASEQ_LEN,
+ .mtip_pause_reg = CBUS_VIRT_TO_PFE(EMAC2_BASE_ADDR +
+ EMAC_TCNTRL_REG),
+ };
+
+ struct gpi_cfg hgpi_cfg = {
+ .lmem_rtry_cnt = HGPI_LMEM_RTRY_CNT,
+ .tmlf_txthres = HGPI_TMLF_TXTHRES,
+ .aseq_len = HGPI_ASEQ_LEN,
+ .mtip_pause_reg = 0,
+ };
+
+ pr_info("%s\n", __func__);
+
+#if !defined(LS1012A_PFE_RESET_WA)
+ /* LS1012A needs this to make PE work correctly */
+ writel(0x3, CLASS_PE_SYS_CLK_RATIO);
+ writel(0x3, TMU_PE_SYS_CLK_RATIO);
+ writel(0x3, UTIL_PE_SYS_CLK_RATIO);
+ usleep_range(10, 20);
+#endif
+
+ pr_info("CLASS version: %x\n", readl(CLASS_VERSION));
+ pr_info("TMU version: %x\n", readl(TMU_VERSION));
+
+ pr_info("BMU1 version: %x\n", readl(BMU1_BASE_ADDR +
+ BMU_VERSION));
+ pr_info("BMU2 version: %x\n", readl(BMU2_BASE_ADDR +
+ BMU_VERSION));
+
+ pr_info("EGPI1 version: %x\n", readl(EGPI1_BASE_ADDR +
+ GPI_VERSION));
+ pr_info("EGPI2 version: %x\n", readl(EGPI2_BASE_ADDR +
+ GPI_VERSION));
+ pr_info("HGPI version: %x\n", readl(HGPI_BASE_ADDR +
+ GPI_VERSION));
+
+ pr_info("HIF version: %x\n", readl(HIF_VERSION));
+ pr_info("HIF NOPCY version: %x\n", readl(HIF_NOCPY_VERSION));
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+ pr_info("UTIL version: %x\n", readl(UTIL_VERSION));
+#endif
+ while (!(readl(TMU_CTRL) & ECC_MEM_INIT_DONE))
+ ;
+
+ hif_rx_disable();
+ hif_tx_disable();
+
+ bmu_init(BMU1_BASE_ADDR, &bmu1_cfg);
+
+ pr_info("bmu_init(1) done\n");
+
+ bmu_init(BMU2_BASE_ADDR, &bmu2_cfg);
+
+ pr_info("bmu_init(2) done\n");
+
+ class_cfg.resume = resume ? 1 : 0;
+
+ class_init(&class_cfg);
+
+ pr_info("class_init() done\n");
+
+ tmu_init(&tmu_cfg);
+
+ pr_info("tmu_init() done\n");
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+ util_init(&util_cfg);
+
+ pr_info("util_init() done\n");
+#endif
+ gpi_init(EGPI1_BASE_ADDR, &egpi1_cfg);
+
+ pr_info("gpi_init(1) done\n");
+
+ gpi_init(EGPI2_BASE_ADDR, &egpi2_cfg);
+
+ pr_info("gpi_init(2) done\n");
+
+ gpi_init(HGPI_BASE_ADDR, &hgpi_cfg);
+
+ pr_info("gpi_init(hif) done\n");
+
+ bmu_enable(BMU1_BASE_ADDR);
+
+ pr_info("bmu_enable(1) done\n");
+
+ bmu_enable(BMU2_BASE_ADDR);
+
+ pr_info("bmu_enable(2) done\n");
+
+ return 0;
+}
+
+void pfe_hw_exit(struct pfe *pfe)
+{
+ pr_info("%s\n", __func__);
+
+ bmu_disable(BMU1_BASE_ADDR);
+ bmu_reset(BMU1_BASE_ADDR);
+
+ bmu_disable(BMU2_BASE_ADDR);
+ bmu_reset(BMU2_BASE_ADDR);
+}
diff --git a/drivers/staging/fsl_ppfe/pfe_hw.h b/drivers/staging/fsl_ppfe/pfe_hw.h
new file mode 100644
index 000000000000..b3f06196bbd0
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_hw.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#ifndef _PFE_HW_H_
+#define _PFE_HW_H_
+
+#define PE_SYS_CLK_RATIO 1 /* SYS/AXI = 250MHz, HFE = 500MHz */
+
+int pfe_hw_init(struct pfe *pfe, int resume);
+void pfe_hw_exit(struct pfe *pfe);
+
+#endif /* _PFE_HW_H_ */
diff --git a/drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c b/drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
new file mode 100644
index 000000000000..fd2b0e353d8b
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_ls1012a_platform.c
@@ -0,0 +1,383 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#include "pfe_mod.h"
+
+extern bool pfe_use_old_dts_phy;
+struct ls1012a_pfe_platform_data pfe_platform_data;
+
+static int pfe_get_gemac_if_properties(struct device_node *gem,
+ int port,
+ struct ls1012a_pfe_platform_data *pdata)
+{
+ struct device_node *phy_node = NULL;
+ int size;
+ int phy_id = 0;
+ const u32 *addr;
+ const u8 *mac_addr;
+
+ addr = of_get_property(gem, "reg", &size);
+ if (addr)
+ port = be32_to_cpup(addr);
+ else
+ goto err;
+
+ pdata->ls1012a_eth_pdata[port].gem_id = port;
+
+ mac_addr = of_get_mac_address(gem);
+ if (!IS_ERR_OR_NULL(mac_addr)) {
+ memcpy(pdata->ls1012a_eth_pdata[port].mac_addr, mac_addr,
+ ETH_ALEN);
+ }
+
+ phy_node = of_parse_phandle(gem, "phy-handle", 0);
+ pdata->ls1012a_eth_pdata[port].phy_node = phy_node;
+ if (phy_node) {
+ pfe_use_old_dts_phy = false;
+ goto process_phynode;
+ } else if (of_phy_is_fixed_link(gem)) {
+ pfe_use_old_dts_phy = false;
+ if (of_phy_register_fixed_link(gem) < 0) {
+ pr_err("broken fixed-link specification\n");
+ goto err;
+ }
+ phy_node = of_node_get(gem);
+ pdata->ls1012a_eth_pdata[port].phy_node = phy_node;
+ } else if (of_get_property(gem, "fsl,pfe-phy-if-flags", &size)) {
+ pfe_use_old_dts_phy = true;
+ /* Use old dts properties for phy handling */
+ addr = of_get_property(gem, "fsl,pfe-phy-if-flags", &size);
+ pdata->ls1012a_eth_pdata[port].phy_flags = be32_to_cpup(addr);
+
+ addr = of_get_property(gem, "fsl,gemac-phy-id", &size);
+ if (!addr) {
+ pr_err("%s:%d Invalid gemac-phy-id....\n", __func__,
+ __LINE__);
+ } else {
+ phy_id = be32_to_cpup(addr);
+ pdata->ls1012a_eth_pdata[port].phy_id = phy_id;
+ pdata->ls1012a_mdio_pdata[0].phy_mask &= ~(1 << phy_id);
+ }
+
+ /* If PHY is enabled, read mdio properties */
+ if (pdata->ls1012a_eth_pdata[port].phy_flags & GEMAC_NO_PHY)
+ goto done;
+
+ } else {
+ pr_info("%s: No PHY or fixed-link\n", __func__);
+ return 0;
+ }
+
+process_phynode:
+ pdata->ls1012a_eth_pdata[port].mii_config = of_get_phy_mode(gem);
+ if ((pdata->ls1012a_eth_pdata[port].mii_config) < 0)
+ pr_err("%s:%d Incorrect Phy mode....\n", __func__,
+ __LINE__);
+
+ addr = of_get_property(gem, "fsl,mdio-mux-val", &size);
+ if (!addr) {
+ pr_err("%s: Invalid mdio-mux-val....\n", __func__);
+ } else {
+ phy_id = be32_to_cpup(addr);
+ pdata->ls1012a_eth_pdata[port].mdio_muxval = phy_id;
+ }
+
+ if (pdata->ls1012a_eth_pdata[port].phy_id < 32)
+ pfe->mdio_muxval[pdata->ls1012a_eth_pdata[port].phy_id] =
+ pdata->ls1012a_eth_pdata[port].mdio_muxval;
+
+ pdata->ls1012a_mdio_pdata[port].irq[0] = PHY_POLL;
+
+done:
+ return 0;
+
+err:
+ return -1;
+}
+
+/*
+ * pfe_platform_probe -
+ */
+static int pfe_platform_probe(struct platform_device *pdev)
+{
+ struct resource res;
+ int ii, rc, interface_count = 0, size = 0;
+ const u32 *prop;
+ struct device_node *np, *gem = NULL;
+ struct clk *pfe_clk;
+
+ np = pdev->dev.of_node;
+
+ if (!np) {
+ pr_err("Invalid device node\n");
+ return -EINVAL;
+ }
+
+ pfe = kzalloc(sizeof(*pfe), GFP_KERNEL);
+ if (!pfe) {
+ rc = -ENOMEM;
+ goto err_alloc;
+ }
+
+ platform_set_drvdata(pdev, pfe);
+
+ dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+
+ if (of_address_to_resource(np, 1, &res)) {
+ rc = -ENOMEM;
+ pr_err("failed to get ddr resource\n");
+ goto err_ddr;
+ }
+
+ pfe->ddr_phys_baseaddr = res.start;
+ pfe->ddr_size = resource_size(&res);
+
+ pfe->ddr_baseaddr = memremap(res.start, resource_size(&res),
+ MEMREMAP_WB);
+ if (!pfe->ddr_baseaddr) {
+ pr_err("memremap() ddr failed\n");
+ rc = -ENOMEM;
+ goto err_ddr;
+ }
+
+ pfe->scfg =
+ syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "fsl,pfe-scfg");
+ if (IS_ERR(pfe->scfg)) {
+ dev_err(&pdev->dev, "No syscfg phandle specified\n");
+ return PTR_ERR(pfe->scfg);
+ }
+
+ pfe->cbus_baseaddr = of_iomap(np, 0);
+ if (!pfe->cbus_baseaddr) {
+ rc = -ENOMEM;
+ pr_err("failed to get axi resource\n");
+ goto err_axi;
+ }
+
+ pfe->hif_irq = platform_get_irq(pdev, 0);
+ if (pfe->hif_irq < 0) {
+ pr_err("platform_get_irq for hif failed\n");
+ rc = pfe->hif_irq;
+ goto err_hif_irq;
+ }
+
+ pfe->wol_irq = platform_get_irq(pdev, 2);
+ if (pfe->wol_irq < 0) {
+ pr_err("platform_get_irq for WoL failed\n");
+ rc = pfe->wol_irq;
+ goto err_hif_irq;
+ }
+
+ /* Read interface count */
+ prop = of_get_property(np, "fsl,pfe-num-interfaces", &size);
+ if (!prop) {
+ pr_err("Failed to read number of interfaces\n");
+ rc = -ENXIO;
+ goto err_prop;
+ }
+
+ interface_count = be32_to_cpup(prop);
+ if (interface_count <= 0) {
+ pr_err("No ethernet interface count : %d\n",
+ interface_count);
+ rc = -ENXIO;
+ goto err_prop;
+ }
+
+ pfe_platform_data.ls1012a_mdio_pdata[0].phy_mask = 0xffffffff;
+
+ for (ii = 0; ii < interface_count; ii++) {
+ gem = of_get_next_child(np, gem);
+ if (gem)
+ pfe_get_gemac_if_properties(gem, ii,
+ &pfe_platform_data);
+ else
+ pr_err("Unable to find interface %d\n", ii);
+
+ }
+
+ pfe->dev = &pdev->dev;
+
+ pfe->dev->platform_data = &pfe_platform_data;
+
+ /* declare WoL capabilities */
+ device_init_wakeup(&pdev->dev, true);
+
+ /* find the clocks */
+ pfe_clk = devm_clk_get(pfe->dev, "pfe");
+ if (IS_ERR(pfe_clk))
+ return PTR_ERR(pfe_clk);
+
+ /* PFE clock is (platform clock / 2) */
+ /* save sys_clk value as KHz */
+ pfe->ctrl.sys_clk = clk_get_rate(pfe_clk) / (2 * 1000);
+
+ rc = pfe_probe(pfe);
+ if (rc < 0)
+ goto err_probe;
+
+ return 0;
+
+err_probe:
+err_prop:
+err_hif_irq:
+ iounmap(pfe->cbus_baseaddr);
+
+err_axi:
+ memunmap(pfe->ddr_baseaddr);
+
+err_ddr:
+ platform_set_drvdata(pdev, NULL);
+
+ kfree(pfe);
+
+err_alloc:
+ return rc;
+}
+
+/*
+ * pfe_platform_remove -
+ */
+static int pfe_platform_remove(struct platform_device *pdev)
+{
+ struct pfe *pfe = platform_get_drvdata(pdev);
+ int rc;
+
+ pr_info("%s\n", __func__);
+
+ rc = pfe_remove(pfe);
+
+ iounmap(pfe->cbus_baseaddr);
+
+ memunmap(pfe->ddr_baseaddr);
+
+ platform_set_drvdata(pdev, NULL);
+
+ kfree(pfe);
+
+ return rc;
+}
+
+#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
+static int pfe_platform_suspend(struct device *dev)
+{
+ struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
+ struct net_device *netdev;
+ int i;
+
+ pfe->wake = 0;
+
+ for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
+ netdev = pfe->eth.eth_priv[i]->ndev;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ if (pfe_eth_suspend(netdev))
+ pfe->wake = 1;
+ }
+
+ /* Shutdown PFE only if we're not waking up the system */
+ if (!pfe->wake) {
+#if defined(LS1012A_PFE_RESET_WA)
+ pfe_hif_rx_idle(&pfe->hif);
+#endif
+ pfe_ctrl_suspend(&pfe->ctrl);
+ pfe_firmware_exit(pfe);
+
+ pfe_hif_exit(pfe);
+ pfe_hif_lib_exit(pfe);
+
+ pfe_hw_exit(pfe);
+ }
+
+ return 0;
+}
+
+static int pfe_platform_resume(struct device *dev)
+{
+ struct pfe *pfe = platform_get_drvdata(to_platform_device(dev));
+ struct net_device *netdev;
+ int i;
+
+ if (!pfe->wake) {
+ pfe_hw_init(pfe, 1);
+ pfe_hif_lib_init(pfe);
+ pfe_hif_init(pfe);
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+ util_enable();
+#endif
+ tmu_enable(0xf);
+ class_enable();
+ pfe_ctrl_resume(&pfe->ctrl);
+ }
+
+ for (i = 0; i < (NUM_GEMAC_SUPPORT); i++) {
+ netdev = pfe->eth.eth_priv[i]->ndev;
+
+ if (pfe->mdio.mdio_priv[i]->mii_bus)
+ pfe_eth_mdio_reset(pfe->mdio.mdio_priv[i]->mii_bus);
+
+ if (netif_running(netdev))
+ pfe_eth_resume(netdev);
+
+ netif_device_attach(netdev);
+ }
+ return 0;
+}
+#else
+#define pfe_platform_suspend NULL
+#define pfe_platform_resume NULL
+#endif
+
+static const struct dev_pm_ops pfe_platform_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pfe_platform_suspend, pfe_platform_resume)
+};
+#endif
+
+static const struct of_device_id pfe_match[] = {
+ {
+ .compatible = "fsl,pfe",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, pfe_match);
+
+static struct platform_driver pfe_platform_driver = {
+ .probe = pfe_platform_probe,
+ .remove = pfe_platform_remove,
+ .driver = {
+ .name = "pfe",
+ .of_match_table = pfe_match,
+#ifdef CONFIG_PM
+ .pm = &pfe_platform_pm_ops,
+#endif
+ },
+};
+
+module_platform_driver(pfe_platform_driver);
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("PFE Ethernet driver");
+MODULE_AUTHOR("NXP DNCPE");
diff --git a/drivers/staging/fsl_ppfe/pfe_mod.c b/drivers/staging/fsl_ppfe/pfe_mod.c
new file mode 100644
index 000000000000..f3244673265c
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_mod.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#include <linux/dma-mapping.h>
+#include "pfe_mod.h"
+#include "pfe_cdev.h"
+
+unsigned int us;
+module_param(us, uint, 0444);
+MODULE_PARM_DESC(us, "0: module enabled for kernel networking (DEFAULT)\n"
+ "1: module enabled for userspace networking\n");
+struct pfe *pfe;
+
+/*
+ * pfe_probe -
+ */
+int pfe_probe(struct pfe *pfe)
+{
+ int rc;
+
+ if (pfe->ddr_size < DDR_MAX_SIZE) {
+ pr_err("%s: required DDR memory (%x) above platform ddr memory (%x)\n",
+ __func__, (unsigned int)DDR_MAX_SIZE, pfe->ddr_size);
+ rc = -ENOMEM;
+ goto err_hw;
+ }
+
+ if (((int)(pfe->ddr_phys_baseaddr + BMU2_DDR_BASEADDR) &
+ (8 * SZ_1M - 1)) != 0) {
+ pr_err("%s: BMU2 base address (0x%x) must be aligned on 8MB boundary\n",
+ __func__, (int)pfe->ddr_phys_baseaddr +
+ BMU2_DDR_BASEADDR);
+ rc = -ENOMEM;
+ goto err_hw;
+ }
+
+ pr_info("cbus_baseaddr: %lx, ddr_baseaddr: %lx, ddr_phys_baseaddr: %lx, ddr_size: %x\n",
+ (unsigned long)pfe->cbus_baseaddr,
+ (unsigned long)pfe->ddr_baseaddr,
+ pfe->ddr_phys_baseaddr, pfe->ddr_size);
+
+ pfe_lib_init(pfe->cbus_baseaddr, pfe->ddr_baseaddr,
+ pfe->ddr_phys_baseaddr, pfe->ddr_size);
+
+ rc = pfe_hw_init(pfe, 0);
+ if (rc < 0)
+ goto err_hw;
+
+ if (us)
+ goto firmware_init;
+
+ rc = pfe_hif_lib_init(pfe);
+ if (rc < 0)
+ goto err_hif_lib;
+
+ rc = pfe_hif_init(pfe);
+ if (rc < 0)
+ goto err_hif;
+
+firmware_init:
+ rc = pfe_firmware_init(pfe);
+ if (rc < 0)
+ goto err_firmware;
+
+ rc = pfe_ctrl_init(pfe);
+ if (rc < 0)
+ goto err_ctrl;
+
+ rc = pfe_eth_init(pfe);
+ if (rc < 0)
+ goto err_eth;
+
+ rc = pfe_sysfs_init(pfe);
+ if (rc < 0)
+ goto err_sysfs;
+
+ rc = pfe_debugfs_init(pfe);
+ if (rc < 0)
+ goto err_debugfs;
+
+ if (us) {
+ /* Creating a character device */
+ rc = pfe_cdev_init();
+ if (rc < 0)
+ goto err_cdev;
+ }
+
+ return 0;
+
+err_cdev:
+ pfe_debugfs_exit(pfe);
+
+err_debugfs:
+ pfe_sysfs_exit(pfe);
+
+err_sysfs:
+ pfe_eth_exit(pfe);
+
+err_eth:
+ pfe_ctrl_exit(pfe);
+
+err_ctrl:
+ pfe_firmware_exit(pfe);
+
+err_firmware:
+ if (us)
+ goto err_hif_lib;
+
+ pfe_hif_exit(pfe);
+
+err_hif:
+ pfe_hif_lib_exit(pfe);
+
+err_hif_lib:
+ pfe_hw_exit(pfe);
+
+err_hw:
+ return rc;
+}
+
+/*
+ * pfe_remove -
+ */
+int pfe_remove(struct pfe *pfe)
+{
+ pr_info("%s\n", __func__);
+
+ if (us)
+ pfe_cdev_exit();
+
+ pfe_debugfs_exit(pfe);
+
+ pfe_sysfs_exit(pfe);
+
+ pfe_eth_exit(pfe);
+
+ pfe_ctrl_exit(pfe);
+
+#if defined(LS1012A_PFE_RESET_WA)
+ pfe_hif_rx_idle(&pfe->hif);
+#endif
+ pfe_firmware_exit(pfe);
+
+ if (us)
+ goto hw_exit;
+
+ pfe_hif_exit(pfe);
+
+ pfe_hif_lib_exit(pfe);
+
+hw_exit:
+ pfe_hw_exit(pfe);
+
+ return 0;
+}
diff --git a/drivers/staging/fsl_ppfe/pfe_mod.h b/drivers/staging/fsl_ppfe/pfe_mod.h
new file mode 100644
index 000000000000..afe3964a3c1b
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_mod.h
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#ifndef _PFE_MOD_H_
+#define _PFE_MOD_H_
+
+#include <linux/device.h>
+#include <linux/elf.h>
+
+extern unsigned int us;
+
+struct pfe;
+
+#include "pfe_hw.h"
+#include "pfe_firmware.h"
+#include "pfe_ctrl.h"
+#include "pfe_hif.h"
+#include "pfe_hif_lib.h"
+#include "pfe_eth.h"
+#include "pfe_sysfs.h"
+#include "pfe_perfmon.h"
+#include "pfe_debugfs.h"
+
+#define PHYID_MAX_VAL 32
+
+struct pfe_tmu_credit {
+ /* Number of allowed TX packet in-flight, matches TMU queue size */
+ unsigned int tx_credit[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
+ unsigned int tx_credit_max[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
+ unsigned int tx_packets[NUM_GEMAC_SUPPORT][EMAC_TXQ_CNT];
+};
+
+struct pfe {
+ struct regmap *scfg;
+ unsigned long ddr_phys_baseaddr;
+ void *ddr_baseaddr;
+ unsigned int ddr_size;
+ void *cbus_baseaddr;
+ void *apb_baseaddr;
+ unsigned long iram_phys_baseaddr;
+ void *iram_baseaddr;
+ unsigned long ipsec_phys_baseaddr;
+ void *ipsec_baseaddr;
+ int hif_irq;
+ int wol_irq;
+ int hif_client_irq;
+ struct device *dev;
+ struct dentry *dentry;
+ struct pfe_ctrl ctrl;
+ struct pfe_hif hif;
+ struct pfe_eth eth;
+ struct pfe_mdio mdio;
+ struct hif_client_s *hif_client[HIF_CLIENTS_MAX];
+#if defined(CFG_DIAGS)
+ struct pfe_diags diags;
+#endif
+ struct pfe_tmu_credit tmu_credit;
+ struct pfe_cpumon cpumon;
+ struct pfe_memmon memmon;
+ int wake;
+ int mdio_muxval[PHYID_MAX_VAL];
+ struct clk *hfe_clock;
+};
+
+extern struct pfe *pfe;
+
+int pfe_probe(struct pfe *pfe);
+int pfe_remove(struct pfe *pfe);
+
+/* DDR Mapping in reserved memory*/
+#define ROUTE_TABLE_BASEADDR 0
+#define ROUTE_TABLE_HASH_BITS 15 /* 32K entries */
+#define ROUTE_TABLE_SIZE ((1 << ROUTE_TABLE_HASH_BITS) \
+ * CLASS_ROUTE_SIZE)
+#define BMU2_DDR_BASEADDR (ROUTE_TABLE_BASEADDR + ROUTE_TABLE_SIZE)
+#define BMU2_BUF_COUNT (4096 - 256)
+/* This is to get a total DDR size of 12MiB */
+#define BMU2_DDR_SIZE (DDR_BUF_SIZE * BMU2_BUF_COUNT)
+#define UTIL_CODE_BASEADDR (BMU2_DDR_BASEADDR + BMU2_DDR_SIZE)
+#define UTIL_CODE_SIZE (128 * SZ_1K)
+#define UTIL_DDR_DATA_BASEADDR (UTIL_CODE_BASEADDR + UTIL_CODE_SIZE)
+#define UTIL_DDR_DATA_SIZE (64 * SZ_1K)
+#define CLASS_DDR_DATA_BASEADDR (UTIL_DDR_DATA_BASEADDR + UTIL_DDR_DATA_SIZE)
+#define CLASS_DDR_DATA_SIZE (32 * SZ_1K)
+#define TMU_DDR_DATA_BASEADDR (CLASS_DDR_DATA_BASEADDR + CLASS_DDR_DATA_SIZE)
+#define TMU_DDR_DATA_SIZE (32 * SZ_1K)
+#define TMU_LLM_BASEADDR (TMU_DDR_DATA_BASEADDR + TMU_DDR_DATA_SIZE)
+#define TMU_LLM_QUEUE_LEN (8 * 512)
+/* Must be power of two and at least 16 * 8 = 128 bytes */
+#define TMU_LLM_SIZE (4 * 16 * TMU_LLM_QUEUE_LEN)
+/* (4 TMU's x 16 queues x queue_len) */
+
+#define DDR_MAX_SIZE (TMU_LLM_BASEADDR + TMU_LLM_SIZE)
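+
+/*
+ * Sanity check of the 12MiB figure above, assuming CLASS_ROUTE_SIZE = 128
+ * and DDR_BUF_SIZE = 2048 (both defined in the PFE hardware headers):
+ * route table 32K x 128 = 4MiB, BMU2 3840 x 2048 = 7.5MiB, PE code/data
+ * 128K + 64K + 32K + 32K = 256KiB, TMU LLM 4 x 16 x 4096 = 256KiB,
+ * for a total of 12MiB.
+ */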
+
+/* LMEM Mapping */
+#define BMU1_LMEM_BASEADDR 0
+#define BMU1_BUF_COUNT 256
+#define BMU1_LMEM_SIZE (LMEM_BUF_SIZE * BMU1_BUF_COUNT)
+
+#endif /* _PFE_MOD_H_ */
diff --git a/drivers/staging/fsl_ppfe/pfe_perfmon.h b/drivers/staging/fsl_ppfe/pfe_perfmon.h
new file mode 100644
index 000000000000..afcecfeafd93
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_perfmon.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#ifndef _PFE_PERFMON_H_
+#define _PFE_PERFMON_H_
+
+#include "pfe/pfe.h"
+
+#define CT_CPUMON_INTERVAL (1 * TIMER_TICKS_PER_SEC)
+
+struct pfe_cpumon {
+ u32 cpu_usage_pct[MAX_PE];
+ u32 class_usage_pct;
+};
+
+struct pfe_memmon {
+ u32 kernel_memory_allocated;
+};
+
+int pfe_perfmon_init(struct pfe *pfe);
+void pfe_perfmon_exit(struct pfe *pfe);
+
+#endif /* _PFE_PERFMON_H_ */
diff --git a/drivers/staging/fsl_ppfe/pfe_sysfs.c b/drivers/staging/fsl_ppfe/pfe_sysfs.c
new file mode 100644
index 000000000000..32ae0e12302e
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_sysfs.c
@@ -0,0 +1,806 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+#include "pfe_mod.h"
+
+#define PE_EXCEPTION_DUMP_ADDRESS 0x1fa8
+#define NUM_QUEUES 16
+
+static char register_name[20][5] = {
+ "EPC", "ECAS", "EID", "ED",
+ "r0", "r1", "r2", "r3",
+ "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11",
+ "r12", "r13", "r14", "r15",
+};
+
+static char exception_name[14][20] = {
+ "Reset",
+ "HardwareFailure",
+ "NMI",
+ "InstBreakpoint",
+ "DataBreakpoint",
+ "Unsupported",
+ "PrivilegeViolation",
+ "InstBusError",
+ "DataBusError",
+ "AlignmentError",
+ "ArithmeticError",
+ "SystemCall",
+ "MemoryManagement",
+ "Interrupt",
+};
+
+static unsigned long class_do_clear;
+static unsigned long tmu_do_clear;
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+static unsigned long util_do_clear;
+#endif
+
+static ssize_t display_pe_status(char *buf, int id, u32 dmem_addr, unsigned long
+ do_clear)
+{
+ ssize_t len = 0;
+ u32 val;
+ char statebuf[5];
+ struct pfe_cpumon *cpumon = &pfe->cpumon;
+ u32 debug_indicator;
+ u32 debug[20];
+
+ *(u32 *)statebuf = pe_dmem_read(id, dmem_addr, 4);
+ dmem_addr += 4;
+
+ statebuf[4] = '\0';
+ len += sprintf(buf + len, "state=%4s ", statebuf);
+
+ val = pe_dmem_read(id, dmem_addr, 4);
+ dmem_addr += 4;
+ len += sprintf(buf + len, "ctr=%08x ", cpu_to_be32(val));
+
+ val = pe_dmem_read(id, dmem_addr, 4);
+ if (do_clear && val)
+ pe_dmem_write(id, 0, dmem_addr, 4);
+ dmem_addr += 4;
+ len += sprintf(buf + len, "rx=%u ", cpu_to_be32(val));
+
+ val = pe_dmem_read(id, dmem_addr, 4);
+ if (do_clear && val)
+ pe_dmem_write(id, 0, dmem_addr, 4);
+ dmem_addr += 4;
+ if (id >= TMU0_ID && id <= TMU_MAX_ID)
+ len += sprintf(buf + len, "qstatus=%x", cpu_to_be32(val));
+ else
+ len += sprintf(buf + len, "tx=%u", cpu_to_be32(val));
+
+ val = pe_dmem_read(id, dmem_addr, 4);
+ if (do_clear && val)
+ pe_dmem_write(id, 0, dmem_addr, 4);
+ dmem_addr += 4;
+ if (val)
+ len += sprintf(buf + len, " drop=%u", cpu_to_be32(val));
+
+ len += sprintf(buf + len, " load=%d%%", cpumon->cpu_usage_pct[id]);
+
+ len += sprintf(buf + len, "\n");
+
+ debug_indicator = pe_dmem_read(id, dmem_addr, 4);
+ dmem_addr += 4;
+ if (!strncmp((char *)&debug_indicator, "DBUG", 4)) {
+ int j, last = 0;
+
+ for (j = 0; j < 16; j++) {
+ debug[j] = pe_dmem_read(id, dmem_addr, 4);
+ if (debug[j]) {
+ if (do_clear)
+ pe_dmem_write(id, 0, dmem_addr, 4);
+ last = j + 1;
+ }
+ dmem_addr += 4;
+ }
+ for (j = 0; j < last; j++) {
+ len += sprintf(buf + len, "%08x%s",
+ cpu_to_be32(debug[j]),
+ (j & 0x7) == 0x7 || j == last - 1 ? "\n" : " ");
+ }
+ }
+
+ if (!strncmp(statebuf, "DEAD", 4)) {
+ u32 i, dump = PE_EXCEPTION_DUMP_ADDRESS;
+
+ len += sprintf(buf + len, "Exception details:\n");
+ for (i = 0; i < 20; i++) {
+ debug[i] = pe_dmem_read(id, dump, 4);
+ dump += 4;
+ if (i == 2)
+ len += sprintf(buf + len, "%4s = %08x (=%s) ",
+ register_name[i], cpu_to_be32(debug[i]),
+ exception_name[min((u32)
+ cpu_to_be32(debug[i]), (u32)13)]);
+ else
+ len += sprintf(buf + len, "%4s = %08x%s",
+ register_name[i], cpu_to_be32(debug[i]),
+ (i & 0x3) == 0x3 || i == 19 ? "\n" : " ");
+ }
+ }
+
+ return len;
+}
+
+static ssize_t class_phy_stats(char *buf, int phy)
+{
+ ssize_t len = 0;
+ int off1 = phy * 0x28;
+ int off2 = phy * 0x10;
+
+ if (phy == 3)
+ off1 = CLASS_PHY4_RX_PKTS - CLASS_PHY1_RX_PKTS;
+
+ len += sprintf(buf + len, "phy: %d\n", phy);
+ len += sprintf(buf + len,
+ " rx: %10u, tx: %10u, intf: %10u, ipv4: %10u, ipv6: %10u\n",
+ readl(CLASS_PHY1_RX_PKTS + off1),
+ readl(CLASS_PHY1_TX_PKTS + off1),
+ readl(CLASS_PHY1_INTF_MATCH_PKTS + off1),
+ readl(CLASS_PHY1_V4_PKTS + off1),
+ readl(CLASS_PHY1_V6_PKTS + off1));
+
+ len += sprintf(buf + len,
+ " icmp: %10u, igmp: %10u, tcp: %10u, udp: %10u\n",
+ readl(CLASS_PHY1_ICMP_PKTS + off2),
+ readl(CLASS_PHY1_IGMP_PKTS + off2),
+ readl(CLASS_PHY1_TCP_PKTS + off2),
+ readl(CLASS_PHY1_UDP_PKTS + off2));
+
+ len += sprintf(buf + len, " err\n");
+ len += sprintf(buf + len,
+ " lp: %10u, intf: %10u, l3: %10u, chcksum: %10u, ttl: %10u\n",
+ readl(CLASS_PHY1_LP_FAIL_PKTS + off1),
+ readl(CLASS_PHY1_INTF_FAIL_PKTS + off1),
+ readl(CLASS_PHY1_L3_FAIL_PKTS + off1),
+ readl(CLASS_PHY1_CHKSUM_ERR_PKTS + off1),
+ readl(CLASS_PHY1_TTL_ERR_PKTS + off1));
+
+ return len;
+}
+
+/* qm_read_drop_stat
+ * This function is used to read the drop statistics from the TMU
+ * hw drop counter. Since the hw counter is always cleared after
+ * reading, this function maintains the previous drop count, and
+ * adds the new value to it. That value can be retrieved by
+ * passing a pointer to it with the total_drops arg.
+ *
+ * @param tmu TMU number (0 - 3)
+ * @param queue queue number (0 - 15)
+ * @param total_drops pointer to location to store total drops (or NULL)
+ * @param do_reset if TRUE, clear total drops after updating
+ */
+u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset)
+{
+ static u32 qtotal[TMU_MAX_ID + 1][NUM_QUEUES];
+ u32 val;
+
+ writel((tmu << 8) | queue, TMU_TEQ_CTRL);
+ writel((tmu << 8) | queue, TMU_LLM_CTRL);
+ val = readl(TMU_TEQ_DROP_STAT);
+ qtotal[tmu][queue] += val;
+ if (total_drops)
+ *total_drops = qtotal[tmu][queue];
+ if (do_reset)
+ qtotal[tmu][queue] = 0;
+ return val;
+}
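+
+/*
+ * Illustrative use (not part of the driver): accumulate the running drop
+ * total for TMU 0, queue 5 without resetting it.
+ */
+#if 0
+ u32 total;
+
+ qm_read_drop_stat(0, 5, &total, 0);
+ pr_info("TMU0-Q5 total drops: %u\n", total);
+#endif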
+
+static ssize_t tmu_queue_stats(char *buf, int tmu, int queue)
+{
+ ssize_t len = 0;
+ u32 drops;
+
+ len += sprintf(buf + len, "%d-%02d, ", tmu, queue);
+
+ drops = qm_read_drop_stat(tmu, queue, NULL, 0);
+
+ /* Select queue */
+ writel((tmu << 8) | queue, TMU_TEQ_CTRL);
+ writel((tmu << 8) | queue, TMU_LLM_CTRL);
+
+ len += sprintf(buf + len,
+ "(teq) drop: %10u, tx: %10u (llm) head: %08x, tail: %08x, drop: %10u\n",
+ drops, readl(TMU_TEQ_TRANS_STAT),
+ readl(TMU_LLM_QUE_HEADPTR), readl(TMU_LLM_QUE_TAILPTR),
+ readl(TMU_LLM_QUE_DROPCNT));
+
+ return len;
+}
+
+static ssize_t tmu_queues(char *buf, int tmu)
+{
+ ssize_t len = 0;
+ int queue;
+
+ for (queue = 0; queue < 16; queue++)
+ len += tmu_queue_stats(buf + len, tmu, queue);
+
+ return len;
+}
+
+static ssize_t block_version(char *buf, void *addr)
+{
+ ssize_t len = 0;
+ u32 val;
+
+ val = readl(addr);
+ len += sprintf(buf + len, "revision: %x, version: %x, id: %x\n",
+ (val >> 24) & 0xff, (val >> 16) & 0xff, val & 0xffff);
+
+ return len;
+}
+
+static ssize_t bmu(char *buf, int id, void *base)
+{
+ ssize_t len = 0;
+
+ len += sprintf(buf + len, "%s: %d\n ", __func__, id);
+
+ len += block_version(buf + len, base + BMU_VERSION);
+
+ len += sprintf(buf + len, " buf size: %x\n", (1 << readl(base +
+ BMU_BUF_SIZE)));
+ len += sprintf(buf + len, " buf count: %x\n", readl(base +
+ BMU_BUF_CNT));
+ len += sprintf(buf + len, " buf rem: %x\n", readl(base +
+ BMU_REM_BUF_CNT));
+ len += sprintf(buf + len, " buf curr: %x\n", readl(base +
+ BMU_CURR_BUF_CNT));
+ len += sprintf(buf + len, " free err: %x\n", readl(base +
+ BMU_FREE_ERR_ADDR));
+
+ return len;
+}
+
+static ssize_t gpi(char *buf, int id, void *base)
+{
+ ssize_t len = 0;
+ u32 val;
+
+ len += sprintf(buf + len, "%s%d:\n ", __func__, id);
+ len += block_version(buf + len, base + GPI_VERSION);
+
+ len += sprintf(buf + len, " tx under stick: %x\n", readl(base +
+ GPI_FIFO_STATUS));
+ val = readl(base + GPI_FIFO_DEBUG);
+ len += sprintf(buf + len, " tx pkts: %x\n", (val >> 23) &
+ 0x3f);
+ len += sprintf(buf + len, " rx pkts: %x\n", (val >> 18) &
+ 0x3f);
+ len += sprintf(buf + len, " tx bytes: %x\n", (val >> 9) &
+ 0x1ff);
+ len += sprintf(buf + len, " rx bytes: %x\n", (val >> 0) &
+ 0x1ff);
+ len += sprintf(buf + len, " overrun: %x\n", readl(base +
+ GPI_OVERRUN_DROPCNT));
+
+ return len;
+}
+
+static ssize_t pfe_set_class(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ if (kstrtoul(buf, 0, &class_do_clear))
+ return -EINVAL;
+ return count;
+}
+
+static ssize_t pfe_show_class(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t len = 0;
+ int id;
+ u32 val;
+ struct pfe_cpumon *cpumon = &pfe->cpumon;
+
+ len += block_version(buf + len, CLASS_VERSION);
+
+ for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
+ len += sprintf(buf + len, "%d: ", id - CLASS0_ID);
+
+ val = readl(CLASS_PE0_DEBUG + id * 4);
+ len += sprintf(buf + len, "pc=1%04x ", val & 0xffff);
+
+ len += display_pe_status(buf + len, id, CLASS_DM_PESTATUS,
+ class_do_clear);
+ }
+ len += sprintf(buf + len, "aggregate load=%d%%\n\n",
+ cpumon->class_usage_pct);
+
+ len += sprintf(buf + len, "pe status: 0x%x\n",
+ readl(CLASS_PE_STATUS));
+ len += sprintf(buf + len, "max buf cnt: 0x%x afull thres: 0x%x\n",
+ readl(CLASS_MAX_BUF_CNT), readl(CLASS_AFULL_THRES));
+ len += sprintf(buf + len, "tsq max cnt: 0x%x tsq fifo thres: 0x%x\n",
+ readl(CLASS_TSQ_MAX_CNT), readl(CLASS_TSQ_FIFO_THRES));
+ len += sprintf(buf + len, "state: 0x%x\n", readl(CLASS_STATE));
+
+ len += class_phy_stats(buf + len, 0);
+ len += class_phy_stats(buf + len, 1);
+ len += class_phy_stats(buf + len, 2);
+ len += class_phy_stats(buf + len, 3);
+
+ return len;
+}
+
+static ssize_t pfe_set_tmu(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ if (kstrtoul(buf, 0, &tmu_do_clear))
+ return -EINVAL;
+ return count;
+}
+
+static ssize_t pfe_show_tmu(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t len = 0;
+ int id;
+ u32 val;
+
+ len += block_version(buf + len, TMU_VERSION);
+
+ for (id = TMU0_ID; id <= TMU_MAX_ID; id++) {
+ if (id == TMU2_ID)
+ continue;
+ len += sprintf(buf + len, "%d: ", id - TMU0_ID);
+
+ len += display_pe_status(buf + len, id, TMU_DM_PESTATUS,
+ tmu_do_clear);
+ }
+
+ len += sprintf(buf + len, "pe status: %x\n", readl(TMU_PE_STATUS));
+ len += sprintf(buf + len, "inq fifo cnt: %x\n",
+ readl(TMU_PHY_INQ_FIFO_CNT));
+ val = readl(TMU_INQ_STAT);
+ len += sprintf(buf + len, "inq wr ptr: %x\n", val & 0x3ff);
+ len += sprintf(buf + len, "inq rd ptr: %x\n", val >> 10);
+
+ return len;
+}
+
+static unsigned long drops_do_clear;
+static u32 class_drop_counter[CLASS_NUM_DROP_COUNTERS];
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+static u32 util_drop_counter[UTIL_NUM_DROP_COUNTERS];
+#endif
+
+char *class_drop_description[CLASS_NUM_DROP_COUNTERS] = {
+ "ICC",
+ "Host Pkt Error",
+ "Rx Error",
+ "IPsec Outbound",
+ "IPsec Inbound",
+ "EXPT IPsec Error",
+ "Reassembly",
+ "Fragmenter",
+ "NAT-T",
+ "Socket",
+ "Multicast",
+ "NAT-PT",
+ "Tx Disabled",
+};
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+char *util_drop_description[UTIL_NUM_DROP_COUNTERS] = {
+ "IPsec Outbound",
+ "IPsec Inbound",
+ "IPsec Rate Limiter",
+ "Fragmenter",
+ "Socket",
+ "Tx Disabled",
+ "Rx Error",
+};
+#endif
+
+static ssize_t pfe_set_drops(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ if (kstrtoul(buf, 0, &drops_do_clear))
+ return -EINVAL;
+ return count;
+}
+
+static u32 tmu_drops[4][16];
+static ssize_t pfe_show_drops(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t len = 0;
+ int id, dropnum;
+ int tmu, queue;
+ u32 val;
+ u32 dmem_addr;
+ int num_class_drops = 0, num_tmu_drops = 0, num_util_drops = 0;
+ struct pfe_ctrl *ctrl = &pfe->ctrl;
+
+ memset(class_drop_counter, 0, sizeof(class_drop_counter));
+ for (id = CLASS0_ID; id <= CLASS_MAX_ID; id++) {
+ if (drops_do_clear)
+ pe_sync_stop(ctrl, (1 << id));
+ for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
+ dropnum++) {
+ dmem_addr = CLASS_DM_DROP_CNTR;
+ val = be32_to_cpu(pe_dmem_read(id, dmem_addr, 4));
+ class_drop_counter[dropnum] += val;
+ num_class_drops += val;
+ if (drops_do_clear)
+ pe_dmem_write(id, 0, dmem_addr, 4);
+ }
+ if (drops_do_clear)
+ pe_start(ctrl, (1 << id));
+ }
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+ if (drops_do_clear)
+ pe_sync_stop(ctrl, (1 << UTIL_ID));
+ for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
+ dmem_addr = UTIL_DM_DROP_CNTR;
+ val = be32_to_cpu(pe_dmem_read(UTIL_ID, dmem_addr, 4));
+ util_drop_counter[dropnum] = val;
+ num_util_drops += val;
+ if (drops_do_clear)
+ pe_dmem_write(UTIL_ID, 0, dmem_addr, 4);
+ }
+ if (drops_do_clear)
+ pe_start(ctrl, (1 << UTIL_ID));
+#endif
+ for (tmu = 0; tmu < 4; tmu++) {
+ for (queue = 0; queue < 16; queue++) {
+ qm_read_drop_stat(tmu, queue, &tmu_drops[tmu][queue],
+ drops_do_clear);
+ num_tmu_drops += tmu_drops[tmu][queue];
+ }
+ }
+
+ if (num_class_drops == 0 && num_util_drops == 0 && num_tmu_drops == 0)
+ len += sprintf(buf + len, "No PE drops\n\n");
+
+ if (num_class_drops > 0) {
+ len += sprintf(buf + len, "Class PE drops --\n");
+ for (dropnum = 0; dropnum < CLASS_NUM_DROP_COUNTERS;
+ dropnum++) {
+ if (class_drop_counter[dropnum] > 0)
+ len += sprintf(buf + len, " %s: %d\n",
+ class_drop_description[dropnum],
+ class_drop_counter[dropnum]);
+ }
+ len += sprintf(buf + len, "\n");
+ }
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+ if (num_util_drops > 0) {
+ len += sprintf(buf + len, "Util PE drops --\n");
+ for (dropnum = 0; dropnum < UTIL_NUM_DROP_COUNTERS; dropnum++) {
+ if (util_drop_counter[dropnum] > 0)
+ len += sprintf(buf + len, " %s: %d\n",
+ util_drop_description[dropnum],
+ util_drop_counter[dropnum]);
+ }
+ len += sprintf(buf + len, "\n");
+ }
+#endif
+ if (num_tmu_drops > 0) {
+ len += sprintf(buf + len, "TMU drops --\n");
+ for (tmu = 0; tmu < 4; tmu++) {
+ for (queue = 0; queue < 16; queue++) {
+ if (tmu_drops[tmu][queue] > 0)
+ len += sprintf(buf + len,
+ " TMU%d-Q%d: %d\n"
+ , tmu, queue, tmu_drops[tmu][queue]);
+ }
+ }
+ len += sprintf(buf + len, "\n");
+ }
+
+ return len;
+}
+
+static ssize_t pfe_show_tmu0_queues(struct device *dev, struct device_attribute
+ *attr, char *buf)
+{
+ return tmu_queues(buf, 0);
+}
+
+static ssize_t pfe_show_tmu1_queues(struct device *dev, struct device_attribute
+ *attr, char *buf)
+{
+ return tmu_queues(buf, 1);
+}
+
+static ssize_t pfe_show_tmu2_queues(struct device *dev, struct device_attribute
+ *attr, char *buf)
+{
+ return tmu_queues(buf, 2);
+}
+
+static ssize_t pfe_show_tmu3_queues(struct device *dev, struct device_attribute
+ *attr, char *buf)
+{
+ return tmu_queues(buf, 3);
+}
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+static ssize_t pfe_set_util(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ if (kstrtoul(buf, 0, &util_do_clear))
+ return -EINVAL;
+ return count;
+}
+
+static ssize_t pfe_show_util(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t len = 0;
+ struct pfe_ctrl *ctrl = &pfe->ctrl;
+
+ len += block_version(buf + len, UTIL_VERSION);
+
+ pe_sync_stop(ctrl, (1 << UTIL_ID));
+ len += display_pe_status(buf + len, UTIL_ID, UTIL_DM_PESTATUS,
+ util_do_clear);
+ pe_start(ctrl, (1 << UTIL_ID));
+
+ len += sprintf(buf + len, "pe status: %x\n", readl(UTIL_PE_STATUS));
+ len += sprintf(buf + len, "max buf cnt: %x\n",
+ readl(UTIL_MAX_BUF_CNT));
+ len += sprintf(buf + len, "tsq max cnt: %x\n",
+ readl(UTIL_TSQ_MAX_CNT));
+
+ return len;
+}
+#endif
+
+static ssize_t pfe_show_bmu(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t len = 0;
+
+ len += bmu(buf + len, 1, BMU1_BASE_ADDR);
+ len += bmu(buf + len, 2, BMU2_BASE_ADDR);
+
+ return len;
+}
+
+static ssize_t pfe_show_hif(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t len = 0;
+
+ len += sprintf(buf + len, "hif:\n ");
+ len += block_version(buf + len, HIF_VERSION);
+
+ len += sprintf(buf + len, " tx curr bd: %x\n",
+ readl(HIF_TX_CURR_BD_ADDR));
+ len += sprintf(buf + len, " tx status: %x\n",
+ readl(HIF_TX_STATUS));
+ len += sprintf(buf + len, " tx dma status: %x\n",
+ readl(HIF_TX_DMA_STATUS));
+
+ len += sprintf(buf + len, " rx curr bd: %x\n",
+ readl(HIF_RX_CURR_BD_ADDR));
+ len += sprintf(buf + len, " rx status: %x\n",
+ readl(HIF_RX_STATUS));
+ len += sprintf(buf + len, " rx dma status: %x\n",
+ readl(HIF_RX_DMA_STATUS));
+
+ len += sprintf(buf + len, "hif nocopy:\n ");
+ len += block_version(buf + len, HIF_NOCPY_VERSION);
+
+ len += sprintf(buf + len, " tx curr bd: %x\n",
+ readl(HIF_NOCPY_TX_CURR_BD_ADDR));
+ len += sprintf(buf + len, " tx status: %x\n",
+ readl(HIF_NOCPY_TX_STATUS));
+ len += sprintf(buf + len, " tx dma status: %x\n",
+ readl(HIF_NOCPY_TX_DMA_STATUS));
+
+ len += sprintf(buf + len, " rx curr bd: %x\n",
+ readl(HIF_NOCPY_RX_CURR_BD_ADDR));
+ len += sprintf(buf + len, " rx status: %x\n",
+ readl(HIF_NOCPY_RX_STATUS));
+ len += sprintf(buf + len, " rx dma status: %x\n",
+ readl(HIF_NOCPY_RX_DMA_STATUS));
+
+ return len;
+}
+
+static ssize_t pfe_show_gpi(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ ssize_t len = 0;
+
+ len += gpi(buf + len, 0, EGPI1_BASE_ADDR);
+ len += gpi(buf + len, 1, EGPI2_BASE_ADDR);
+ len += gpi(buf + len, 3, HGPI_BASE_ADDR);
+
+ return len;
+}
+
+static ssize_t pfe_show_pfemem(struct device *dev, struct device_attribute
+ *attr, char *buf)
+{
+ ssize_t len = 0;
+ struct pfe_memmon *memmon = &pfe->memmon;
+
+ len += sprintf(buf + len, "Kernel Memory: %d Bytes (%d KB)\n",
+ memmon->kernel_memory_allocated,
+ (memmon->kernel_memory_allocated + 1023) / 1024);
+
+ return len;
+}
+
+#ifdef HIF_NAPI_STATS
+static ssize_t pfe_show_hif_napi_stats(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pfe *pfe = platform_get_drvdata(pdev);
+ ssize_t len = 0;
+
+ len += sprintf(buf + len, "sched: %u\n",
+ pfe->hif.napi_counters[NAPI_SCHED_COUNT]);
+ len += sprintf(buf + len, "poll: %u\n",
+ pfe->hif.napi_counters[NAPI_POLL_COUNT]);
+ len += sprintf(buf + len, "packet: %u\n",
+ pfe->hif.napi_counters[NAPI_PACKET_COUNT]);
+ len += sprintf(buf + len, "budget: %u\n",
+ pfe->hif.napi_counters[NAPI_FULL_BUDGET_COUNT]);
+ len += sprintf(buf + len, "desc: %u\n",
+ pfe->hif.napi_counters[NAPI_DESC_COUNT]);
+ len += sprintf(buf + len, "full: %u\n",
+ pfe->hif.napi_counters[NAPI_CLIENT_FULL_COUNT]);
+
+ return len;
+}
+
+static ssize_t pfe_set_hif_napi_stats(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct pfe *pfe = platform_get_drvdata(pdev);
+
+ memset(pfe->hif.napi_counters, 0, sizeof(pfe->hif.napi_counters));
+
+ return count;
+}
+
+static DEVICE_ATTR(hif_napi_stats, 0644, pfe_show_hif_napi_stats,
+ pfe_set_hif_napi_stats);
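+
+/*
+ * Illustrative usage (only when built with HIF_NAPI_STATS; the actual
+ * sysfs path depends on how the PFE platform device is named):
+ *
+ *   cat /sys/devices/platform/<pfe-device>/hif_napi_stats      # dump
+ *   echo 0 > /sys/devices/platform/<pfe-device>/hif_napi_stats # reset
+ *
+ * The store hook zeroes the counters regardless of the value written.
+ */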
+#endif
+
+static DEVICE_ATTR(class, 0644, pfe_show_class, pfe_set_class);
+static DEVICE_ATTR(tmu, 0644, pfe_show_tmu, pfe_set_tmu);
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+static DEVICE_ATTR(util, 0644, pfe_show_util, pfe_set_util);
+#endif
+static DEVICE_ATTR(bmu, 0444, pfe_show_bmu, NULL);
+static DEVICE_ATTR(hif, 0444, pfe_show_hif, NULL);
+static DEVICE_ATTR(gpi, 0444, pfe_show_gpi, NULL);
+static DEVICE_ATTR(drops, 0644, pfe_show_drops, pfe_set_drops);
+static DEVICE_ATTR(tmu0_queues, 0444, pfe_show_tmu0_queues, NULL);
+static DEVICE_ATTR(tmu1_queues, 0444, pfe_show_tmu1_queues, NULL);
+static DEVICE_ATTR(tmu2_queues, 0444, pfe_show_tmu2_queues, NULL);
+static DEVICE_ATTR(tmu3_queues, 0444, pfe_show_tmu3_queues, NULL);
+static DEVICE_ATTR(pfemem, 0444, pfe_show_pfemem, NULL);
+
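+/*
+ * Illustrative usage of the attributes above (path is a placeholder;
+ * the real one depends on the platform device name). Writes are parsed
+ * as a numeric flag by the corresponding set hooks:
+ *
+ *   cat /sys/devices/platform/<pfe-device>/drops        # drop counters
+ *   echo 1 > /sys/devices/platform/<pfe-device>/drops   # arm clearing
+ *   cat /sys/devices/platform/<pfe-device>/tmu0_queues  # TMU0 queues
+ */
+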
+int pfe_sysfs_init(struct pfe *pfe)
+{
+ if (device_create_file(pfe->dev, &dev_attr_class))
+ goto err_class;
+
+ if (device_create_file(pfe->dev, &dev_attr_tmu))
+ goto err_tmu;
+
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+ if (device_create_file(pfe->dev, &dev_attr_util))
+ goto err_util;
+#endif
+
+ if (device_create_file(pfe->dev, &dev_attr_bmu))
+ goto err_bmu;
+
+ if (device_create_file(pfe->dev, &dev_attr_hif))
+ goto err_hif;
+
+ if (device_create_file(pfe->dev, &dev_attr_gpi))
+ goto err_gpi;
+
+ if (device_create_file(pfe->dev, &dev_attr_drops))
+ goto err_drops;
+
+ if (device_create_file(pfe->dev, &dev_attr_tmu0_queues))
+ goto err_tmu0_queues;
+
+ if (device_create_file(pfe->dev, &dev_attr_tmu1_queues))
+ goto err_tmu1_queues;
+
+ if (device_create_file(pfe->dev, &dev_attr_tmu2_queues))
+ goto err_tmu2_queues;
+
+ if (device_create_file(pfe->dev, &dev_attr_tmu3_queues))
+ goto err_tmu3_queues;
+
+ if (device_create_file(pfe->dev, &dev_attr_pfemem))
+ goto err_pfemem;
+
+#ifdef HIF_NAPI_STATS
+ if (device_create_file(pfe->dev, &dev_attr_hif_napi_stats))
+ goto err_hif_napi_stats;
+#endif
+
+ return 0;
+
+#ifdef HIF_NAPI_STATS
+err_hif_napi_stats:
+ device_remove_file(pfe->dev, &dev_attr_pfemem);
+#endif
+
+err_pfemem:
+ device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
+
+err_tmu3_queues:
+ device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
+
+err_tmu2_queues:
+ device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
+
+err_tmu1_queues:
+ device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
+
+err_tmu0_queues:
+ device_remove_file(pfe->dev, &dev_attr_drops);
+
+err_drops:
+ device_remove_file(pfe->dev, &dev_attr_gpi);
+
+err_gpi:
+ device_remove_file(pfe->dev, &dev_attr_hif);
+
+err_hif:
+ device_remove_file(pfe->dev, &dev_attr_bmu);
+
+err_bmu:
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+ device_remove_file(pfe->dev, &dev_attr_util);
+
+err_util:
+#endif
+ device_remove_file(pfe->dev, &dev_attr_tmu);
+
+err_tmu:
+ device_remove_file(pfe->dev, &dev_attr_class);
+
+err_class:
+ return -ENODEV;
+}
+
+void pfe_sysfs_exit(struct pfe *pfe)
+{
+#ifdef HIF_NAPI_STATS
+ device_remove_file(pfe->dev, &dev_attr_hif_napi_stats);
+#endif
+ device_remove_file(pfe->dev, &dev_attr_pfemem);
+ device_remove_file(pfe->dev, &dev_attr_tmu3_queues);
+ device_remove_file(pfe->dev, &dev_attr_tmu2_queues);
+ device_remove_file(pfe->dev, &dev_attr_tmu1_queues);
+ device_remove_file(pfe->dev, &dev_attr_tmu0_queues);
+ device_remove_file(pfe->dev, &dev_attr_drops);
+ device_remove_file(pfe->dev, &dev_attr_gpi);
+ device_remove_file(pfe->dev, &dev_attr_hif);
+ device_remove_file(pfe->dev, &dev_attr_bmu);
+#if !defined(CONFIG_FSL_PPFE_UTIL_DISABLED)
+ device_remove_file(pfe->dev, &dev_attr_util);
+#endif
+ device_remove_file(pfe->dev, &dev_attr_tmu);
+ device_remove_file(pfe->dev, &dev_attr_class);
+}
diff --git a/drivers/staging/fsl_ppfe/pfe_sysfs.h b/drivers/staging/fsl_ppfe/pfe_sysfs.h
new file mode 100644
index 000000000000..406c8c2e3753
--- /dev/null
+++ b/drivers/staging/fsl_ppfe/pfe_sysfs.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2015-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ */
+
+#ifndef _PFE_SYSFS_H_
+#define _PFE_SYSFS_H_
+
+#include <linux/proc_fs.h>
+
+u32 qm_read_drop_stat(u32 tmu, u32 queue, u32 *total_drops, int do_reset);
+
+int pfe_sysfs_init(struct pfe *pfe);
+void pfe_sysfs_exit(struct pfe *pfe);
+
+#endif /* _PFE_SYSFS_H_ */
diff --git a/drivers/staging/fsl_qbman/Kconfig b/drivers/staging/fsl_qbman/Kconfig
new file mode 100644
index 000000000000..93dcb7d382c1
--- /dev/null
+++ b/drivers/staging/fsl_qbman/Kconfig
@@ -0,0 +1,228 @@
+config FSL_SDK_DPA
+ bool "Freescale Datapath Queue and Buffer management"
+ depends on !FSL_DPAA
+ select FSL_QMAN_FQ_LOOKUP if PPC64
+ select FSL_QMAN_FQ_LOOKUP if ARM64
+
+
+menu "Freescale Datapath QMan/BMan options"
+ depends on FSL_SDK_DPA
+
+config FSL_DPA_CHECKING
+ bool "additional driver checking"
+ default n
+ ---help---
+ Compiles in additional checks to sanity-check the drivers and any
+ use of it by other code. Not recommended for performance.
+
+config FSL_DPA_CAN_WAIT
+ bool
+ default y
+
+config FSL_DPA_CAN_WAIT_SYNC
+ bool
+ default y
+
+config FSL_DPA_PIRQ_FAST
+ bool
+ default y
+
+config FSL_DPA_PIRQ_SLOW
+ bool
+ default y
+
+config FSL_DPA_PORTAL_SHARE
+ bool
+ default y
+
+config FSL_SDK_BMAN
+ bool "Freescale Buffer Manager (BMan) support"
+ default y
+
+if FSL_SDK_BMAN
+
+config FSL_BMAN_CONFIG
+ bool "BMan device management"
+ default y
+ ---help---
+ If this linux image is running natively, you need this option. If this
+ linux image is running as a guest OS under the hypervisor, only one
+ guest OS ("the control plane") needs this option.
+
+config FSL_BMAN_TEST
+ tristate "BMan self-tests"
+ default n
+ ---help---
+ This option compiles self-test code for BMan.
+
+config FSL_BMAN_TEST_HIGH
+ bool "BMan high-level self-test"
+ depends on FSL_BMAN_TEST
+ default y
+ ---help---
+ This requires the presence of cpu-affine portals, and performs
+ high-level API testing with them (whichever portal(s) are affine to
+ the cpu(s) the test executes on).
+
+config FSL_BMAN_TEST_THRESH
+ bool "BMan threshold test"
+ depends on FSL_BMAN_TEST
+ default y
+ ---help---
+ Multi-threaded (SMP) test of BMan pool depletion. A pool is seeded
+ before multiple threads (one per cpu) create pool objects to track
+ depletion state changes. The pool is then drained to empty by a
+ "drainer" thread, and the other threads that they observe exactly
+ the depletion state changes that are expected.
+
+config FSL_BMAN_DEBUGFS
+ tristate "BMan debugfs interface"
+ depends on DEBUG_FS
+ default y
+ ---help---
+ This option compiles debugfs code for BMan.
+
+endif # FSL_SDK_BMAN
+
+config FSL_SDK_QMAN
+ bool "Freescale Queue Manager (QMan) support"
+ default y
+
+if FSL_SDK_QMAN
+
+config FSL_QMAN_POLL_LIMIT
+ int
+ default 32
+
+config FSL_QMAN_CONFIG
+ bool "QMan device management"
+ default y
+ ---help---
+ If this linux image is running natively, you need this option. If this
+ linux image is running as a guest OS under the hypervisor, only one
+ guest OS ("the control plane") needs this option.
+
+config FSL_QMAN_TEST
+ tristate "QMan self-tests"
+ default n
+ ---help---
+ This option compiles self-test code for QMan.
+
+config FSL_QMAN_TEST_STASH_POTATO
+ bool "QMan 'hot potato' data-stashing self-test"
+ depends on FSL_QMAN_TEST
+ default y
+ ---help---
+ This performs a "hot potato" style test enqueuing/dequeuing a frame
+ across a series of FQs scheduled to different portals (and cpus), with
+ DQRR, data and context stashing always on.
+
+config FSL_QMAN_TEST_HIGH
+ bool "QMan high-level self-test"
+ depends on FSL_QMAN_TEST
+ default y
+ ---help---
+ This requires the presence of cpu-affine portals, and performs
+ high-level API testing with them (whichever portal(s) are affine to
+ the cpu(s) the test executes on).
+
+config FSL_QMAN_DEBUGFS
+ tristate "QMan debugfs interface"
+ depends on DEBUG_FS
+ default y
+ ---help---
+ This option compiles debugfs code for QMan.
+
+# H/w settings that can be hard-coded for now.
+config FSL_QMAN_FQD_SZ
+ int "size of Frame Queue Descriptor region"
+ default 10
+ ---help---
+ This is the size of the FQD region defined as: PAGE_SIZE * (2^value)
+ ex: 10 => PAGE_SIZE * (2^10)
+ Note: Default device-trees now require minimum Kconfig setting of 10.
+
+config FSL_QMAN_PFDR_SZ
+ int "size of the PFDR pool"
+ default 13
+ ---help---
+ This is the size of the PFDR pool defined as: PAGE_SIZE * (2^value)
+ ex: 13 => PAGE_SIZE * (2^13)
+
+# Corenet initiator settings. Stash request queues are 4-deep to match cores'
+# ability to snarf. Stash priority is 3, other priorities are 2.
+config FSL_QMAN_CI_SCHED_CFG_SRCCIV
+ int
+ depends on FSL_QMAN_CONFIG
+ default 4
+config FSL_QMAN_CI_SCHED_CFG_SRQ_W
+ int
+ depends on FSL_QMAN_CONFIG
+ default 3
+config FSL_QMAN_CI_SCHED_CFG_RW_W
+ int
+ depends on FSL_QMAN_CONFIG
+ default 2
+config FSL_QMAN_CI_SCHED_CFG_BMAN_W
+ int
+ depends on FSL_QMAN_CONFIG
+ default 2
+
+# portal interrupt settings
+config FSL_QMAN_PIRQ_DQRR_ITHRESH
+ int
+ default 12
+config FSL_QMAN_PIRQ_MR_ITHRESH
+ int
+ default 4
+config FSL_QMAN_PIRQ_IPERIOD
+ int
+ default 100
+
+# 64 bit kernel support
+config FSL_QMAN_FQ_LOOKUP
+ bool
+ default n
+
+config QMAN_CEETM_UPDATE_PERIOD
+ int "Token update period for shaping, in nanoseconds"
+ default 1000
+ ---help---
+ Traffic shaping works by performing token calculations (using
+ credits) on shaper instances periodically. This update period
+ sets the granularity for how often those token rate credit
+ updates are performed, and thus determines the accuracy and
+ range of traffic rates that can be configured by users. The
+ reference manual recommends a 1 microsecond period as providing
+ a good balance between granularity and range.
+
+ Unless you know what you are doing, leave this value at its default.
+
+config FSL_QMAN_INIT_TIMEOUT
+ int "timeout for qman init stage, in seconds"
+ default 10
+ ---help---
+ The timeout setting to quit the initialization loop for non-control
+ partition in case the control partition fails to boot-up.
+
+endif # FSL_SDK_QMAN
+
+config FSL_USDPAA
+ bool "Freescale USDPAA process driver"
+ depends on FSL_SDK_DPA
+ default y
+ ---help---
+ This driver provides user-space access to kernel-managed
+ resource interfaces for USDPAA applications, on the assumption
+ that each process will open this device once. Specifically, this
+ device exposes functionality that would be awkward if exposed
+ via the portal devices - ie. this device exposes functionality
+ that is inherently process-wide rather than portal-specific.
+ This device is necessary for obtaining access to DMA memory and
+ for allocation of Qman and Bman resources. In short, if you wish
+ to use USDPAA applications, you need this.
+
+ If unsure, say Y.
+
+
+endmenu
diff --git a/drivers/staging/fsl_qbman/Makefile b/drivers/staging/fsl_qbman/Makefile
new file mode 100644
index 000000000000..538f5b566f34
--- /dev/null
+++ b/drivers/staging/fsl_qbman/Makefile
@@ -0,0 +1,32 @@
+subdir-ccflags-y := -Werror
+
+# Include netcomm SW specific definitions
+include $(srctree)/drivers/net/ethernet/freescale/sdk_fman/ncsw_config.mk
+ccflags-y += -I$(NET_DPA)
+
+# Common
+obj-$(CONFIG_FSL_SDK_DPA) += dpa_alloc.o
+obj-$(CONFIG_FSL_SDK_DPA) += qbman_driver.o
+
+# Bman
+obj-$(CONFIG_FSL_SDK_BMAN) += bman_high.o
+obj-$(CONFIG_FSL_BMAN_CONFIG) += bman_config.o bman_driver.o
+obj-$(CONFIG_FSL_BMAN_TEST) += bman_tester.o
+obj-$(CONFIG_FSL_BMAN_DEBUGFS) += bman_debugfs_interface.o
+bman_tester-y = bman_test.o
+bman_tester-$(CONFIG_FSL_BMAN_TEST_HIGH) += bman_test_high.o
+bman_tester-$(CONFIG_FSL_BMAN_TEST_THRESH) += bman_test_thresh.o
+bman_debugfs_interface-y = bman_debugfs.o
+
+# Qman
+obj-$(CONFIG_FSL_SDK_QMAN) += qman_high.o qman_utility.o
+obj-$(CONFIG_FSL_QMAN_CONFIG) += qman_config.o qman_driver.o
+obj-$(CONFIG_FSL_QMAN_TEST) += qman_tester.o
+qman_tester-y = qman_test.o
+qman_tester-$(CONFIG_FSL_QMAN_TEST_STASH_POTATO) += qman_test_hotpotato.o
+qman_tester-$(CONFIG_FSL_QMAN_TEST_HIGH) += qman_test_high.o
+obj-$(CONFIG_FSL_QMAN_DEBUGFS) += qman_debugfs_interface.o
+qman_debugfs_interface-y = qman_debugfs.o
+
+# USDPAA
+obj-$(CONFIG_FSL_USDPAA) += fsl_usdpaa.o fsl_usdpaa_irq.o
diff --git a/drivers/staging/fsl_qbman/bman_config.c b/drivers/staging/fsl_qbman/bman_config.c
new file mode 100644
index 000000000000..bb3977304f28
--- /dev/null
+++ b/drivers/staging/fsl_qbman/bman_config.c
@@ -0,0 +1,720 @@
+/* Copyright (c) 2009-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/cacheflush.h>
+#include "bman_private.h"
+#include <linux/of_reserved_mem.h>
+
+/* Last updated for v00.79 of the BG */
+
+struct bman;
+
+/* Register offsets */
+#define REG_POOL_SWDET(n) (0x0000 + ((n) * 0x04))
+#define REG_POOL_HWDET(n) (0x0100 + ((n) * 0x04))
+#define REG_POOL_SWDXT(n) (0x0200 + ((n) * 0x04))
+#define REG_POOL_HWDXT(n) (0x0300 + ((n) * 0x04))
+#define REG_POOL_CONTENT(n) (0x0600 + ((n) * 0x04))
+#define REG_FBPR_FPC 0x0800
+#define REG_STATE_IDLE 0x960
+#define REG_STATE_STOP 0x964
+#define REG_ECSR 0x0a00
+#define REG_ECIR 0x0a04
+#define REG_EADR 0x0a08
+#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
+#define REG_IP_REV_1 0x0bf8
+#define REG_IP_REV_2 0x0bfc
+#define REG_FBPR_BARE 0x0c00
+#define REG_FBPR_BAR 0x0c04
+#define REG_FBPR_AR 0x0c10
+#define REG_SRCIDR 0x0d04
+#define REG_LIODNR 0x0d08
+#define REG_ERR_ISR 0x0e00 /* + "enum bm_isr_reg" */
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define BM_EIRQ_IVCI 0x00000010 /* Invalid Command Verb */
+#define BM_EIRQ_FLWI 0x00000008 /* FBPR Low Watermark */
+#define BM_EIRQ_MBEI 0x00000004 /* Multi-bit ECC Error */
+#define BM_EIRQ_SBEI 0x00000002 /* Single-bit ECC Error */
+#define BM_EIRQ_BSCN 0x00000001 /* pool State Change Notification */
+
+/* BMAN_ECIR valid error bit */
+#define PORTAL_ECSR_ERR (BM_EIRQ_IVCI)
+
+union bman_ecir {
+ u32 ecir_raw;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 __reserved1:4;
+ u32 portal_num:4;
+ u32 __reserved2:12;
+ u32 numb:4;
+ u32 __reserved3:2;
+ u32 pid:6;
+#else
+ u32 pid:6;
+ u32 __reserved3:2;
+ u32 numb:4;
+ u32 __reserved2:12;
+ u32 portal_num:4;
+ u32 __reserved1:4;
+#endif
+ } __packed info;
+};
+
+union bman_eadr {
+ u32 eadr_raw;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 __reserved1:5;
+ u32 memid:3;
+ u32 __reserved2:14;
+ u32 eadr:10;
+#else
+ u32 eadr:10;
+ u32 __reserved2:14;
+ u32 memid:3;
+ u32 __reserved1:5;
+#endif
+ } __packed info;
+};
+
+struct bman_hwerr_txt {
+ u32 mask;
+ const char *txt;
+};
+
+#define BMAN_HWE_TXT(a, b) { .mask = BM_EIRQ_##a, .txt = b }
+
+static const struct bman_hwerr_txt bman_hwerr_txts[] = {
+ BMAN_HWE_TXT(IVCI, "Invalid Command Verb"),
+ BMAN_HWE_TXT(FLWI, "FBPR Low Watermark"),
+ BMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"),
+ BMAN_HWE_TXT(SBEI, "Single-bit ECC Error"),
+ BMAN_HWE_TXT(BSCN, "Pool State Change Notification"),
+};
+#define BMAN_HWE_COUNT ARRAY_SIZE(bman_hwerr_txts)
+
+struct bman_error_info_mdata {
+ u16 addr_mask;
+ u16 bits;
+ const char *txt;
+};
+
+#define BMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c}
+static const struct bman_error_info_mdata error_mdata[] = {
+ BMAN_ERR_MDATA(0x03FF, 192, "Stockpile memory"),
+ BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 1"),
+ BMAN_ERR_MDATA(0x00FF, 256, "SW portal ring memory port 2"),
+};
+#define BMAN_ERR_MDATA_COUNT ARRAY_SIZE(error_mdata)
+
+/* TODO: make this configurable via Kconfig */
+#define BMAN_ERRS_TO_UNENABLE (BM_EIRQ_FLWI)
+
+/**
+ * bm_err_isr_<reg>_<verb> - Manipulate global interrupt registers
+ * @v: for accessors that write values, this is the 32-bit value
+ *
+ * Manipulates BMAN_ERR_ISR, BMAN_ERR_IER, BMAN_ERR_ISDR, BMAN_ERR_IIR. All
+ * manipulations except bm_err_isr_[un]inhibit() use 32-bit masks composed of
+ * the BM_EIRQ_*** definitions. Note that "bm_err_isr_enable_write" means
+ * "write the enable register" rather than "enable the write register"!
+ */
+#define bm_err_isr_status_read(bm) \
+ __bm_err_isr_read(bm, bm_isr_status)
+#define bm_err_isr_status_clear(bm, m) \
+ __bm_err_isr_write(bm, bm_isr_status, m)
+#define bm_err_isr_enable_read(bm) \
+ __bm_err_isr_read(bm, bm_isr_enable)
+#define bm_err_isr_enable_write(bm, v) \
+ __bm_err_isr_write(bm, bm_isr_enable, v)
+#define bm_err_isr_disable_read(bm) \
+ __bm_err_isr_read(bm, bm_isr_disable)
+#define bm_err_isr_disable_write(bm, v) \
+ __bm_err_isr_write(bm, bm_isr_disable, v)
+#define bm_err_isr_inhibit(bm) \
+ __bm_err_isr_write(bm, bm_isr_inhibit, 1)
+#define bm_err_isr_uninhibit(bm) \
+ __bm_err_isr_write(bm, bm_isr_inhibit, 0)
+
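+/*
+ * Illustrative use of the accessors above: to mask a noisy interrupt
+ * source at run time,
+ *
+ *	ier = bm_err_isr_enable_read(bm);
+ *	bm_err_isr_enable_write(bm, ier & ~BM_EIRQ_FLWI);
+ *
+ * which is exactly what bman_isr() below does for the sources listed
+ * in BMAN_ERRS_TO_UNENABLE.
+ */
+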
+/*
+ * TODO: unimplemented registers
+ *
+ * BMAN_POOLk_SDCNT, BMAN_POOLk_HDCNT, BMAN_FULT,
+ * BMAN_VLDPL, BMAN_EECC, BMAN_SBET, BMAN_EINJ
+ */
+
+/* Encapsulate "struct bman *" as a cast of the register space address. */
+
+static struct bman *bm_create(void *regs)
+{
+ return (struct bman *)regs;
+}
+
+static inline u32 __bm_in(struct bman *bm, u32 offset)
+{
+ return in_be32((void *)bm + offset);
+}
+static inline void __bm_out(struct bman *bm, u32 offset, u32 val)
+{
+ out_be32((void *)bm + offset, val);
+}
+#define bm_in(reg) __bm_in(bm, REG_##reg)
+#define bm_out(reg, val) __bm_out(bm, REG_##reg, val)
+
+static u32 __bm_err_isr_read(struct bman *bm, enum bm_isr_reg n)
+{
+ return __bm_in(bm, REG_ERR_ISR + (n << 2));
+}
+
+static void __bm_err_isr_write(struct bman *bm, enum bm_isr_reg n, u32 val)
+{
+ __bm_out(bm, REG_ERR_ISR + (n << 2), val);
+}
+
+static void bm_get_version(struct bman *bm, u16 *id, u8 *major, u8 *minor)
+{
+ u32 v = bm_in(IP_REV_1);
+ *id = (v >> 16);
+ *major = (v >> 8) & 0xff;
+ *minor = v & 0xff;
+}
+
+static u32 __generate_thresh(u32 val, int roundup)
+{
+ u32 e = 0; /* exponent; 'val' becomes the coefficient */
+ int oddbit = 0;
+ while (val > 0xff) {
+ oddbit = val & 1;
+ val >>= 1;
+ e++;
+ if (roundup && oddbit)
+ val++;
+ }
+ DPA_ASSERT(e < 0x10);
+ return val | (e << 8);
+}
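+
+/*
+ * Worked example (illustrative): __generate_thresh(1000, 0) halves the
+ * value twice, leaving coefficient 250 with exponent 2, and returns
+ * 250 | (2 << 8) = 0x2fa, i.e. 250 * 2^2 = 1000 exactly. With rounding
+ * up, __generate_thresh(1001, 1) returns 251 | (2 << 8), encoding 1004,
+ * the nearest representable value that does not undershoot the request.
+ */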
+
+static void bm_set_pool(struct bman *bm, u8 pool, u32 swdet, u32 swdxt,
+ u32 hwdet, u32 hwdxt)
+{
+ DPA_ASSERT(pool < bman_pool_max);
+ bm_out(POOL_SWDET(pool), __generate_thresh(swdet, 0));
+ bm_out(POOL_SWDXT(pool), __generate_thresh(swdxt, 1));
+ bm_out(POOL_HWDET(pool), __generate_thresh(hwdet, 0));
+ bm_out(POOL_HWDXT(pool), __generate_thresh(hwdxt, 1));
+}
+
+static void bm_set_memory(struct bman *bm, u64 ba, int prio, u32 size)
+{
+ u32 exp = ilog2(size);
+ /* choke if size isn't within range */
+ DPA_ASSERT((size >= 4096) && (size <= 1073741824) &&
+ is_power_of_2(size));
+ /* choke if '[e]ba' has lower-alignment than 'size' */
+ DPA_ASSERT(!(ba & (size - 1)));
+ bm_out(FBPR_BARE, upper_32_bits(ba));
+ bm_out(FBPR_BAR, lower_32_bits(ba));
+ bm_out(FBPR_AR, (prio ? 0x40000000 : 0) | (exp - 1));
+}
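+
+/*
+ * Example (illustrative): a 16 MB FBPR region (size = 2^24) must be
+ * 16 MB aligned; bm_set_memory() then writes (24 - 1) = 23 into the
+ * FBPR_AR size field, with bit 30 set if priority was requested.
+ */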
+
+/*****************/
+/* Config driver */
+/*****************/
+
+/* TODO: Kconfig these? */
+#define DEFAULT_FBPR_SZ (PAGE_SIZE << 12)
+
+/* We support only one of these. */
+static struct bman *bm;
+static struct device_node *bm_node;
+
+/* And this state belongs to 'bm'. It is set during fsl_bman_init(), but used
+ * during bman_init_ccsr(). */
+static dma_addr_t fbpr_a;
+static size_t fbpr_sz = DEFAULT_FBPR_SZ;
+
+static int bman_fbpr(struct reserved_mem *rmem)
+{
+ fbpr_a = rmem->base;
+ fbpr_sz = rmem->size;
+
+ WARN_ON(!(fbpr_a && fbpr_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(bman_fbpr, "fsl,bman-fbpr", bman_fbpr);
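+
+/*
+ * Illustrative reserved-memory node consumed by the hook above (names
+ * and sizes are an example only; real device-trees may differ):
+ *
+ *	bman_fbpr: bman-fbpr {
+ *		compatible = "fsl,bman-fbpr";
+ *		size = <0 0x1000000>;
+ *		alignment = <0 0x1000000>;
+ *	};
+ */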
+
+static int __init fsl_bman_init(struct device_node *node)
+{
+ struct resource res;
+ u32 __iomem *regs;
+ const char *s;
+ int ret, standby = 0;
+ u16 id;
+ u8 major, minor;
+
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ pr_err("Can't get %s property 'reg'\n",
+ node->full_name);
+ return ret;
+ }
+ s = of_get_property(node, "fsl,hv-claimable", &ret);
+ if (s && !strcmp(s, "standby"))
+ standby = 1;
+ /* Global configuration */
+ regs = ioremap(res.start, resource_size(&res));
+ bm = bm_create(regs);
+ BUG_ON(!bm);
+ bm_node = node;
+ bm_get_version(bm, &id, &major, &minor);
+ pr_info("Bman ver:%04x,%02x,%02x\n", id, major, minor);
+ if ((major == 1) && (minor == 0)) {
+ bman_ip_rev = BMAN_REV10;
+ bman_pool_max = 64;
+ } else if ((major == 2) && (minor == 0)) {
+ bman_ip_rev = BMAN_REV20;
+ bman_pool_max = 8;
+ } else if ((major == 2) && (minor == 1)) {
+ bman_ip_rev = BMAN_REV21;
+ bman_pool_max = 64;
+ } else {
+ pr_warn("unknown Bman version, default to rev1.0\n");
+ bman_ip_rev = BMAN_REV10;
+ bman_pool_max = 64;
+ }
+
+ if (standby) {
+ pr_info(" -> in standby mode\n");
+ return 0;
+ }
+ return 0;
+}
+
+int bman_have_ccsr(void)
+{
+ return bm ? 1 : 0;
+}
+
+int bm_pool_set(u32 bpid, const u32 *thresholds)
+{
+ if (!bm)
+ return -ENODEV;
+ bm_set_pool(bm, bpid, thresholds[0],
+ thresholds[1], thresholds[2],
+ thresholds[3]);
+ return 0;
+}
+EXPORT_SYMBOL(bm_pool_set);
+
+__init int bman_init_early(void)
+{
+ struct device_node *dn;
+ int ret;
+
+ for_each_compatible_node(dn, NULL, "fsl,bman") {
+ if (bm)
+ pr_err("%s: only one 'fsl,bman' allowed\n",
+ dn->full_name);
+ else {
+ if (!of_device_is_available(dn))
+ continue;
+
+ ret = fsl_bman_init(dn);
+ BUG_ON(ret);
+ }
+ }
+ return 0;
+}
+postcore_initcall_sync(bman_init_early);
+
+
+static void log_edata_bits(u32 bit_count)
+{
+ u32 i, j, mask = 0xffffffff;
+
+ pr_warn("Bman ErrInt, EDATA:\n");
+ i = bit_count / 32;
+ if (bit_count % 32) {
+ i++;
+ mask = ~(mask << (bit_count % 32));
+ }
+ j = 16 - i;
+ pr_warn(" 0x%08x\n", bm_in(EDATA(j)) & mask);
+ j++;
+ for (; j < 16; j++)
+ pr_warn(" 0x%08x\n", bm_in(EDATA(j)));
+}
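+
+/*
+ * Example (illustrative): for the 192-bit stockpile entry, bit_count/32
+ * is 6 with no remainder, so the six words EDATA(10)..EDATA(15) are
+ * dumped with a full mask; a 200-bit entry would dump seven words with
+ * the first masked down to its low 8 bits.
+ */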
+
+static void log_additional_error_info(u32 isr_val, u32 ecsr_val)
+{
+ union bman_ecir ecir_val;
+ union bman_eadr eadr_val;
+
+ ecir_val.ecir_raw = bm_in(ECIR);
+ /* Is portal info valid */
+ if (ecsr_val & PORTAL_ECSR_ERR) {
+ pr_warn("Bman ErrInt: SWP id %d, numb %d, pid %d\n",
+ ecir_val.info.portal_num, ecir_val.info.numb,
+ ecir_val.info.pid);
+ }
+ if (ecsr_val & (BM_EIRQ_SBEI|BM_EIRQ_MBEI)) {
+ eadr_val.eadr_raw = bm_in(EADR);
+ pr_warn("Bman ErrInt: EADR Memory: %s, 0x%x\n",
+ error_mdata[eadr_val.info.memid].txt,
+ error_mdata[eadr_val.info.memid].addr_mask
+ & eadr_val.info.eadr);
+ log_edata_bits(error_mdata[eadr_val.info.memid].bits);
+ }
+}
+
+/* Bman interrupt handler */
+static irqreturn_t bman_isr(int irq, void *ptr)
+{
+ u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+
+ ier_val = bm_err_isr_enable_read(bm);
+ isr_val = bm_err_isr_status_read(bm);
+ ecsr_val = bm_in(ECSR);
+ isr_mask = isr_val & ier_val;
+
+ if (!isr_mask)
+ return IRQ_NONE;
+ for (i = 0; i < BMAN_HWE_COUNT; i++) {
+ if (bman_hwerr_txts[i].mask & isr_mask) {
+ pr_warn("Bman ErrInt: %s\n", bman_hwerr_txts[i].txt);
+ if (bman_hwerr_txts[i].mask & ecsr_val) {
+ log_additional_error_info(isr_mask, ecsr_val);
+ /* Re-arm error capture registers */
+ bm_out(ECSR, ecsr_val);
+ }
+ if (bman_hwerr_txts[i].mask & BMAN_ERRS_TO_UNENABLE) {
+ pr_devel("Bman un-enabling error 0x%x\n",
+ bman_hwerr_txts[i].mask);
+ ier_val &= ~bman_hwerr_txts[i].mask;
+ bm_err_isr_enable_write(bm, ier_val);
+ }
+ }
+ }
+ bm_err_isr_status_clear(bm, isr_val);
+ return IRQ_HANDLED;
+}
+
+static int __bind_irq(void)
+{
+ int ret, err_irq;
+
+ err_irq = of_irq_to_resource(bm_node, 0, NULL);
+ if (err_irq == 0) {
+ pr_info("Can't get %s property '%s'\n", bm_node->full_name,
+ "interrupts");
+ return -ENODEV;
+ }
+ ret = request_irq(err_irq, bman_isr, IRQF_SHARED, "bman-err", bm_node);
+ if (ret) {
+ pr_err("request_irq() failed %d for '%s'\n", ret,
+ bm_node->full_name);
+ return -ENODEV;
+ }
+ /* Disable Buffer Pool State Change */
+ bm_err_isr_disable_write(bm, BM_EIRQ_BSCN);
+ /* Write-to-clear any stale bits, (eg. starvation being asserted prior
+ * to resource allocation during driver init). */
+ bm_err_isr_status_clear(bm, 0xffffffff);
+ /* Enable Error Interrupts */
+ bm_err_isr_enable_write(bm, 0xffffffff);
+ return 0;
+}
+
+int bman_init_ccsr(struct device_node *node)
+{
+ int ret;
+ if (!bman_have_ccsr())
+ return 0;
+ if (node != bm_node)
+ return -EINVAL;
+ /* FBPR memory */
+ bm_set_memory(bm, fbpr_a, 0, fbpr_sz);
+ pr_info("bman-fbpr addr %pad size 0x%zx\n", &fbpr_a, fbpr_sz);
+
+ ret = __bind_irq();
+ if (ret)
+ return ret;
+ return 0;
+}
+
+u32 bm_pool_free_buffers(u32 bpid)
+{
+ return bm_in(POOL_CONTENT(bpid));
+}
+
+#ifdef CONFIG_SYSFS
+
+#define DRV_NAME "fsl-bman"
+#define SBEC_MAX_ID 1
+#define SBEC_MIN_ID 0
+
+static ssize_t show_fbpr_fpc(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(FBPR_FPC));
+}
+
+static ssize_t show_pool_count(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ u32 data;
+ int i;
+
+ if (sscanf(dev_attr->attr.name, "%d", &i) != 1 || (i >= bman_pool_max))
+ return -EINVAL;
+ data = bm_in(POOL_CONTENT(i));
+ return snprintf(buf, PAGE_SIZE, "%d\n", data);
+}
+
+static ssize_t show_err_isr(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", bm_in(ERR_ISR));
+}
+
+static ssize_t show_sbec(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ int i;
+
+ if (sscanf(dev_attr->attr.name, "sbec_%d", &i) != 1)
+ return -EINVAL;
+ if (i < SBEC_MIN_ID || i > SBEC_MAX_ID)
+ return -EINVAL;
+ return snprintf(buf, PAGE_SIZE, "%u\n", bm_in(SBEC(i)));
+}
+
+static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL);
+static DEVICE_ATTR(fbpr_fpc, S_IRUSR, show_fbpr_fpc, NULL);
+
+/* DEVICE_ATTR() is not used here, as 64 instances would be required.
+ * They are initialized at probe time instead. */
+static char *name_attrs_pool_count; /* "xx" + null-terminator */
+static struct device_attribute *dev_attr_buffer_pool_count;
+
+static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL);
+
+static struct attribute *bman_dev_attributes[] = {
+ &dev_attr_fbpr_fpc.attr,
+ &dev_attr_err_isr.attr,
+ NULL
+};
+
+static struct attribute *bman_dev_ecr_attributes[] = {
+ &dev_attr_sbec_0.attr,
+ &dev_attr_sbec_1.attr,
+ NULL
+};
+
+static struct attribute **bman_dev_pool_count_attributes;
+
+
+/* root level */
+static const struct attribute_group bman_dev_attr_grp = {
+ .name = NULL,
+ .attrs = bman_dev_attributes
+};
+static const struct attribute_group bman_dev_ecr_grp = {
+ .name = "error_capture",
+ .attrs = bman_dev_ecr_attributes
+};
+static struct attribute_group bman_dev_pool_countent_grp = {
+ .name = "pool_count",
+};
+
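+/*
+ * Illustrative usage of the groups above (the parent path depends on
+ * the platform device name):
+ *
+ *   cat /sys/devices/platform/<bman-device>/error_capture/sbec_0
+ *   cat /sys/devices/platform/<bman-device>/pool_count/4
+ *
+ * The latter returns the free-buffer count of pool 4, read from its
+ * POOL_CONTENT register.
+ */
+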
+static int of_fsl_bman_remove(struct platform_device *ofdev)
+{
+ sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp);
+ return 0;
+}
+
+static int of_fsl_bman_probe(struct platform_device *ofdev)
+{
+ int ret, i;
+
+ ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_attr_grp);
+ if (ret)
+ goto done;
+ ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_ecr_grp);
+ if (ret)
+ goto del_group_0;
+
+ name_attrs_pool_count = kmalloc(sizeof(char) * bman_pool_max * 3,
+ GFP_KERNEL);
+ if (!name_attrs_pool_count) {
+ pr_err("Can't alloc name_attrs_pool_count\n");
+ ret = -ENOMEM;
+ goto del_group_1;
+ }
+
+ dev_attr_buffer_pool_count = kmalloc(sizeof(struct device_attribute) *
+ bman_pool_max, GFP_KERNEL);
+ if (!dev_attr_buffer_pool_count) {
+ pr_err("Can't alloc dev_attr_buffer_pool_count\n");
+ ret = -ENOMEM;
+ goto del_group_2;
+ }
+
+ bman_dev_pool_count_attributes = kmalloc(sizeof(struct attribute *) *
+ (bman_pool_max + 1), GFP_KERNEL);
+ if (!bman_dev_pool_count_attributes) {
+ pr_err("Can't alloc bman_dev_pool_count_attributes\n");
+ ret = -ENOMEM;
+ goto del_group_3;
+ }
+
+ for (i = 0; i < bman_pool_max; i++) {
+ ret = scnprintf((name_attrs_pool_count + i * 3), 3, "%d", i);
+ if (!ret) {
+ ret = -EINVAL;
+ goto del_group_4;
+ }
+ dev_attr_buffer_pool_count[i].attr.name =
+ (name_attrs_pool_count + i * 3);
+ dev_attr_buffer_pool_count[i].attr.mode = S_IRUSR;
+ dev_attr_buffer_pool_count[i].show = show_pool_count;
+ bman_dev_pool_count_attributes[i] =
+ &dev_attr_buffer_pool_count[i].attr;
+ sysfs_attr_init(bman_dev_pool_count_attributes[i]);
+ }
+ bman_dev_pool_count_attributes[bman_pool_max] = NULL;
+
+ bman_dev_pool_countent_grp.attrs = bman_dev_pool_count_attributes;
+
+ ret = sysfs_create_group(&ofdev->dev.kobj, &bman_dev_pool_countent_grp);
+ if (ret)
+ goto del_group_4;
+
+ goto done;
+
+del_group_4:
+ kfree(bman_dev_pool_count_attributes);
+del_group_3:
+ kfree(dev_attr_buffer_pool_count);
+del_group_2:
+ kfree(name_attrs_pool_count);
+del_group_1:
+ sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_ecr_grp);
+del_group_0:
+ sysfs_remove_group(&ofdev->dev.kobj, &bman_dev_attr_grp);
+done:
+ if (ret)
+ dev_err(&ofdev->dev,
+ "Cannot create dev attributes ret=%d\n", ret);
+ return ret;
+}
+
+static const struct of_device_id of_fsl_bman_ids[] = {
+ {
+ .compatible = "fsl,bman",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_fsl_bman_ids);
+
+#ifdef CONFIG_SUSPEND
+static u32 saved_isdr;
+
+static int bman_pm_suspend_noirq(struct device *dev)
+{
+ uint32_t idle_state;
+
+ suspend_unused_bportal();
+ /* save isdr, disable all, clear isr */
+ saved_isdr = bm_err_isr_disable_read(bm);
+ bm_err_isr_disable_write(bm, 0xffffffff);
+ bm_err_isr_status_clear(bm, 0xffffffff);
+
+ if (bman_ip_rev < BMAN_REV21) {
+#ifdef CONFIG_PM_DEBUG
+ pr_info("Bman version doesn't have STATE_IDLE\n");
+#endif
+ return 0;
+ }
+ idle_state = bm_in(STATE_IDLE);
+ if (!(idle_state & 0x1)) {
+ pr_err("Bman not idle 0x%x aborting\n", idle_state);
+ bm_err_isr_disable_write(bm, saved_isdr);
+ resume_unused_bportal();
+ return -EBUSY;
+ }
+#ifdef CONFIG_PM_DEBUG
+ pr_info("Bman suspend code, IDLE_STAT = 0x%x\n", idle_state);
+#endif
+ return 0;
+}
+
+static int bman_pm_resume_noirq(struct device *dev)
+{
+ /* restore isdr */
+ bm_err_isr_disable_write(bm, saved_isdr);
+ resume_unused_bportal();
+ return 0;
+}
+#else
+#define bman_pm_suspend_noirq NULL
+#define bman_pm_resume_noirq NULL
+#endif
+
+static const struct dev_pm_ops bman_pm_ops = {
+ .suspend_noirq = bman_pm_suspend_noirq,
+ .resume_noirq = bman_pm_resume_noirq,
+};
+
+static struct platform_driver of_fsl_bman_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ .of_match_table = of_fsl_bman_ids,
+ .pm = &bman_pm_ops,
+ },
+ .probe = of_fsl_bman_probe,
+ .remove = of_fsl_bman_remove,
+};
+
+static int bman_ctrl_init(void)
+{
+ return platform_driver_register(&of_fsl_bman_driver);
+}
+
+static void bman_ctrl_exit(void)
+{
+ platform_driver_unregister(&of_fsl_bman_driver);
+}
+
+module_init(bman_ctrl_init);
+module_exit(bman_ctrl_exit);
+
+#endif /* CONFIG_SYSFS */
diff --git a/drivers/staging/fsl_qbman/bman_debugfs.c b/drivers/staging/fsl_qbman/bman_debugfs.c
new file mode 100644
index 000000000000..0e74c99befc5
--- /dev/null
+++ b/drivers/staging/fsl_qbman/bman_debugfs.c
@@ -0,0 +1,125 @@
+/* Copyright 2010-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/module.h>
+#include <linux/fsl_bman.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/of.h>
+
+static struct dentry *dfs_root; /* debugfs root directory */
+
+/*******************************************************************************
+ * Query Buffer Pool State
+ ******************************************************************************/
+static int query_bp_state_show(struct seq_file *file, void *offset)
+{
+ int ret;
+ struct bm_pool_state state;
+ int i, j;
+ u32 mask;
+
+ memset(&state, 0, sizeof(struct bm_pool_state));
+ ret = bman_query_pools(&state);
+ if (ret) {
+ seq_printf(file, "Error %d\n", ret);
+ return 0;
+ }
+ seq_puts(file, "bp_id free_buffers_avail bp_depleted\n");
+ for (i = 0; i < 2; i++) {
+ mask = 0x80000000;
+ for (j = 0; j < 32; j++) {
+ seq_printf(file,
+ " %-2u %-3s %-3s\n",
+ (i*32)+j,
+ (state.as.state.__state[i] & mask) ? "no" : "yes",
+ (state.ds.state.__state[i] & mask) ? "yes" : "no");
+ mask >>= 1;
+ }
+ }
+ return 0;
+}
+
+static int query_bp_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, query_bp_state_show, NULL);
+}
+
+static const struct file_operations query_bp_state_fops = {
+ .owner = THIS_MODULE,
+ .open = query_bp_state_open,
+ .read = seq_read,
+ .release = single_release,
+};
+
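+/*
+ * Illustrative usage: with debugfs mounted at /sys/kernel/debug,
+ *
+ *   cat /sys/kernel/debug/bman/query_bp_state
+ *
+ * prints one availability/depletion line per buffer pool.
+ */
+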
+static int __init bman_debugfs_module_init(void)
+{
+ int ret = 0;
+ struct dentry *d;
+ struct device_node *dn;
+
+ dn = of_find_compatible_node(NULL, NULL, "fsl,bman");
+ if (!dn) {
+ pr_debug("No fsl,bman node\n");
+ return 0;
+ }
+ dfs_root = debugfs_create_dir("bman", NULL);
+ if (dfs_root == NULL) {
+ ret = -ENOMEM;
+ pr_err("Cannot create bman debugfs dir\n");
+ goto _return;
+ }
+ d = debugfs_create_file("query_bp_state",
+ S_IRUGO,
+ dfs_root,
+ NULL,
+ &query_bp_state_fops);
+ if (d == NULL) {
+ ret = -ENOMEM;
+ pr_err("Cannot create query_bp_state\n");
+ goto _return;
+ }
+ return 0;
+
+_return:
+ debugfs_remove_recursive(dfs_root);
+ return ret;
+}
+
+static void __exit bman_debugfs_module_exit(void)
+{
+ debugfs_remove_recursive(dfs_root);
+}
+
+
+module_init(bman_debugfs_module_init);
+module_exit(bman_debugfs_module_exit);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/staging/fsl_qbman/bman_driver.c b/drivers/staging/fsl_qbman/bman_driver.c
new file mode 100644
index 000000000000..9a3397d6b1d0
--- /dev/null
+++ b/drivers/staging/fsl_qbman/bman_driver.c
@@ -0,0 +1,559 @@
+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "bman_low.h"
+#ifdef CONFIG_HOTPLUG_CPU
+#include <linux/cpu.h>
+#endif
+/*
+ * Global variables of the max portal/pool number this bman version supported
+ */
+u16 bman_ip_rev;
+EXPORT_SYMBOL(bman_ip_rev);
+u16 bman_pool_max;
+EXPORT_SYMBOL(bman_pool_max);
+static u16 bman_portal_max;
+
+/* After initialising cpus that own shared portal configs, we cache the
+ * resulting portals (ie. not just the configs) in this array. Then we
+ * initialise slave cpus that don't have their own portals, redirecting them to
+ * portals from this cache in a round-robin assignment. */
+static struct bman_portal *shared_portals[NR_CPUS];
+static int num_shared_portals;
+static int shared_portals_idx;
+static LIST_HEAD(unused_pcfgs);
+static DEFINE_SPINLOCK(unused_pcfgs_lock);
+static void *affine_bportals[NR_CPUS];
+
+static int __init fsl_bpool_init(struct device_node *node)
+{
+ int ret;
+ u32 *thresh, *bpid = (u32 *)of_get_property(node, "fsl,bpid", &ret);
+ if (!bpid || (ret != 4)) {
+ pr_err("Can't get %s property 'fsl,bpid'\n", node->full_name);
+ return -ENODEV;
+ }
+ thresh = (u32 *)of_get_property(node, "fsl,bpool-thresholds", &ret);
+ if (thresh) {
+ if (ret != 16) {
+ pr_err("Invalid %s property '%s'\n",
+ node->full_name, "fsl,bpool-thresholds");
+ return -ENODEV;
+ }
+ }
+ if (thresh) {
+#ifdef CONFIG_FSL_BMAN_CONFIG
+ ret = bm_pool_set(be32_to_cpu(*bpid), thresh);
+ if (ret)
+ pr_err("No CCSR node for %s property '%s'\n",
+ node->full_name, "fsl,bpool-thresholds");
+ return ret;
+#else
+ pr_err("Ignoring %s property '%s', no CCSR support\n",
+ node->full_name, "fsl,bpool-thresholds");
+#endif
+ }
+ return 0;
+}
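+
+/*
+ * Illustrative bpool node (values are an example only). The four
+ * threshold cells are consumed as swdet/swdxt/hwdet/hwdxt:
+ *
+ *	buffer-pool@7 {
+ *		compatible = "fsl,bpool";
+ *		fsl,bpid = <7>;
+ *		fsl,bpool-thresholds = <0x400 0x800 0x0 0x0>;
+ *	};
+ */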
+
+static int __init fsl_bpid_range_init(struct device_node *node)
+{
+ int ret;
+ u32 *range = (u32 *)of_get_property(node, "fsl,bpid-range", &ret);
+ if (!range) {
+ pr_err("No 'fsl,bpid-range' property in node %s\n",
+ node->full_name);
+ return -EINVAL;
+ }
+ if (ret != 8) {
+ pr_err("'fsl,bpid-range' is not a 2-cell range in node %s\n",
+ node->full_name);
+ return -EINVAL;
+ }
+ bman_seed_bpid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
+ pr_info("Bman: BPID allocator includes range %d:%d\n",
+ be32_to_cpu(range[0]), be32_to_cpu(range[1]));
+ return 0;
+}
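+
+/*
+ * Example (illustrative): a node carrying
+ *
+ *	fsl,bpid-range = <32 8>;
+ *
+ * seeds the allocator with the eight BPIDs 32..39 (start, count).
+ */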
+
+static struct bm_portal_config * __init parse_pcfg(struct device_node *node)
+{
+ struct bm_portal_config *pcfg;
+ const u32 *index;
+ int irq, ret;
+ resource_size_t len;
+
+ pcfg = kmalloc(sizeof(*pcfg), GFP_KERNEL);
+ if (!pcfg) {
+ pr_err("can't allocate portal config");
+ return NULL;
+ }
+
+ if (of_device_is_compatible(node, "fsl,bman-portal-1.0") ||
+ of_device_is_compatible(node, "fsl,bman-portal-1.0.0")) {
+ bman_ip_rev = BMAN_REV10;
+ bman_pool_max = 64;
+ bman_portal_max = 10;
+ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.0") ||
+ of_device_is_compatible(node, "fsl,bman-portal-2.0.8")) {
+ bman_ip_rev = BMAN_REV20;
+ bman_pool_max = 8;
+ bman_portal_max = 3;
+ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.0")) {
+ bman_ip_rev = BMAN_REV21;
+ bman_pool_max = 64;
+ bman_portal_max = 50;
+ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.1")) {
+ bman_ip_rev = BMAN_REV21;
+ bman_pool_max = 64;
+ bman_portal_max = 25;
+ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.2")) {
+ bman_ip_rev = BMAN_REV21;
+ bman_pool_max = 64;
+ bman_portal_max = 18;
+ } else if (of_device_is_compatible(node, "fsl,bman-portal-2.1.3")) {
+ bman_ip_rev = BMAN_REV21;
+ bman_pool_max = 64;
+ bman_portal_max = 10;
+ } else {
+ pr_warn("unknown BMan version in portal node,"
+ "default to rev1.0\n");
+ bman_ip_rev = BMAN_REV10;
+ bman_pool_max = 64;
+ bman_portal_max = 10;
+ }
+
+ ret = of_address_to_resource(node, DPA_PORTAL_CE,
+ &pcfg->addr_phys[DPA_PORTAL_CE]);
+ if (ret) {
+ pr_err("Can't get %s property 'reg::CE'\n", node->full_name);
+ goto err;
+ }
+ ret = of_address_to_resource(node, DPA_PORTAL_CI,
+ &pcfg->addr_phys[DPA_PORTAL_CI]);
+ if (ret) {
+ pr_err("Can't get %s property 'reg::CI'\n", node->full_name);
+ goto err;
+ }
+
+ index = of_get_property(node, "cell-index", &ret);
+ if (!index || (ret != 4)) {
+ pr_err("Can't get %s property '%s'\n", node->full_name,
+ "cell-index");
+ goto err;
+ }
+ if (be32_to_cpu(*index) >= bman_portal_max) {
+ pr_err("BMan portal cell index %d out of range, max %d\n",
+ be32_to_cpu(*index), bman_portal_max);
+ goto err;
+ }
+
+ pcfg->public_cfg.cpu = -1;
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq == 0) {
+ pr_err("Can't get %s property 'interrupts'\n", node->full_name);
+ goto err;
+ }
+ pcfg->public_cfg.irq = irq;
+ pcfg->public_cfg.index = be32_to_cpu(*index);
+ bman_depletion_fill(&pcfg->public_cfg.mask);
+
+ len = resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]);
+ if (len != (unsigned long)len)
+ goto err;
+
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_cache_ns(
+ pcfg->addr_phys[DPA_PORTAL_CE].start,
+ resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]));
+ pcfg->addr_virt[DPA_PORTAL_CI] = ioremap(
+ pcfg->addr_phys[DPA_PORTAL_CI].start,
+ resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]));
+
+#else
+ pcfg->addr_virt[DPA_PORTAL_CE] =
+ memremap(pcfg->addr_phys[DPA_PORTAL_CE].start,
+ (unsigned long)len, MEMREMAP_WB);
+
+ pcfg->addr_virt[DPA_PORTAL_CI] =
+ ioremap(pcfg->addr_phys[DPA_PORTAL_CI].start,
+ resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]));
+
+#endif
+ /* disable bp depletion */
+ __raw_writel(0x0, pcfg->addr_virt[DPA_PORTAL_CI] + BM_REG_SCN(0));
+ __raw_writel(0x0, pcfg->addr_virt[DPA_PORTAL_CI] + BM_REG_SCN(1));
+ return pcfg;
+err:
+ kfree(pcfg);
+ return NULL;
+}
+
+static struct bm_portal_config *get_pcfg(struct list_head *list)
+{
+ struct bm_portal_config *pcfg;
+ if (list_empty(list))
+ return NULL;
+ pcfg = list_entry(list->prev, struct bm_portal_config, list);
+ list_del(&pcfg->list);
+ return pcfg;
+}
+
+static struct bm_portal_config *get_pcfg_idx(struct list_head *list,
+ uint32_t idx)
+{
+ struct bm_portal_config *pcfg;
+ if (list_empty(list))
+ return NULL;
+ list_for_each_entry(pcfg, list, list) {
+ if (pcfg->public_cfg.index == idx) {
+ list_del(&pcfg->list);
+ return pcfg;
+ }
+ }
+ return NULL;
+}
+
+struct bm_portal_config *bm_get_unused_portal(void)
+{
+ return bm_get_unused_portal_idx(QBMAN_ANY_PORTAL_IDX);
+}
+
+struct bm_portal_config *bm_get_unused_portal_idx(uint32_t idx)
+{
+ struct bm_portal_config *ret;
+ spin_lock(&unused_pcfgs_lock);
+ if (idx == QBMAN_ANY_PORTAL_IDX)
+ ret = get_pcfg(&unused_pcfgs);
+ else
+ ret = get_pcfg_idx(&unused_pcfgs, idx);
+ spin_unlock(&unused_pcfgs_lock);
+ return ret;
+}
+
+void bm_put_unused_portal(struct bm_portal_config *pcfg)
+{
+ spin_lock(&unused_pcfgs_lock);
+ list_add(&pcfg->list, &unused_pcfgs);
+ spin_unlock(&unused_pcfgs_lock);
+}
+
+static struct bman_portal *init_pcfg(struct bm_portal_config *pcfg)
+{
+ struct bman_portal *p;
+ p = bman_create_affine_portal(pcfg);
+ if (p) {
+#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
+ bman_p_irqsource_add(p, BM_PIRQ_RCRI | BM_PIRQ_BSCN);
+#endif
+ pr_info("Bman portal %sinitialised, cpu %d\n",
+ pcfg->public_cfg.is_shared ? "(shared) " : "",
+ pcfg->public_cfg.cpu);
+ affine_bportals[pcfg->public_cfg.cpu] = p;
+ } else
+ pr_crit("Bman portal failure on cpu %d\n",
+ pcfg->public_cfg.cpu);
+ return p;
+}
+
+static void init_slave(int cpu)
+{
+ struct bman_portal *p;
+ p = bman_create_affine_slave(shared_portals[shared_portals_idx++], cpu);
+ if (!p)
+ pr_err("Bman slave portal failure on cpu %d\n", cpu);
+ else
+ pr_info("Bman portal %sinitialised, cpu %d\n", "(slave) ", cpu);
+ if (shared_portals_idx >= num_shared_portals)
+ shared_portals_idx = 0;
+ affine_bportals[cpu] = p;
+}
+
+/* Bootarg "bportals=[...]" has the same syntax as "qportals=", and so the
+ * parsing is in dpa_sys.h. The syntax is a comma-separated list of indexes
+ * and/or ranges of indexes, with each being optionally prefixed by "s" to
+ * explicitly mark it or them for sharing.
+ * Eg;
+ * bportals=s0,1-3,s4
+ * means that cpus 1,2,3 get "unshared" portals, cpus 0 and 4 get "shared"
+ * portals, and any remaining cpus share the portals that are assigned to cpus 0
+ * or 4, selected in a round-robin fashion. (In this example, cpu 5 would share
+ * cpu 0's portal, cpu 6 would share cpu4's portal, and cpu 7 would share cpu
+ * 0's portal.) */
+static struct cpumask want_unshared __initdata; /* cpus requested without "s" */
+static struct cpumask want_shared __initdata; /* cpus requested with "s" */
+
+static int __init parse_bportals(char *str)
+{
+ return parse_portals_bootarg(str, &want_shared, &want_unshared,
+ "bportals");
+}
+__setup("bportals=", parse_bportals);
+
+static int bman_offline_cpu(unsigned int cpu)
+{
+ struct bman_portal *p;
+ const struct bm_portal_config *pcfg;
+ p = (struct bman_portal *)affine_bportals[cpu];
+ if (p) {
+ pcfg = bman_get_bm_portal_config(p);
+ if (pcfg)
+ irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0));
+ }
+ return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int bman_online_cpu(unsigned int cpu)
+{
+ struct bman_portal *p;
+ const struct bm_portal_config *pcfg;
+ p = (struct bman_portal *)affine_bportals[cpu];
+ if (p) {
+ pcfg = bman_get_bm_portal_config(p);
+ if (pcfg)
+ irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu));
+ }
+ return 0;
+}
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/* Initialise the Bman driver. The meat of this function deals with portals. The
+ * following describes the flow of portal-handling, the code "steps" refer to
+ * this description;
+ * 1. Portal configs are parsed from the device-tree into 'unused_pcfgs', with
+ * ::cpu==-1. Regions and interrupts are mapped (but interrupts are not
+ * bound).
+ * 2. The "want_shared" and "want_unshared" lists (as filled by the
+ * "bportals=[...]" bootarg) are processed, allocating portals and assigning
+ * them to cpus, placing them in the relevant list and setting ::cpu as
+ * appropriate. If no "bportals" bootarg was present, the default is to try to
+ * assign portals to all online cpus at the time of driver initialisation.
+ * Any failure to allocate portals (when parsing the "want" lists or when
+ * using default behaviour) will be silently tolerated (the "fixup" logic in
+ * step 3 will determine what happens in this case).
+ * 3. Do fixups relative to cpu_online_mask(). If no portals are marked for
+ * sharing and sharing is required (because not all cpus have been assigned
+ * portals), then one portal will be marked for sharing. Conversely, if no
+ * sharing is required, any portals marked for sharing will not be shared. It
+ * may be that sharing occurs when it wasn't expected, if portal allocation
+ * failed to honour all the requested assignments (including the default
+ * assignments if no bootarg is present).
+ * 4. Unshared portals are initialised on their respective cpus.
+ * 5. Shared portals are initialised on their respective cpus.
+ * 6. Each remaining cpu is initialised to slave to one of the shared portals,
+ * which are selected in a round-robin fashion.
+ * Any portal configs left unused are available for USDPAA allocation.
+ */
+__init int bman_init(void)
+{
+ struct cpumask slave_cpus;
+ struct cpumask unshared_cpus = *cpu_none_mask;
+ struct cpumask shared_cpus = *cpu_none_mask;
+ LIST_HEAD(unshared_pcfgs);
+ LIST_HEAD(shared_pcfgs);
+ struct device_node *dn;
+ struct bm_portal_config *pcfg;
+ struct bman_portal *p;
+ int cpu, ret;
+ struct cpumask offline_cpus;
+
+ /* Initialise the Bman (CCSR) device */
+ for_each_compatible_node(dn, NULL, "fsl,bman") {
+ if (!bman_init_ccsr(dn))
+ pr_info("Bman err interrupt handler present\n");
+ else
+ pr_err("Bman CCSR setup failed\n");
+ }
+ /* Initialise any declared buffer pools */
+ for_each_compatible_node(dn, NULL, "fsl,bpool") {
+ ret = fsl_bpool_init(dn);
+ if (ret)
+ return ret;
+ }
+ /* Step 1. See comments at the beginning of the file. */
+ for_each_compatible_node(dn, NULL, "fsl,bman-portal") {
+ if (!of_device_is_available(dn))
+ continue;
+ pcfg = parse_pcfg(dn);
+ if (pcfg)
+ list_add_tail(&pcfg->list, &unused_pcfgs);
+ }
+ /* Step 2. */
+ for_each_possible_cpu(cpu) {
+ if (cpumask_test_cpu(cpu, &want_shared)) {
+ pcfg = get_pcfg(&unused_pcfgs);
+ if (!pcfg)
+ break;
+ pcfg->public_cfg.cpu = cpu;
+ list_add_tail(&pcfg->list, &shared_pcfgs);
+ cpumask_set_cpu(cpu, &shared_cpus);
+ }
+ if (cpumask_test_cpu(cpu, &want_unshared)) {
+ if (cpumask_test_cpu(cpu, &shared_cpus))
+ continue;
+ pcfg = get_pcfg(&unused_pcfgs);
+ if (!pcfg)
+ break;
+ pcfg->public_cfg.cpu = cpu;
+ list_add_tail(&pcfg->list, &unshared_pcfgs);
+ cpumask_set_cpu(cpu, &unshared_cpus);
+ }
+ }
+ if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) {
+ /* Default, give an unshared portal to each online cpu */
+ for_each_online_cpu(cpu) {
+ pcfg = get_pcfg(&unused_pcfgs);
+ if (!pcfg)
+ break;
+ pcfg->public_cfg.cpu = cpu;
+ list_add_tail(&pcfg->list, &unshared_pcfgs);
+ cpumask_set_cpu(cpu, &unshared_cpus);
+ }
+ }
+ /* Step 3. */
+ cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus);
+ cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus);
+ if (cpumask_empty(&slave_cpus)) {
+ /* No sharing required */
+ if (!list_empty(&shared_pcfgs)) {
+ /* Migrate "shared" to "unshared" */
+ cpumask_or(&unshared_cpus, &unshared_cpus,
+ &shared_cpus);
+ cpumask_clear(&shared_cpus);
+ list_splice_tail(&shared_pcfgs, &unshared_pcfgs);
+ INIT_LIST_HEAD(&shared_pcfgs);
+ }
+ } else {
+ /* Sharing required */
+ if (list_empty(&shared_pcfgs)) {
+ /* Migrate one "unshared" to "shared" */
+ pcfg = get_pcfg(&unshared_pcfgs);
+ if (!pcfg) {
+ pr_crit("No BMan portals available!\n");
+ return 0;
+ }
+ cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus);
+ cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus);
+ list_add_tail(&pcfg->list, &shared_pcfgs);
+ }
+ }
+ /* Step 4. */
+ list_for_each_entry(pcfg, &unshared_pcfgs, list) {
+ pcfg->public_cfg.is_shared = 0;
+ p = init_pcfg(pcfg);
+ if (!p) {
+ pr_crit("Unable to initialize bman portal\n");
+ return 0;
+ }
+ }
+ /* Step 5. */
+ list_for_each_entry(pcfg, &shared_pcfgs, list) {
+ pcfg->public_cfg.is_shared = 1;
+ p = init_pcfg(pcfg);
+ if (p)
+ shared_portals[num_shared_portals++] = p;
+ }
+ /* Step 6. */
+ if (!cpumask_empty(&slave_cpus))
+ for_each_cpu(cpu, &slave_cpus)
+ init_slave(cpu);
+ pr_info("Bman portals initialised\n");
+ cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
+ for_each_cpu(cpu, &offline_cpus)
+ bman_offline_cpu(cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "soc/qbman_portal:online",
+ bman_online_cpu, bman_offline_cpu);
+ if (ret < 0) {
+ pr_err("bman: failed to register hotplug callbacks.\n");
+ return 0;
+ }
+#endif
+ return 0;
+}
+
+__init int bman_resource_init(void)
+{
+ struct device_node *dn;
+ int ret;
+
+ /* Initialise BPID allocation ranges */
+ for_each_compatible_node(dn, NULL, "fsl,bpid-range") {
+ ret = fsl_bpid_range_init(dn);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_SUSPEND
+void suspend_unused_bportal(void)
+{
+ struct bm_portal_config *pcfg;
+
+ if (list_empty(&unused_pcfgs))
+ return;
+
+ list_for_each_entry(pcfg, &unused_pcfgs, list) {
+#ifdef CONFIG_PM_DEBUG
+ pr_info("Need to save bportal %d\n", pcfg->public_cfg.index);
+#endif
+ /* save isdr, disable all via isdr, clear isr */
+ pcfg->saved_isdr =
+ __raw_readl(pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08);
+ __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] +
+ 0xe08);
+ __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] +
+ 0xe00);
+ }
+ return;
+}
+
+void resume_unused_bportal(void)
+{
+ struct bm_portal_config *pcfg;
+
+ if (list_empty(&unused_pcfgs))
+ return;
+
+ list_for_each_entry(pcfg, &unused_pcfgs, list) {
+#ifdef CONFIG_PM_DEBUG
+ pr_info("Need to resume bportal %d\n", pcfg->public_cfg.index);
+#endif
+ /* restore isdr */
+ __raw_writel(pcfg->saved_isdr,
+ pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08);
+ }
+ return;
+}
+#endif
diff --git a/drivers/staging/fsl_qbman/bman_high.c b/drivers/staging/fsl_qbman/bman_high.c
new file mode 100644
index 000000000000..c066602d7b3e
--- /dev/null
+++ b/drivers/staging/fsl_qbman/bman_high.c
@@ -0,0 +1,1145 @@
+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_low.h"
+
+/* Compilation constants */
+#define RCR_THRESH 2 /* reread h/w CI when running out of space */
+#define IRQNAME "BMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "BMan portal %d" */
+
+struct bman_portal {
+ struct bm_portal p;
+ /* 2-element array. pools[0] is mask, pools[1] is snapshot. */
+ struct bman_depletion *pools;
+ int thresh_set;
+ unsigned long irq_sources;
+ u32 slowpoll; /* only used when interrupts are off */
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+ struct bman_pool *rcri_owned; /* only 1 release WAIT_SYNC at a time */
+#endif
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+ raw_spinlock_t sharing_lock; /* only used if is_shared */
+ int is_shared;
+ struct bman_portal *sharing_redirect;
+#endif
+ /* When the cpu-affine portal is activated, this is non-NULL */
+ const struct bm_portal_config *config;
+ /* This is needed for power management */
+ struct platform_device *pdev;
+ /* 64-entry hash-table of pool objects that are tracking depletion
+ * entry/exit (ie. BMAN_POOL_FLAG_DEPLETION). This isn't fast-path, so
+ * we're not fussy about cache-misses and so forth - whereas the above
+ * members should all fit in one cacheline.
+ * BTW, with 64 entries in the hash table and 64 buffer pools to track,
+ * you'll never guess the hash-function ... */
+ struct bman_pool *cb[64];
+ char irqname[MAX_IRQNAME];
+ /* Track if the portal was alloced by the driver */
+ u8 alloced;
+ /* power management data */
+ u32 save_isdr;
+};
+
+/* For an explanation of the locking, redirection, or affine-portal logic,
+ * please consult the Qman driver for details. This is the same, only simpler
+ * (no fiddly Qman-specific bits.) */
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+#define PORTAL_IRQ_LOCK(p, irqflags) \
+ do { \
+ if ((p)->is_shared) \
+ raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \
+ else \
+ local_irq_save(irqflags); \
+ } while (0)
+#define PORTAL_IRQ_UNLOCK(p, irqflags) \
+ do { \
+ if ((p)->is_shared) \
+ raw_spin_unlock_irqrestore(&(p)->sharing_lock, \
+ irqflags); \
+ else \
+ local_irq_restore(irqflags); \
+ } while (0)
+#else
+#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags)
+#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags)
+#endif
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);
+static inline struct bman_portal *get_raw_affine_portal(void)
+{
+ return &get_cpu_var(bman_affine_portal);
+}
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+static inline struct bman_portal *get_affine_portal(void)
+{
+ struct bman_portal *p = get_raw_affine_portal();
+ if (p->sharing_redirect)
+ return p->sharing_redirect;
+ return p;
+}
+#else
+#define get_affine_portal() get_raw_affine_portal()
+#endif
+static inline void put_affine_portal(void)
+{
+ put_cpu_var(bman_affine_portal);
+}
+static inline struct bman_portal *get_poll_portal(void)
+{
+ return &get_cpu_var(bman_affine_portal);
+}
+#define put_poll_portal()
+
+/* GOTCHA: this object type refers to a pool; it isn't *the* pool. There may
+ * be more than one such object per Bman buffer pool, eg. if different users of
+ * the pool are operating via different portals. */
+struct bman_pool {
+ struct bman_pool_params params;
+ /* Used for hash-table admin when using depletion notifications. */
+ struct bman_portal *portal;
+ struct bman_pool *next;
+ /* stockpile state - NULL unless BMAN_POOL_FLAG_STOCKPILE is set */
+ struct bm_buffer *sp;
+ unsigned int sp_fill;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ atomic_t in_use;
+#endif
+};
+
+/* (De)Registration of depletion notification callbacks */
+static void depletion_link(struct bman_portal *portal, struct bman_pool *pool)
+{
+ __maybe_unused unsigned long irqflags;
+ pool->portal = portal;
+ PORTAL_IRQ_LOCK(portal, irqflags);
+ pool->next = portal->cb[pool->params.bpid];
+ portal->cb[pool->params.bpid] = pool;
+ if (!pool->next)
+ /* First object for that bpid on this portal, enable the BSCN
+ * mask bit. */
+ bm_isr_bscn_mask(&portal->p, pool->params.bpid, 1);
+ PORTAL_IRQ_UNLOCK(portal, irqflags);
+}
+static void depletion_unlink(struct bman_pool *pool)
+{
+ struct bman_pool *it, *last = NULL;
+ struct bman_pool **base = &pool->portal->cb[pool->params.bpid];
+ __maybe_unused unsigned long irqflags;
+ PORTAL_IRQ_LOCK(pool->portal, irqflags);
+ it = *base; /* <-- gotcha, don't do this prior to the irq_save */
+ while (it != pool) {
+ last = it;
+ it = it->next;
+ }
+ if (!last)
+ *base = pool->next;
+ else
+ last->next = pool->next;
+ if (!last && !pool->next) {
+ /* Last object for that bpid on this portal, disable the BSCN
+ * mask bit. */
+ bm_isr_bscn_mask(&pool->portal->p, pool->params.bpid, 0);
+ /* And "forget" that we last saw this pool as depleted */
+ bman_depletion_unset(&pool->portal->pools[1],
+ pool->params.bpid);
+ }
+ PORTAL_IRQ_UNLOCK(pool->portal, irqflags);
+}
+
+/* In the case that the application's core loop calls qman_poll() and
+ * bman_poll(), we ought to balance how often we incur the overheads of the
+ * slow-path poll. We'll use two decrementer reload values. The idle decrementer
+ * constant is used when the last slow-poll detected no work to do, and the busy
+ * decrementer constant when the last slow-poll had work to do. */
+#define SLOW_POLL_IDLE 1000
+#define SLOW_POLL_BUSY 10
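+
+/* Illustrative sketch only (the application helpers named here are
+ * hypothetical): a run-to-completion core loop that uses both drivers without
+ * portal interrupts would interleave the slow-path polls, e.g.
+ *
+ *	while (!application_should_stop()) {
+ *		qman_poll();
+ *		bman_poll();
+ *		application_do_work();
+ *	}
+ *
+ * so the SLOW_POLL_* constants above bound how often the slow path runs. */
+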
+static u32 __poll_portal_slow(struct bman_portal *p, u32 is);
+
+/* Portal interrupt handler */
+static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
+{
+ struct bman_portal *p = ptr;
+ u32 clear = p->irq_sources;
+ u32 is = bm_isr_status_read(&p->p) & p->irq_sources;
+ clear |= __poll_portal_slow(p, is);
+ bm_isr_status_clear(&p->p, clear);
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_SUSPEND
+static int _bman_portal_suspend_noirq(struct device *dev)
+{
+ struct bman_portal *p = (struct bman_portal *)dev->platform_data;
+#ifdef CONFIG_PM_DEBUG
+ struct platform_device *pdev = to_platform_device(dev);
+#endif
+ p->save_isdr = bm_isr_disable_read(&p->p);
+ bm_isr_disable_write(&p->p, 0xffffffff);
+ bm_isr_status_clear(&p->p, 0xffffffff);
+#ifdef CONFIG_PM_DEBUG
+ pr_info("Suspend for %s\n", pdev->name);
+#endif
+ return 0;
+}
+
+static int _bman_portal_resume_noirq(struct device *dev)
+{
+ struct bman_portal *p = (struct bman_portal *)dev->platform_data;
+
+ /* restore isdr */
+ bm_isr_disable_write(&p->p, p->save_isdr);
+ return 0;
+}
+#else
+#define _bman_portal_suspend_noirq NULL
+#define _bman_portal_resume_noirq NULL
+#endif
+
+struct dev_pm_domain bman_portal_device_pm_domain = {
+ .ops = {
+ USE_PLATFORM_PM_SLEEP_OPS
+ .suspend_noirq = _bman_portal_suspend_noirq,
+ .resume_noirq = _bman_portal_resume_noirq,
+ }
+};
+
+struct bman_portal *bman_create_portal(
+ struct bman_portal *portal,
+ const struct bm_portal_config *config)
+{
+ struct bm_portal *__p;
+ const struct bman_depletion *pools = &config->public_cfg.mask;
+ int ret;
+ u8 bpid = 0;
+ char buf[16];
+
+ if (!portal) {
+ portal = kmalloc(sizeof(*portal), GFP_KERNEL);
+ if (!portal)
+ return portal;
+ portal->alloced = 1;
+ } else
+ portal->alloced = 0;
+
+ __p = &portal->p;
+
+ /* prep the low-level portal struct with the mapped addresses from the
+ * config, everything that follows depends on it and "config" is more
+ * for (de)reference... */
+ __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
+ __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
+ if (bm_rcr_init(__p, bm_rcr_pvb, bm_rcr_cce)) {
+ pr_err("Bman RCR initialisation failed\n");
+ goto fail_rcr;
+ }
+ if (bm_mc_init(__p)) {
+ pr_err("Bman MC initialisation failed\n");
+ goto fail_mc;
+ }
+ if (bm_isr_init(__p)) {
+ pr_err("Bman ISR initialisation failed\n");
+ goto fail_isr;
+ }
+ portal->pools = kmalloc(2 * sizeof(*pools), GFP_KERNEL);
+ if (!portal->pools)
+ goto fail_pools;
+ portal->pools[0] = *pools;
+ bman_depletion_init(portal->pools + 1);
+ while (bpid < bman_pool_max) {
+ /* Default to all BPIDs disabled, we enable as required at
+ * run-time. */
+ bm_isr_bscn_mask(__p, bpid, 0);
+ bpid++;
+ }
+ portal->slowpoll = 0;
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+ portal->rcri_owned = NULL;
+#endif
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+ raw_spin_lock_init(&portal->sharing_lock);
+ portal->is_shared = config->public_cfg.is_shared;
+ portal->sharing_redirect = NULL;
+#endif
+ sprintf(buf, "bportal-%u", config->public_cfg.index);
+ portal->pdev = platform_device_alloc(buf, -1);
+ if (!portal->pdev)
+ goto fail_devalloc;
+ portal->pdev->dev.pm_domain = &bman_portal_device_pm_domain;
+ portal->pdev->dev.platform_data = portal;
+ ret = platform_device_add(portal->pdev);
+ if (ret)
+ goto fail_devadd;
+ memset(&portal->cb, 0, sizeof(portal->cb));
+	/* Mask all interrupt sources, then write-to-clear any stale status
+	 * bits */
+ bm_isr_disable_write(__p, 0xffffffff);
+ portal->irq_sources = 0;
+ bm_isr_enable_write(__p, portal->irq_sources);
+ bm_isr_status_clear(__p, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu);
+ if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname,
+ portal)) {
+ pr_err("request_irq() failed\n");
+ goto fail_irq;
+ }
+ if ((config->public_cfg.cpu != -1) &&
+ irq_can_set_affinity(config->public_cfg.irq) &&
+ irq_set_affinity(config->public_cfg.irq,
+ cpumask_of(config->public_cfg.cpu))) {
+ pr_err("irq_set_affinity() failed %s\n", portal->irqname);
+ goto fail_affinity;
+ }
+
+ /* Need RCR to be empty before continuing */
+ ret = bm_rcr_get_fill(__p);
+ if (ret) {
+ pr_err("Bman RCR unclean\n");
+ goto fail_rcr_empty;
+ }
+ /* Success */
+ portal->config = config;
+
+ bm_isr_disable_write(__p, 0);
+ bm_isr_uninhibit(__p);
+ return portal;
+fail_rcr_empty:
+fail_affinity:
+ free_irq(config->public_cfg.irq, portal);
+fail_irq:
+ platform_device_del(portal->pdev);
+fail_devadd:
+ platform_device_put(portal->pdev);
+fail_devalloc:
+ kfree(portal->pools);
+fail_pools:
+ bm_isr_finish(__p);
+fail_isr:
+ bm_mc_finish(__p);
+fail_mc:
+ bm_rcr_finish(__p);
+fail_rcr:
+ if (portal->alloced)
+ kfree(portal);
+ return NULL;
+}
+
+struct bman_portal *bman_create_affine_portal(
+ const struct bm_portal_config *config)
+{
+ struct bman_portal *portal;
+
+ portal = &per_cpu(bman_affine_portal, config->public_cfg.cpu);
+ portal = bman_create_portal(portal, config);
+ if (portal) {
+ spin_lock(&affine_mask_lock);
+ cpumask_set_cpu(config->public_cfg.cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+ }
+ return portal;
+}
+
+
+struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
+ int cpu)
+{
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+ struct bman_portal *p;
+ p = &per_cpu(bman_affine_portal, cpu);
+ BUG_ON(p->config);
+ BUG_ON(p->is_shared);
+ BUG_ON(!redirect->config->public_cfg.is_shared);
+ p->irq_sources = 0;
+ p->sharing_redirect = redirect;
+ return p;
+#else
+ BUG();
+ return NULL;
+#endif
+}
+
+void bman_destroy_portal(struct bman_portal *bm)
+{
+ const struct bm_portal_config *pcfg;
+ pcfg = bm->config;
+ bm_rcr_cce_update(&bm->p);
+ bm_rcr_cce_update(&bm->p);
+
+ free_irq(pcfg->public_cfg.irq, bm);
+
+ kfree(bm->pools);
+ bm_isr_finish(&bm->p);
+ bm_mc_finish(&bm->p);
+ bm_rcr_finish(&bm->p);
+ bm->config = NULL;
+ if (bm->alloced)
+ kfree(bm);
+}
+
+const struct bm_portal_config *bman_destroy_affine_portal(void)
+{
+ struct bman_portal *bm = get_raw_affine_portal();
+ const struct bm_portal_config *pcfg;
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+ if (bm->sharing_redirect) {
+ bm->sharing_redirect = NULL;
+ put_affine_portal();
+ return NULL;
+ }
+ bm->is_shared = 0;
+#endif
+ pcfg = bm->config;
+ bman_destroy_portal(bm);
+ spin_lock(&affine_mask_lock);
+ cpumask_clear_cpu(pcfg->public_cfg.cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+ put_affine_portal();
+ return pcfg;
+}
+
+/* When release logic waits on available RCR space, we need a global waitqueue
+ * in the case of "affine" use (as waiters may be woken on different cpus,
+ * which means different portals - so we can't wait on any per-portal
+ * waitqueue). */
+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
+
+static u32 __poll_portal_slow(struct bman_portal *p, u32 is)
+{
+ struct bman_depletion tmp;
+ u32 ret = is;
+
+ /* There is a gotcha to be aware of. If we do the query before clearing
+ * the status register, we may miss state changes that occur between the
+ * two. If we write to clear the status register before the query, the
+ * cache-enabled query command may overtake the status register write
+ * unless we use a heavyweight sync (which we don't want). Instead, we
+ * write-to-clear the status register then *read it back* before doing
+ * the query, hence the odd while loop with the 'is' accumulation. */
+ if (is & BM_PIRQ_BSCN) {
+ struct bm_mc_result *mcr;
+ __maybe_unused unsigned long irqflags;
+ unsigned int i, j;
+ u32 __is;
+ bm_isr_status_clear(&p->p, BM_PIRQ_BSCN);
+ while ((__is = bm_isr_status_read(&p->p)) & BM_PIRQ_BSCN) {
+ is |= __is;
+ bm_isr_status_clear(&p->p, BM_PIRQ_BSCN);
+ }
+ is &= ~BM_PIRQ_BSCN;
+ PORTAL_IRQ_LOCK(p, irqflags);
+ bm_mc_start(&p->p);
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
+ while (!(mcr = bm_mc_result(&p->p)))
+ cpu_relax();
+ tmp = mcr->query.ds.state;
+ tmp.__state[0] = be32_to_cpu(tmp.__state[0]);
+ tmp.__state[1] = be32_to_cpu(tmp.__state[1]);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ for (i = 0; i < 2; i++) {
+ int idx = i * 32;
+ /* tmp is a mask of currently-depleted pools.
+ * pools[0] is mask of those we care about.
+ * pools[1] is our previous view (we only want to
+ * be told about changes). */
+ tmp.__state[i] &= p->pools[0].__state[i];
+ if (tmp.__state[i] == p->pools[1].__state[i])
+ /* fast-path, nothing to see, move along */
+ continue;
+ for (j = 0; j <= 31; j++, idx++) {
+ struct bman_pool *pool = p->cb[idx];
+ int b4 = bman_depletion_get(&p->pools[1], idx);
+ int af = bman_depletion_get(&tmp, idx);
+ if (b4 == af)
+ continue;
+ while (pool) {
+ pool->params.cb(p, pool,
+ pool->params.cb_ctx, af);
+ pool = pool->next;
+ }
+ }
+ }
+ p->pools[1] = tmp;
+ }
+
+ if (is & BM_PIRQ_RCRI) {
+ __maybe_unused unsigned long irqflags;
+ PORTAL_IRQ_LOCK(p, irqflags);
+ bm_rcr_cce_update(&p->p);
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+ /* If waiting for sync, we only cancel the interrupt threshold
+ * when the ring utilisation hits zero. */
+ if (p->rcri_owned) {
+ if (!bm_rcr_get_fill(&p->p)) {
+ p->rcri_owned = NULL;
+ bm_rcr_set_ithresh(&p->p, 0);
+ }
+ } else
+#endif
+ bm_rcr_set_ithresh(&p->p, 0);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ wake_up(&affine_queue);
+ bm_isr_status_clear(&p->p, BM_PIRQ_RCRI);
+ is &= ~BM_PIRQ_RCRI;
+ }
+
+ /* There should be no status register bits left undefined */
+ DPA_ASSERT(!is);
+ return ret;
+}
+
+const struct bman_portal_config *bman_get_portal_config(void)
+{
+ struct bman_portal *p = get_affine_portal();
+ const struct bman_portal_config *ret = &p->config->public_cfg;
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(bman_get_portal_config);
+
+u32 bman_irqsource_get(void)
+{
+ struct bman_portal *p = get_raw_affine_portal();
+ u32 ret = p->irq_sources & BM_PIRQ_VISIBLE;
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(bman_irqsource_get);
+
+int bman_p_irqsource_add(struct bman_portal *p, __maybe_unused u32 bits)
+{
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+ if (p->sharing_redirect)
+ return -EINVAL;
+ else
+#endif
+ {
+ __maybe_unused unsigned long irqflags;
+ PORTAL_IRQ_LOCK(p, irqflags);
+ set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
+ bm_isr_enable_write(&p->p, p->irq_sources);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(bman_p_irqsource_add);
+
+int bman_irqsource_add(__maybe_unused u32 bits)
+{
+ struct bman_portal *p = get_raw_affine_portal();
+ int ret = 0;
+ ret = bman_p_irqsource_add(p, bits);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(bman_irqsource_add);
+
+int bman_irqsource_remove(u32 bits)
+{
+ struct bman_portal *p = get_raw_affine_portal();
+ __maybe_unused unsigned long irqflags;
+ u32 ier;
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+ if (p->sharing_redirect) {
+ put_affine_portal();
+ return -EINVAL;
+ }
+#endif
+ /* Our interrupt handler only processes+clears status register bits that
+ * are in p->irq_sources. As we're trimming that mask, if one of them
+ * were to assert in the status register just before we remove it from
+ * the enable register, there would be an interrupt-storm when we
+ * release the IRQ lock. So we wait for the enable register update to
+ * take effect in h/w (by reading it back) and then clear all other bits
+ * in the status register. Ie. we clear them from ISR once it's certain
+ * IER won't allow them to reassert. */
+ PORTAL_IRQ_LOCK(p, irqflags);
+ bits &= BM_PIRQ_VISIBLE;
+ clear_bits(bits, &p->irq_sources);
+ bm_isr_enable_write(&p->p, p->irq_sources);
+ ier = bm_isr_enable_read(&p->p);
+ /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+ * data-dependency, ie. to protect against re-ordering. */
+ bm_isr_status_clear(&p->p, ~ier);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return 0;
+}
+EXPORT_SYMBOL(bman_irqsource_remove);
+
+const cpumask_t *bman_affine_cpus(void)
+{
+ return &affine_mask;
+}
+EXPORT_SYMBOL(bman_affine_cpus);
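+
+/* Illustrative sketch only: a caller wanting to do per-portal work on each
+ * portal-owning cpu might iterate the mask, e.g.
+ *
+ *	int cpu;
+ *
+ *	for_each_cpu(cpu, bman_affine_cpus())
+ *		pr_info("cpu %d has an affine BMan portal\n", cpu);
+ */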
+
+u32 bman_poll_slow(void)
+{
+ struct bman_portal *p = get_poll_portal();
+ u32 ret;
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+ if (unlikely(p->sharing_redirect))
+ ret = (u32)-1;
+ else
+#endif
+ {
+ u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources;
+ ret = __poll_portal_slow(p, is);
+ bm_isr_status_clear(&p->p, ret);
+ }
+ put_poll_portal();
+ return ret;
+}
+EXPORT_SYMBOL(bman_poll_slow);
+
+/* Legacy wrapper */
+void bman_poll(void)
+{
+ struct bman_portal *p = get_poll_portal();
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+ if (unlikely(p->sharing_redirect))
+ goto done;
+#endif
+ if (!(p->slowpoll--)) {
+ u32 is = bm_isr_status_read(&p->p) & ~p->irq_sources;
+ u32 active = __poll_portal_slow(p, is);
+ if (active)
+ p->slowpoll = SLOW_POLL_BUSY;
+ else
+ p->slowpoll = SLOW_POLL_IDLE;
+ }
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+done:
+#endif
+ put_poll_portal();
+}
+EXPORT_SYMBOL(bman_poll);
+
+static const u32 zero_thresholds[4] = {0, 0, 0, 0};
+
+struct bman_pool *bman_new_pool(const struct bman_pool_params *params)
+{
+ struct bman_pool *pool = NULL;
+ u32 bpid;
+
+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID) {
+ int ret = bman_alloc_bpid(&bpid);
+ if (ret)
+ return NULL;
+ } else {
+ if (params->bpid >= bman_pool_max)
+ return NULL;
+ bpid = params->bpid;
+ }
+#ifdef CONFIG_FSL_BMAN_CONFIG
+ if (params->flags & BMAN_POOL_FLAG_THRESH) {
+ int ret = bm_pool_set(bpid, params->thresholds);
+ if (ret)
+ goto err;
+ }
+#else
+ if (params->flags & BMAN_POOL_FLAG_THRESH)
+ goto err;
+#endif
+ pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ goto err;
+ pool->sp = NULL;
+ pool->sp_fill = 0;
+ pool->params = *params;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ atomic_set(&pool->in_use, 1);
+#endif
+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+ pool->params.bpid = bpid;
+ if (params->flags & BMAN_POOL_FLAG_STOCKPILE) {
+ pool->sp = kmalloc(sizeof(struct bm_buffer) * BMAN_STOCKPILE_SZ,
+ GFP_KERNEL);
+ if (!pool->sp)
+ goto err;
+ }
+ if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION) {
+ struct bman_portal *p = get_affine_portal();
+ if (!p->pools || !bman_depletion_get(&p->pools[0], bpid)) {
+ pr_err("Depletion events disabled for bpid %d\n", bpid);
+ goto err;
+ }
+ depletion_link(p, pool);
+ put_affine_portal();
+ }
+ return pool;
+err:
+#ifdef CONFIG_FSL_BMAN_CONFIG
+ if (params->flags & BMAN_POOL_FLAG_THRESH)
+ bm_pool_set(bpid, zero_thresholds);
+#endif
+ if (params->flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+ bman_release_bpid(bpid);
+ if (pool) {
+ kfree(pool->sp);
+ kfree(pool);
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(bman_new_pool);
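+
+/* Illustrative sketch only: a typical user creates a pool with a dynamically
+ * allocated BPID and a stockpile, e.g.
+ *
+ *	struct bman_pool_params params = {
+ *		.flags = BMAN_POOL_FLAG_DYNAMIC_BPID |
+ *			 BMAN_POOL_FLAG_STOCKPILE,
+ *	};
+ *	struct bman_pool *pool = bman_new_pool(&params);
+ *
+ *	if (!pool)
+ *		return -ENOMEM;
+ */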
+
+void bman_free_pool(struct bman_pool *pool)
+{
+#ifdef CONFIG_FSL_BMAN_CONFIG
+ if (pool->params.flags & BMAN_POOL_FLAG_THRESH)
+ bm_pool_set(pool->params.bpid, zero_thresholds);
+#endif
+ if (pool->params.flags & BMAN_POOL_FLAG_DEPLETION)
+ depletion_unlink(pool);
+ if (pool->params.flags & BMAN_POOL_FLAG_STOCKPILE) {
+ if (pool->sp_fill)
+ pr_err("Stockpile not flushed, has %u in bpid %u.\n",
+ pool->sp_fill, pool->params.bpid);
+ kfree(pool->sp);
+ pool->sp = NULL;
+ pool->params.flags ^= BMAN_POOL_FLAG_STOCKPILE;
+ }
+ if (pool->params.flags & BMAN_POOL_FLAG_DYNAMIC_BPID)
+ bman_release_bpid(pool->params.bpid);
+ kfree(pool);
+}
+EXPORT_SYMBOL(bman_free_pool);
+
+const struct bman_pool_params *bman_get_params(const struct bman_pool *pool)
+{
+ return &pool->params;
+}
+EXPORT_SYMBOL(bman_get_params);
+
+static noinline void update_rcr_ci(struct bman_portal *p, u8 avail)
+{
+ if (avail)
+ bm_rcr_cce_prefetch(&p->p);
+ else
+ bm_rcr_cce_update(&p->p);
+}
+
+int bman_rcr_is_empty(void)
+{
+ __maybe_unused unsigned long irqflags;
+ struct bman_portal *p = get_affine_portal();
+ u8 avail;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ update_rcr_ci(p, 0);
+ avail = bm_rcr_get_fill(&p->p);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return avail == 0;
+}
+EXPORT_SYMBOL(bman_rcr_is_empty);
+
+static inline struct bm_rcr_entry *try_rel_start(struct bman_portal **p,
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+ __maybe_unused struct bman_pool *pool,
+#endif
+ __maybe_unused unsigned long *irqflags,
+ __maybe_unused u32 flags)
+{
+ struct bm_rcr_entry *r;
+ u8 avail;
+
+ *p = get_affine_portal();
+ PORTAL_IRQ_LOCK(*p, (*irqflags));
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
+ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
+ if ((*p)->rcri_owned) {
+ PORTAL_IRQ_UNLOCK(*p, (*irqflags));
+ put_affine_portal();
+ return NULL;
+ }
+ (*p)->rcri_owned = pool;
+ }
+#endif
+ avail = bm_rcr_get_avail(&(*p)->p);
+ if (avail < 2)
+ update_rcr_ci(*p, avail);
+ r = bm_rcr_start(&(*p)->p);
+ if (unlikely(!r)) {
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
+ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC)))
+ (*p)->rcri_owned = NULL;
+#endif
+ PORTAL_IRQ_UNLOCK(*p, (*irqflags));
+ put_affine_portal();
+ }
+ return r;
+}
+
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+static noinline struct bm_rcr_entry *__wait_rel_start(struct bman_portal **p,
+ struct bman_pool *pool,
+ __maybe_unused unsigned long *irqflags,
+ u32 flags)
+{
+ struct bm_rcr_entry *rcr = try_rel_start(p, pool, irqflags, flags);
+ if (!rcr)
+ bm_rcr_set_ithresh(&(*p)->p, 1);
+ return rcr;
+}
+
+static noinline struct bm_rcr_entry *wait_rel_start(struct bman_portal **p,
+ struct bman_pool *pool,
+ __maybe_unused unsigned long *irqflags,
+ u32 flags)
+{
+ struct bm_rcr_entry *rcr;
+#ifndef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+ pool = NULL;
+#endif
+ if (flags & BMAN_RELEASE_FLAG_WAIT_INT)
+ /* NB: return NULL if signal occurs before completion. Signal
+ * can occur during return. Caller must check for signal */
+ wait_event_interruptible(affine_queue,
+ (rcr = __wait_rel_start(p, pool, irqflags, flags)));
+ else
+ wait_event(affine_queue,
+ (rcr = __wait_rel_start(p, pool, irqflags, flags)));
+ return rcr;
+}
+#endif
+
+static inline int __bman_release(struct bman_pool *pool,
+ const struct bm_buffer *bufs, u8 num, u32 flags)
+{
+ struct bman_portal *p;
+ struct bm_rcr_entry *r;
+ __maybe_unused unsigned long irqflags;
+ u32 i = num - 1;
+
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+ if (flags & BMAN_RELEASE_FLAG_WAIT)
+ r = wait_rel_start(&p, pool, &irqflags, flags);
+ else
+ r = try_rel_start(&p, pool, &irqflags, flags);
+#else
+ r = try_rel_start(&p, &irqflags, flags);
+#endif
+ if (!r)
+ return -EBUSY;
+	/* The first entry overlaps the verb byte, and writing that word
+	 * directly can trigger badness with the valid-bit; use the 64-bit
+	 * overlay to mask it off. The remaining entries are copied as-is. */
+ r->bufs[0].opaque =
+ ((cpu_to_be64((bufs[0].opaque |
+ ((u64)pool->params.bpid<<48))
+ & 0x00ffffffffffffff)));
+ if (i) {
+ for (i = 1; i < num; i++)
+ r->bufs[i].opaque =
+ cpu_to_be64(bufs[i].opaque);
+ }
+
+ bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
+ (num & BM_RCR_VERB_BUFCOUNT_MASK));
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+ /* if we wish to sync we need to set the threshold after h/w sees the
+ * new ring entry. As we're mixing cache-enabled and cache-inhibited
+ * accesses, this requires a heavy-weight sync. */
+ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
+ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
+ hwsync();
+ bm_rcr_set_ithresh(&p->p, 1);
+ }
+#endif
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & BMAN_RELEASE_FLAG_WAIT) &&
+ (flags & BMAN_RELEASE_FLAG_WAIT_SYNC))) {
+ if (flags & BMAN_RELEASE_FLAG_WAIT_INT)
+ /* NB: return success even if signal occurs before
+ * condition is true. pvb_commit guarantees success */
+ wait_event_interruptible(affine_queue,
+ (p->rcri_owned != pool));
+ else
+ wait_event(affine_queue, (p->rcri_owned != pool));
+ }
+#endif
+ return 0;
+}
+
+int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num,
+ u32 flags)
+{
+ int ret;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (!num || (num > 8))
+ return -EINVAL;
+ if (pool->params.flags & BMAN_POOL_FLAG_NO_RELEASE)
+ return -EINVAL;
+#endif
+ /* Without stockpile, this API is a pass-through to the h/w operation */
+ if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE))
+ return __bman_release(pool, bufs, num, flags);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (!atomic_dec_and_test(&pool->in_use)) {
+		pr_crit("Parallel attempts to enter bman_release() detected.");
+		panic("only one instance of bman_release/acquire allowed");
+ }
+#endif
+ /* Two movements of buffers are possible, and can occur in either order.
+ * A: moving buffers from the caller to the stockpile.
+ * B: moving buffers from the stockpile to hardware.
+ * Order 1: if there is already enough space in the stockpile for A
+ * then we want to do A first, and only do B if we trigger the
+ * stockpile-high threshold.
+ * Order 2: if there is not enough space in the stockpile for A, then
+ * we want to do B first, then do A if B had succeeded. However in this
+ * case B is dependent on how many buffers the user needs to release,
+ * not the stockpile-high threshold.
+ * Due to the different handling of B between the two cases, putting A
+ * and B in a while() loop would require quite obscure logic, so handle
+ * the different sequences explicitly. */
+ if ((pool->sp_fill + num) <= BMAN_STOCKPILE_SZ) {
+ /* Order 1: do A */
+ copy_words(pool->sp + pool->sp_fill, bufs,
+ sizeof(struct bm_buffer) * num);
+ pool->sp_fill += num;
+ /* do B relative to STOCKPILE_HIGH */
+ while (pool->sp_fill >= BMAN_STOCKPILE_HIGH) {
+ ret = __bman_release(pool,
+ pool->sp + (pool->sp_fill - 8), 8,
+ flags);
+ if (ret >= 0)
+ pool->sp_fill -= 8;
+ }
+ } else {
+ /* Order 2: do B relative to 'num' */
+ do {
+ ret = __bman_release(pool,
+ pool->sp + (pool->sp_fill - 8), 8,
+ flags);
+ if (ret < 0)
+ /* failure */
+ goto release_done;
+ pool->sp_fill -= 8;
+ } while ((pool->sp_fill + num) > BMAN_STOCKPILE_SZ);
+ /* do A */
+ copy_words(pool->sp + pool->sp_fill, bufs,
+ sizeof(struct bm_buffer) * num);
+ pool->sp_fill += num;
+ }
+ /* success */
+ ret = 0;
+release_done:
+#ifdef CONFIG_FSL_DPA_CHECKING
+ atomic_inc(&pool->in_use);
+#endif
+ return ret;
+}
+EXPORT_SYMBOL(bman_release);
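+
+/* Illustrative sketch only, assuming the bm_buffer_set64() helper from the
+ * public <linux/fsl_bman.h> header: releasing a single buffer by physical
+ * address might look like
+ *
+ *	struct bm_buffer buf;
+ *	int err;
+ *
+ *	bm_buffer_set64(&buf, phys_addr);
+ *	err = bman_release(pool, &buf, 1, 0);
+ *	if (err)
+ *		pr_err("release failed: %d\n", err);
+ */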
+
+static inline int __bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs,
+ u8 num)
+{
+ struct bman_portal *p = get_affine_portal();
+ struct bm_mc_command *mcc;
+ struct bm_mc_result *mcr;
+ __maybe_unused unsigned long irqflags;
+ int ret, i;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ mcc = bm_mc_start(&p->p);
+ mcc->acquire.bpid = pool->params.bpid;
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
+ (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
+ while (!(mcr = bm_mc_result(&p->p)))
+ cpu_relax();
+ ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
+ if (bufs) {
+ for (i = 0; i < num; i++)
+ bufs[i].opaque =
+ be64_to_cpu(mcr->acquire.bufs[i].opaque);
+ }
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ if (ret != num)
+ ret = -ENOMEM;
+ return ret;
+}
+
+int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num,
+ u32 flags)
+{
+ int ret;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (!num || (num > 8))
+ return -EINVAL;
+ if (pool->params.flags & BMAN_POOL_FLAG_ONLY_RELEASE)
+ return -EINVAL;
+#endif
+ /* Without stockpile, this API is a pass-through to the h/w operation */
+ if (!(pool->params.flags & BMAN_POOL_FLAG_STOCKPILE))
+ return __bman_acquire(pool, bufs, num);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (!atomic_dec_and_test(&pool->in_use)) {
+ pr_crit("Parallel attempts to enter bman_acquire() detected.");
+		panic("only one instance of bman_release/acquire allowed");
+ }
+#endif
+ /* Two movements of buffers are possible, and can occur in either order.
+ * A: moving buffers from stockpile to the caller.
+ * B: moving buffers from hardware to the stockpile.
+ * Order 1: if there are already enough buffers in the stockpile for A
+ * then we want to do A first, and only do B if we trigger the
+ * stockpile-low threshold.
+ * Order 2: if there are not enough buffers in the stockpile for A,
+ * then we want to do B first, then do A if B had succeeded. However in
+ * this case B is dependent on how many buffers the user needs, not the
+ * stockpile-low threshold.
+ * Due to the different handling of B between the two cases, putting A
+ * and B in a while() loop would require quite obscure logic, so handle
+ * the different sequences explicitly. */
+ if (num <= pool->sp_fill) {
+ /* Order 1: do A */
+ copy_words(bufs, pool->sp + (pool->sp_fill - num),
+ sizeof(struct bm_buffer) * num);
+ pool->sp_fill -= num;
+ /* do B relative to STOCKPILE_LOW */
+ while (pool->sp_fill <= BMAN_STOCKPILE_LOW) {
+ ret = __bman_acquire(pool, pool->sp + pool->sp_fill, 8);
+ if (ret < 0)
+ ret = __bman_acquire(pool,
+ pool->sp + pool->sp_fill, 1);
+ if (ret < 0)
+ break;
+ pool->sp_fill += ret;
+ }
+ } else {
+ /* Order 2: do B relative to 'num' */
+ do {
+ ret = __bman_acquire(pool, pool->sp + pool->sp_fill, 8);
+ if (ret < 0)
+ ret = __bman_acquire(pool,
+ pool->sp + pool->sp_fill, 1);
+ if (ret < 0)
+ /* failure */
+ goto acquire_done;
+ pool->sp_fill += ret;
+ } while (pool->sp_fill < num);
+ /* do A */
+ copy_words(bufs, pool->sp + (pool->sp_fill - num),
+ sizeof(struct bm_buffer) * num);
+ pool->sp_fill -= num;
+ }
+ /* success */
+ ret = num;
+acquire_done:
+#ifdef CONFIG_FSL_DPA_CHECKING
+ atomic_inc(&pool->in_use);
+#endif
+ return ret;
+}
+EXPORT_SYMBOL(bman_acquire);
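+
+/* Illustrative sketch only, assuming the bm_buf_addr() accessor from the
+ * public header (process_buffer() is a hypothetical caller routine):
+ * acquiring up to 8 buffers and walking the results might look like
+ *
+ *	struct bm_buffer bufs[8];
+ *	int i, n = bman_acquire(pool, bufs, 8, 0);
+ *
+ *	for (i = 0; i < n; i++)
+ *		process_buffer(bm_buf_addr(&bufs[i]));
+ */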
+
+int bman_flush_stockpile(struct bman_pool *pool, u32 flags)
+{
+ u8 num;
+ int ret;
+
+ while (pool->sp_fill) {
+ num = ((pool->sp_fill > 8) ? 8 : pool->sp_fill);
+ ret = __bman_release(pool, pool->sp + (pool->sp_fill - num),
+ num, flags);
+ if (ret)
+ return ret;
+ pool->sp_fill -= num;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(bman_flush_stockpile);
+
+int bman_query_pools(struct bm_pool_state *state)
+{
+ struct bman_portal *p = get_affine_portal();
+ struct bm_mc_result *mcr;
+ __maybe_unused unsigned long irqflags;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ bm_mc_start(&p->p);
+ bm_mc_commit(&p->p, BM_MCC_VERB_CMD_QUERY);
+ while (!(mcr = bm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & BM_MCR_VERB_CMD_MASK) == BM_MCR_VERB_CMD_QUERY);
+ *state = mcr->query;
+ state->as.state.__state[0] = be32_to_cpu(state->as.state.__state[0]);
+ state->as.state.__state[1] = be32_to_cpu(state->as.state.__state[1]);
+ state->ds.state.__state[0] = be32_to_cpu(state->ds.state.__state[0]);
+ state->ds.state.__state[1] = be32_to_cpu(state->ds.state.__state[1]);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return 0;
+}
+EXPORT_SYMBOL(bman_query_pools);
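+
+/* Illustrative sketch only, assuming the bman_depletion_get() accessor from
+ * the public header: testing one pool's depletion state after a query might
+ * look like
+ *
+ *	struct bm_pool_state state;
+ *
+ *	if (!bman_query_pools(&state) &&
+ *	    bman_depletion_get(&state.ds.state, bpid))
+ *		pr_info("bpid %u is in depletion\n", bpid);
+ */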
+
+#ifdef CONFIG_FSL_BMAN_CONFIG
+u32 bman_query_free_buffers(struct bman_pool *pool)
+{
+ return bm_pool_free_buffers(pool->params.bpid);
+}
+EXPORT_SYMBOL(bman_query_free_buffers);
+
+int bman_update_pool_thresholds(struct bman_pool *pool, const u32 *thresholds)
+{
+ u32 bpid;
+
+ bpid = bman_get_params(pool)->bpid;
+
+ return bm_pool_set(bpid, thresholds);
+}
+EXPORT_SYMBOL(bman_update_pool_thresholds);
+#endif
+
+int bman_shutdown_pool(u32 bpid)
+{
+ struct bman_portal *p = get_affine_portal();
+ __maybe_unused unsigned long irqflags;
+ int ret;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ ret = bm_shutdown_pool(&p->p, bpid);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(bman_shutdown_pool);
+
+const struct bm_portal_config *bman_get_bm_portal_config(
+		struct bman_portal *portal)
+{
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+	if (portal->sharing_redirect)
+		return NULL;
+#endif
+	return portal->config;
+}
diff --git a/drivers/staging/fsl_qbman/bman_low.h b/drivers/staging/fsl_qbman/bman_low.h
new file mode 100644
index 000000000000..3da7057117c7
--- /dev/null
+++ b/drivers/staging/fsl_qbman/bman_low.h
@@ -0,0 +1,565 @@
+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_private.h"
+
+/***************************/
+/* Portal register assists */
+/***************************/
+
+#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
+
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH 0x0000
+#define BM_REG_RCR_CI_CINH 0x0004
+#define BM_REG_RCR_ITR 0x0008
+#define BM_REG_CFG 0x0100
+#define BM_REG_SCN(n) (0x0200 + ((n) << 2))
+#define BM_REG_ISR 0x0e00
+#define BM_REG_IIR 0x0e0c
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR 0x0000
+#define BM_CL_RR0 0x0100
+#define BM_CL_RR1 0x0140
+#define BM_CL_RCR 0x1000
+#define BM_CL_RCR_PI_CENA 0x3000
+#define BM_CL_RCR_CI_CENA 0x3100
+
+#endif
+
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+
+/* Cache-inhibited register offsets */
+#define BM_REG_RCR_PI_CINH 0x3000
+#define BM_REG_RCR_CI_CINH 0x3100
+#define BM_REG_RCR_ITR 0x3200
+#define BM_REG_CFG 0x3300
+#define BM_REG_SCN(n) (0x3400 + ((n) << 6))
+#define BM_REG_ISR 0x3e00
+#define BM_REG_IIR 0x3ec0
+
+/* Cache-enabled register offsets */
+#define BM_CL_CR 0x0000
+#define BM_CL_RR0 0x0100
+#define BM_CL_RR1 0x0140
+#define BM_CL_RCR 0x1000
+#define BM_CL_RCR_PI_CENA 0x3000
+#define BM_CL_RCR_CI_CENA 0x3100
+
+#endif
+
+/* BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses via lwsync(), hwsync(), and
+ * data-dependencies. Use of barrier()s or other order-preserving primitives
+ * simply degrades performance. Hence the use of the __raw_*() interfaces, which
+ * simply ensure that the compiler treats the portal registers as volatile (ie.
+ * non-coherent). */
+
+/* Cache-inhibited register access. */
+#define __bm_in(bm, o) be32_to_cpu(__raw_readl((bm)->addr_ci + (o)))
+#define __bm_out(bm, o, val) __raw_writel(cpu_to_be32(val), \
+ (bm)->addr_ci + (o));
+#define bm_in(reg) __bm_in(&portal->addr, BM_REG_##reg)
+#define bm_out(reg, val) __bm_out(&portal->addr, BM_REG_##reg, val)
+
+/* Cache-enabled (index) register access */
+#define __bm_cl_touch_ro(bm, o) dcbt_ro((bm)->addr_ce + (o))
+#define __bm_cl_touch_rw(bm, o) dcbt_rw((bm)->addr_ce + (o))
+#define __bm_cl_in(bm, o) be32_to_cpu(__raw_readl((bm)->addr_ce + (o)))
+#define __bm_cl_out(bm, o, val) \
+ do { \
+ u32 *__tmpclout = (bm)->addr_ce + (o); \
+ __raw_writel(cpu_to_be32(val), __tmpclout); \
+ dcbf(__tmpclout); \
+ } while (0)
+#define __bm_cl_invalidate(bm, o) dcbi((bm)->addr_ce + (o))
+#define bm_cl_touch_ro(reg) __bm_cl_touch_ro(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_touch_rw(reg) __bm_cl_touch_rw(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_in(reg) __bm_cl_in(&portal->addr, BM_CL_##reg##_CENA)
+#define bm_cl_out(reg, val) __bm_cl_out(&portal->addr, BM_CL_##reg##_CENA, val)
+#define bm_cl_invalidate(reg)\
+ __bm_cl_invalidate(&portal->addr, BM_CL_##reg##_CENA)
+
+/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
+ * analysis, look at using the "extra" bit in the ring index registers to avoid
+ * cyclic issues. */
+static inline u8 bm_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ return ringsize + last - first;
+}
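+
+/* Worked example: with ringsize=8, first=6 and last=2, the distance is
+ * 8 + 2 - 6 = 4 entries (indices 6, 7, 0, 1); 'last' itself is excluded. */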
+
+/* Portal modes.
+ * Enum types:
+ *   pmode == production mode
+ *   cmode == consumption mode
+ * Enum values use 3 letter codes. First letter matches the portal mode,
+ * remaining two letters indicate:
+ *   ci == cache-inhibited portal register
+ *   ce == cache-enabled portal register
+ *   vb == in-band valid-bit (cache-enabled)
+ */
+enum bm_rcr_pmode { /* matches BCSP_CFG::RPM */
+ bm_rcr_pci = 0, /* PI index, cache-inhibited */
+ bm_rcr_pce = 1, /* PI index, cache-enabled */
+ bm_rcr_pvb = 2 /* valid-bit */
+};
+enum bm_rcr_cmode { /* s/w-only */
+ bm_rcr_cci, /* CI index, cache-inhibited */
+ bm_rcr_cce /* CI index, cache-enabled */
+};
+
+
+/* ------------------------- */
+/* --- Portal structures --- */
+
+#define BM_RCR_SIZE 8
+
+struct bm_rcr {
+ struct bm_rcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ u32 busy;
+ enum bm_rcr_pmode pmode;
+ enum bm_rcr_cmode cmode;
+#endif
+};
+
+struct bm_mc {
+ struct bm_mc_command *cr;
+ struct bm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ enum {
+ /* Can only be _mc_start()ed */
+ mc_idle,
+ /* Can only be _mc_commit()ed or _mc_abort()ed */
+ mc_user,
+ /* Can only be _mc_retry()ed */
+ mc_hw
+ } state;
+#endif
+};
+
+struct bm_addr {
+ void __iomem *addr_ce; /* cache-enabled */
+ void __iomem *addr_ci; /* cache-inhibited */
+};
+
+struct bm_portal {
+ struct bm_addr addr;
+ struct bm_rcr rcr;
+ struct bm_mc mc;
+ struct bm_portal_config config;
+} ____cacheline_aligned;
+
+
+/* --------------- */
+/* --- RCR API --- */
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+#define RCR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(BM_RCR_SIZE << 6)))
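+
+/* Worked example: RCR entries are 64 bytes, so an 8-deep ring spans
+ * BM_RCR_SIZE << 6 == 0x200 bytes. Incrementing the cursor past the last
+ * entry sets that 0x200 "carry" bit, and masking it off wraps the pointer
+ * back to the start of the ring. */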
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static inline u8 RCR_PTR2IDX(struct bm_rcr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (BM_RCR_SIZE - 1);
+}
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void RCR_INC(struct bm_rcr *rcr)
+{
+ /* NB: this is odd-looking, but experiments show that it generates
+ * fast code with essentially no branching overheads. We increment to
+ * the next RCR pointer and handle overflow and 'vbit'. */
+ struct bm_rcr_entry *partial = rcr->cursor + 1;
+ rcr->cursor = RCR_CARRYCLEAR(partial);
+ if (partial != rcr->cursor)
+ rcr->vbit ^= BM_RCR_VERB_VBIT;
+}
+
+static inline int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
+ __maybe_unused enum bm_rcr_cmode cmode)
+{
+ /* This use of 'register', as well as all other occurrences, is because
+ * it has been observed to generate much faster code with gcc than is
+ * otherwise the case. */
+ register struct bm_rcr *rcr = &portal->rcr;
+ u32 cfg;
+ u8 pi;
+
+ rcr->ring = portal->addr.addr_ce + BM_CL_RCR;
+ rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+
+ pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ rcr->cursor = rcr->ring + pi;
+ rcr->vbit = (bm_in(RCR_PI_CINH) & BM_RCR_SIZE) ? BM_RCR_VERB_VBIT : 0;
+ rcr->available = BM_RCR_SIZE - 1
+ - bm_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
+ rcr->ithresh = bm_in(RCR_ITR);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ rcr->busy = 0;
+ rcr->pmode = pmode;
+ rcr->cmode = cmode;
+#endif
+ cfg = (bm_in(CFG) & 0xffffffe0) | (pmode & 0x3); /* BCSP_CFG::RPM */
+ bm_out(CFG, cfg);
+ return 0;
+}
+
+static inline void bm_rcr_finish(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ u8 pi = bm_in(RCR_PI_CINH) & (BM_RCR_SIZE - 1);
+ u8 ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+ DPA_ASSERT(!rcr->busy);
+ if (pi != RCR_PTR2IDX(rcr->cursor))
+		pr_crit("losing uncommitted RCR entries\n");
+ if (ci != rcr->ci)
+ pr_crit("missing existing RCR completions\n");
+ if (rcr->ci != RCR_PTR2IDX(rcr->cursor))
+ pr_crit("RCR destroyed unquiesced\n");
+}
+
+static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ DPA_ASSERT(!rcr->busy);
+ if (!rcr->available)
+ return NULL;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ rcr->busy = 1;
+#endif
+#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
+ dcbz_64(rcr->cursor);
+#endif
+ return rcr->cursor;
+}
+
+static inline void bm_rcr_abort(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+ DPA_ASSERT(rcr->busy);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ rcr->busy = 0;
+#endif
+}
+
+static inline struct bm_rcr_entry *bm_rcr_pend_and_next(
+ struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ DPA_ASSERT(rcr->busy);
+ DPA_ASSERT(rcr->pmode != bm_rcr_pvb);
+ if (rcr->available == 1)
+ return NULL;
+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ dcbf_64(rcr->cursor);
+ RCR_INC(rcr);
+ rcr->available--;
+#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
+ dcbz_64(rcr->cursor);
+#endif
+ return rcr->cursor;
+}
+
+static inline void bm_rcr_pci_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ DPA_ASSERT(rcr->busy);
+ DPA_ASSERT(rcr->pmode == bm_rcr_pci);
+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ RCR_INC(rcr);
+ rcr->available--;
+ hwsync();
+ bm_out(RCR_PI_CINH, RCR_PTR2IDX(rcr->cursor));
+#ifdef CONFIG_FSL_DPA_CHECKING
+ rcr->busy = 0;
+#endif
+}
+
+static inline void bm_rcr_pce_prefetch(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+ DPA_ASSERT(rcr->pmode == bm_rcr_pce);
+ bm_cl_invalidate(RCR_PI);
+ bm_cl_touch_rw(RCR_PI);
+}
+
+static inline void bm_rcr_pce_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ DPA_ASSERT(rcr->busy);
+ DPA_ASSERT(rcr->pmode == bm_rcr_pce);
+ rcr->cursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ RCR_INC(rcr);
+ rcr->available--;
+ lwsync();
+ bm_cl_out(RCR_PI, RCR_PTR2IDX(rcr->cursor));
+#ifdef CONFIG_FSL_DPA_CHECKING
+ rcr->busy = 0;
+#endif
+}
+
+static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ struct bm_rcr_entry *rcursor;
+ DPA_ASSERT(rcr->busy);
+ DPA_ASSERT(rcr->pmode == bm_rcr_pvb);
+ lwsync();
+ rcursor = rcr->cursor;
+ rcursor->__dont_write_directly__verb = myverb | rcr->vbit;
+ dcbf_64(rcursor);
+ RCR_INC(rcr);
+ rcr->available--;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ rcr->busy = 0;
+#endif
+}
+
+static inline u8 bm_rcr_cci_update(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ u8 diff, old_ci = rcr->ci;
+ DPA_ASSERT(rcr->cmode == bm_rcr_cci);
+ rcr->ci = bm_in(RCR_CI_CINH) & (BM_RCR_SIZE - 1);
+ diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+ rcr->available += diff;
+ return diff;
+}
+
+static inline void bm_rcr_cce_prefetch(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_rcr *rcr = &portal->rcr;
+ DPA_ASSERT(rcr->cmode == bm_rcr_cce);
+ bm_cl_touch_ro(RCR_CI);
+}
+
+static inline u8 bm_rcr_cce_update(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ u8 diff, old_ci = rcr->ci;
+ DPA_ASSERT(rcr->cmode == bm_rcr_cce);
+ rcr->ci = bm_cl_in(RCR_CI) & (BM_RCR_SIZE - 1);
+ bm_cl_invalidate(RCR_CI);
+ diff = bm_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
+ rcr->available += diff;
+ return diff;
+}
+
+static inline u8 bm_rcr_get_ithresh(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ return rcr->ithresh;
+}
+
+static inline void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ rcr->ithresh = ithresh;
+ bm_out(RCR_ITR, ithresh);
+}
+
+static inline u8 bm_rcr_get_avail(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ return rcr->available;
+}
+
+static inline u8 bm_rcr_get_fill(struct bm_portal *portal)
+{
+ register struct bm_rcr *rcr = &portal->rcr;
+ return BM_RCR_SIZE - 1 - rcr->available;
+}
+
+
+/* ------------------------------ */
+/* --- Management command API --- */
+
+static inline int bm_mc_init(struct bm_portal *portal)
+{
+ register struct bm_mc *mc = &portal->mc;
+ mc->cr = portal->addr.addr_ce + BM_CL_CR;
+ mc->rr = portal->addr.addr_ce + BM_CL_RR0;
+ mc->rridx = (__raw_readb(&mc->cr->__dont_write_directly__verb) &
+ BM_MCC_VERB_VBIT) ? 0 : 1;
+ mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = mc_idle;
+#endif
+ return 0;
+}
+
+static inline void bm_mc_finish(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_mc *mc = &portal->mc;
+ DPA_ASSERT(mc->state == mc_idle);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (mc->state != mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
+{
+ register struct bm_mc *mc = &portal->mc;
+ DPA_ASSERT(mc->state == mc_idle);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = mc_user;
+#endif
+#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
+ dcbz_64(mc->cr);
+#endif
+ return mc->cr;
+}
+
+static inline void bm_mc_abort(struct bm_portal *portal)
+{
+ __maybe_unused register struct bm_mc *mc = &portal->mc;
+ DPA_ASSERT(mc->state == mc_user);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = mc_idle;
+#endif
+}
+
+static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
+{
+ register struct bm_mc *mc = &portal->mc;
+ struct bm_mc_result *rr = mc->rr + mc->rridx;
+ DPA_ASSERT(mc->state == mc_user);
+ lwsync();
+ mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
+ dcbf(mc->cr);
+ dcbit_ro(rr);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = mc_hw;
+#endif
+}
+
+static inline struct bm_mc_result *bm_mc_result(struct bm_portal *portal)
+{
+ register struct bm_mc *mc = &portal->mc;
+ struct bm_mc_result *rr = mc->rr + mc->rridx;
+ DPA_ASSERT(mc->state == mc_hw);
+ /* The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering... */
+ if (!__raw_readb(&rr->verb)) {
+ dcbit_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= BM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = mc_idle;
+#endif
+ return rr;
+}
+
+
+/* ------------------------------------- */
+/* --- Portal interrupt register API --- */
+
+static inline int bm_isr_init(__always_unused struct bm_portal *portal)
+{
+ return 0;
+}
+
+static inline void bm_isr_finish(__always_unused struct bm_portal *portal)
+{
+}
+
+#define SCN_REG(bpid) BM_REG_SCN((bpid) / 32)
+#define SCN_BIT(bpid) (0x80000000 >> ((bpid) & 31))
+static inline void bm_isr_bscn_mask(struct bm_portal *portal, u8 bpid,
+ int enable)
+{
+ u32 val;
+ DPA_ASSERT(bpid < bman_pool_max);
+ /* REG_SCN for bpid=0..31, REG_SCN+4 for bpid=32..63 */
+ val = __bm_in(&portal->addr, SCN_REG(bpid));
+ if (enable)
+ val |= SCN_BIT(bpid);
+ else
+ val &= ~SCN_BIT(bpid);
+ __bm_out(&portal->addr, SCN_REG(bpid), val);
+}
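+
+/* Worked example: bpid 40 lands in the second SCN word (SCN_REG(40) ==
+ * BM_REG_SCN(1)) and maps to bit 0x80000000 >> 8, ie. the bit for
+ * bpid % 32 == 8. */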
+
+static inline u32 __bm_isr_read(struct bm_portal *portal, enum bm_isr_reg n)
+{
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ return __bm_in(&portal->addr, BM_REG_ISR + (n << 6));
+#else
+ return __bm_in(&portal->addr, BM_REG_ISR + (n << 2));
+#endif
+}
+
+static inline void __bm_isr_write(struct bm_portal *portal, enum bm_isr_reg n,
+ u32 val)
+{
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ __bm_out(&portal->addr, BM_REG_ISR + (n << 6), val);
+#else
+ __bm_out(&portal->addr, BM_REG_ISR + (n << 2), val);
+#endif
+}
+
+/* Buffer Pool Cleanup */
+static inline int bm_shutdown_pool(struct bm_portal *p, u32 bpid)
+{
+ struct bm_mc_command *bm_cmd;
+ struct bm_mc_result *bm_res;
+
+ int aq_count = 0;
+ bool stop = false;
+ while (!stop) {
+ /* Acquire buffers until empty */
+ bm_cmd = bm_mc_start(p);
+ bm_cmd->acquire.bpid = bpid;
+ bm_mc_commit(p, BM_MCC_VERB_CMD_ACQUIRE | 1);
+ while (!(bm_res = bm_mc_result(p)))
+ cpu_relax();
+ if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
+ /* Pool is empty */
+			/* TBD: should we do a few extra iterations in case
+			 * some other blocks keep buffers 'on deck', which may
+			 * also be problematic? */
+ stop = true;
+ } else
+ ++aq_count;
+ }
+ return 0;
+}
diff --git a/drivers/staging/fsl_qbman/bman_private.h b/drivers/staging/fsl_qbman/bman_private.h
new file mode 100644
index 000000000000..64eefe7da65d
--- /dev/null
+++ b/drivers/staging/fsl_qbman/bman_private.h
@@ -0,0 +1,166 @@
+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "dpa_sys.h"
+#include <linux/fsl_bman.h>
+
+/* Revision info (for errata and feature handling) */
+#define BMAN_REV10 0x0100
+#define BMAN_REV20 0x0200
+#define BMAN_REV21 0x0201
+#define QBMAN_ANY_PORTAL_IDX 0xffffffff
+extern u16 bman_ip_rev; /* 0 if uninitialised, otherwise BMAN_REVx */
+
+/*
+ * Global variable: the max number of buffer pools supported by this Bman
+ * version
+ */
+extern u16 bman_pool_max;
+
+/* used by CCSR and portal interrupt code */
+enum bm_isr_reg {
+ bm_isr_status = 0,
+ bm_isr_enable = 1,
+ bm_isr_disable = 2,
+ bm_isr_inhibit = 3
+};
+
+struct bm_portal_config {
+ /* Corenet portal addresses;
+ * [0]==cache-enabled, [1]==cache-inhibited. */
+ __iomem void *addr_virt[2];
+ struct resource addr_phys[2];
+ /* Allow these to be joined in lists */
+ struct list_head list;
+ /* User-visible portal configuration settings */
+ struct bman_portal_config public_cfg;
+ /* power management saved data */
+ u32 saved_isdr;
+};
+
+#ifdef CONFIG_FSL_BMAN_CONFIG
+/* Hooks from bman_driver.c to bman_config.c */
+int bman_init_ccsr(struct device_node *node);
+#endif
+
+/* Hooks from bman_driver.c in to bman_high.c */
+struct bman_portal *bman_create_portal(
+ struct bman_portal *portal,
+ const struct bm_portal_config *config);
+struct bman_portal *bman_create_affine_portal(
+ const struct bm_portal_config *config);
+struct bman_portal *bman_create_affine_slave(struct bman_portal *redirect,
+ int cpu);
+void bman_destroy_portal(struct bman_portal *bm);
+
+const struct bm_portal_config *bman_destroy_affine_portal(void);
+
+/* Hooks from fsl_usdpaa.c to bman_driver.c */
+struct bm_portal_config *bm_get_unused_portal(void);
+struct bm_portal_config *bm_get_unused_portal_idx(uint32_t idx);
+void bm_put_unused_portal(struct bm_portal_config *pcfg);
+void bm_set_liodns(struct bm_portal_config *pcfg);
+
+/* Pool logic in the portal driver, during initialisation, needs to know if
+ * there's access to CCSR or not (if not, it'll cripple the pool allocator). */
+#ifdef CONFIG_FSL_BMAN_CONFIG
+int bman_have_ccsr(void);
+#else
+#define bman_have_ccsr() 0
+#endif
+
+/* Stockpile build constants. The _LOW value: when bman_acquire() is called and
+ * the stockpile fill-level is <= _LOW, an acquire is attempted from h/w but it
+ * might fail (if the buffer pool is depleted). So this value provides some
+ * "stagger" in that the bman_acquire() function will only fail if lots of bufs
+ * are requested at once or if h/w has been tested a couple of times without
+ * luck. The _HIGH value: when bman_release() is called and the stockpile
+ * fill-level is >= _HIGH, a release is attempted to h/w but it might fail (if
+ * the release ring is full). So this value provides some "stagger" so that
+ * ring-access is retried a couple of times prior to the API returning a
+ * failure. The following *must* be true:
+ * BMAN_STOCKPILE_HIGH-BMAN_STOCKPILE_LOW > 8
+ * (to avoid thrashing)
+ * BMAN_STOCKPILE_SZ >= 16
+ * (as the release logic expects to either send 8 buffers to hw prior to
+ * adding the given buffers to the stockpile or add the buffers to the
+ * stockpile before sending 8 to hw, as the API must be an all-or-nothing
+ * success/fail.)
+ */
+#define BMAN_STOCKPILE_SZ 16u /* number of bufs in per-pool cache */
+#define BMAN_STOCKPILE_LOW 2u /* when fill is <= this, acquire from hw */
+#define BMAN_STOCKPILE_HIGH 14u /* when fill is >= this, release to hw */
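+
+/* Worked example: with a stockpile at fill 14 (== _HIGH), releasing 2 more
+ * buffers first lands them in the stockpile (fill 16 <= _SZ), then the
+ * fill >= _HIGH check pushes one batch of 8 to h/w, leaving fill 8. */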
+
+/*************************************************/
+/* BMan s/w corenet portal, low-level i/face */
+/*************************************************/
+
+/* Used by all portal interrupt registers except 'inhibit'
+ * This mask contains all the "irqsource" bits visible to API users
+ */
+#define BM_PIRQ_VISIBLE (BM_PIRQ_RCRI | BM_PIRQ_BSCN)
+
+/* These are bm_<reg>_<verb>(). So for example, bm_isr_disable_write() means
+ * "write the disable register" rather than "disable the ability to write". */
+#define bm_isr_status_read(bm) __bm_isr_read(bm, bm_isr_status)
+#define bm_isr_status_clear(bm, m) __bm_isr_write(bm, bm_isr_status, m)
+#define bm_isr_enable_read(bm) __bm_isr_read(bm, bm_isr_enable)
+#define bm_isr_enable_write(bm, v) __bm_isr_write(bm, bm_isr_enable, v)
+#define bm_isr_disable_read(bm) __bm_isr_read(bm, bm_isr_disable)
+#define bm_isr_disable_write(bm, v) __bm_isr_write(bm, bm_isr_disable, v)
+#define bm_isr_inhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 1)
+#define bm_isr_uninhibit(bm) __bm_isr_write(bm, bm_isr_inhibit, 0)
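+
+/* For instance, with the accessors above, a (hypothetical) portal ISR could
+ * read and acknowledge its visible interrupt sources as:
+ *
+ *	u32 sources = bm_isr_status_read(bm) & BM_PIRQ_VISIBLE;
+ *	... service the sources ...
+ *	bm_isr_status_clear(bm, sources);
+ */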
+
+#ifdef CONFIG_FSL_BMAN_CONFIG
+/* Set depletion thresholds associated with a buffer pool. Requires that the
+ * operating system have access to Bman CCSR (ie. compiled in support and
+ * run-time access courtesy of the device-tree). */
+int bm_pool_set(u32 bpid, const u32 *thresholds);
+#define BM_POOL_THRESH_SW_ENTER 0
+#define BM_POOL_THRESH_SW_EXIT 1
+#define BM_POOL_THRESH_HW_ENTER 2
+#define BM_POOL_THRESH_HW_EXIT 3
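+
+/* Usage sketch (threshold values arbitrary, 'bpid' assumed valid): the
+ * thresholds array is indexed by the four constants above, e.g.
+ *
+ *	const u32 thresholds[4] = {
+ *		[BM_POOL_THRESH_SW_ENTER] = 1024,
+ *		[BM_POOL_THRESH_SW_EXIT]  = 2048,
+ *		[BM_POOL_THRESH_HW_ENTER] = 256,
+ *		[BM_POOL_THRESH_HW_EXIT]  = 512,
+ *	};
+ *	int err = bm_pool_set(bpid, thresholds);
+ */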
+
+/* Read the free buffer count for a given buffer pool */
+u32 bm_pool_free_buffers(u32 bpid);
+
+__init int bman_init(void);
+__init int bman_resource_init(void);
+
+const struct bm_portal_config *bman_get_bm_portal_config(
+ struct bman_portal *portal);
+
+/* power management */
+#ifdef CONFIG_SUSPEND
+void suspend_unused_bportal(void);
+void resume_unused_bportal(void);
+#endif
+
+#endif /* CONFIG_FSL_BMAN_CONFIG */
diff --git a/drivers/staging/fsl_qbman/bman_test.c b/drivers/staging/fsl_qbman/bman_test.c
new file mode 100644
index 000000000000..db5b7fd303e2
--- /dev/null
+++ b/drivers/staging/fsl_qbman/bman_test.c
@@ -0,0 +1,56 @@
+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Bman testing");
+
+static int test_init(void)
+{
+#ifdef CONFIG_FSL_BMAN_TEST_HIGH
+ int loop = 1;
+ while (loop--)
+ bman_test_high();
+#endif
+#ifdef CONFIG_FSL_BMAN_TEST_THRESH
+ bman_test_thresh();
+#endif
+ return 0;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/drivers/staging/fsl_qbman/bman_test.h b/drivers/staging/fsl_qbman/bman_test.h
new file mode 100644
index 000000000000..fcd650566e3e
--- /dev/null
+++ b/drivers/staging/fsl_qbman/bman_test.h
@@ -0,0 +1,44 @@
+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+
+#include <linux/fsl_bman.h>
+
+void bman_test_high(void);
+void bman_test_thresh(void);
diff --git a/drivers/staging/fsl_qbman/bman_test_high.c b/drivers/staging/fsl_qbman/bman_test_high.c
new file mode 100644
index 000000000000..1617a531679b
--- /dev/null
+++ b/drivers/staging/fsl_qbman/bman_test_high.c
@@ -0,0 +1,183 @@
+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+#include "bman_private.h"
+
+/*************/
+/* constants */
+/*************/
+
+#define PORTAL_OPAQUE ((void *)0xf00dbeef)
+#define POOL_OPAQUE ((void *)0xdeadabba)
+#define NUM_BUFS 93
+#define LOOPS 3
+#define BMAN_TOKEN_MASK 0x00FFFFFFFFFFLLU
+
+/***************/
+/* global vars */
+/***************/
+
+static struct bman_pool *pool;
+static int depleted;
+static struct bm_buffer bufs_in[NUM_BUFS] ____cacheline_aligned;
+static struct bm_buffer bufs_out[NUM_BUFS] ____cacheline_aligned;
+static int bufs_received;
+
+/* Predeclare the callback so we can instantiate pool parameters */
+static void depletion_cb(struct bman_portal *, struct bman_pool *, void *, int);
+
+/**********************/
+/* internal functions */
+/**********************/
+
+static void bufs_init(void)
+{
+ int i;
+ for (i = 0; i < NUM_BUFS; i++)
+ bm_buffer_set64(&bufs_in[i], 0xfedc01234567LLU * i);
+ bufs_received = 0;
+}
+
+static inline int bufs_cmp(const struct bm_buffer *a, const struct bm_buffer *b)
+{
+ if ((bman_ip_rev == BMAN_REV20) || (bman_ip_rev == BMAN_REV21)) {
+ /* On SoCs with Bman revision 2.0, Bman only respects the 40
+ * LS-bits of buffer addresses, masking off the upper 8-bits on
+ * release commands. The API provides for 48-bit addresses
+ * because some SoCs support all 48-bits. When generating
+ * garbage addresses for testing, we either need to zero the
+ * upper 8-bits when releasing to Bman (otherwise we'll be
+ * disappointed when the buffers we acquire back from Bman
+ * don't match), or we need to mask the upper 8-bits off when
+ * comparing. We do the latter.
+ */
+ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK)
+ < (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+ return -1;
+ if ((bm_buffer_get64(a) & BMAN_TOKEN_MASK)
+ > (bm_buffer_get64(b) & BMAN_TOKEN_MASK))
+ return 1;
+ } else {
+ if (bm_buffer_get64(a) < bm_buffer_get64(b))
+ return -1;
+ if (bm_buffer_get64(a) > bm_buffer_get64(b))
+ return 1;
+ }
+
+ return 0;
+}
+
+static void bufs_confirm(void)
+{
+ int i, j;
+ for (i = 0; i < NUM_BUFS; i++) {
+ int matches = 0;
+ for (j = 0; j < NUM_BUFS; j++)
+ if (!bufs_cmp(&bufs_in[i], &bufs_out[j]))
+ matches++;
+ BUG_ON(matches != 1);
+ }
+}
+
+/********/
+/* test */
+/********/
+
+static void depletion_cb(struct bman_portal *__portal, struct bman_pool *__pool,
+ void *pool_ctx, int __depleted)
+{
+ BUG_ON(__pool != pool);
+ BUG_ON(pool_ctx != POOL_OPAQUE);
+ depleted = __depleted;
+}
+
+void bman_test_high(void)
+{
+ struct bman_pool_params pparams = {
+ .flags = BMAN_POOL_FLAG_DEPLETION | BMAN_POOL_FLAG_DYNAMIC_BPID,
+ .cb = depletion_cb,
+ .cb_ctx = POOL_OPAQUE,
+ };
+ int i, loops = LOOPS;
+ struct bm_buffer tmp_buf;
+
+ bufs_init();
+
+ pr_info("BMAN: --- starting high-level test ---\n");
+
+ pool = bman_new_pool(&pparams);
+ BUG_ON(!pool);
+
+ /*******************/
+ /* Release buffers */
+ /*******************/
+do_loop:
+ i = 0;
+ while (i < NUM_BUFS) {
+ u32 flags = BMAN_RELEASE_FLAG_WAIT;
+ int num = 8;
+ if ((i + num) > NUM_BUFS)
+ num = NUM_BUFS - i;
+ if ((i + num) == NUM_BUFS)
+ flags |= BMAN_RELEASE_FLAG_WAIT_SYNC;
+ if (bman_release(pool, bufs_in + i, num, flags))
+ panic("bman_release() failed\n");
+ i += num;
+ }
+
+ /*******************/
+ /* Acquire buffers */
+ /*******************/
+ while (i > 0) {
+ int tmp, num = 8;
+ if (num > i)
+ num = i;
+ tmp = bman_acquire(pool, bufs_out + i - num, num, 0);
+ BUG_ON(tmp != num);
+ i -= num;
+ }
+
+ i = bman_acquire(pool, &tmp_buf, 1, 0);
+ BUG_ON(i > 0);
+
+ bufs_confirm();
+
+ if (--loops)
+ goto do_loop;
+
+ /************/
+ /* Clean up */
+ /************/
+ bman_free_pool(pool);
+ pr_info("BMAN: --- finished high-level test ---\n");
+}
diff --git a/drivers/staging/fsl_qbman/bman_test_thresh.c b/drivers/staging/fsl_qbman/bman_test_thresh.c
new file mode 100644
index 000000000000..67093693609e
--- /dev/null
+++ b/drivers/staging/fsl_qbman/bman_test_thresh.c
@@ -0,0 +1,196 @@
+/* Copyright 2010-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "bman_test.h"
+
+/* Test constants */
+#define TEST_NUMBUFS 129728
+#define TEST_EXIT 129536
+#define TEST_ENTRY 129024
+
+struct affine_test_data {
+ struct task_struct *t;
+ int cpu;
+ int expect_affinity;
+ int drain;
+ int num_enter;
+ int num_exit;
+ struct list_head node;
+ struct completion wakethread;
+ struct completion wakeparent;
+};
+
+static void cb_depletion(struct bman_portal *portal,
+ struct bman_pool *pool,
+ void *opaque,
+ int depleted)
+{
+ struct affine_test_data *data = opaque;
+ int c = smp_processor_id();
+ pr_info("cb_depletion: bpid=%d, depleted=%d, cpu=%d, original=%d\n",
+ bman_get_params(pool)->bpid, !!depleted, c, data->cpu);
+ /* We should be executing on the CPU of the thread that owns the pool if,
+ * and only if, that CPU has an affine portal (ie. it isn't slaved). */
+ BUG_ON((c != data->cpu) && data->expect_affinity);
+ BUG_ON((c == data->cpu) && !data->expect_affinity);
+ if (depleted)
+ data->num_enter++;
+ else
+ data->num_exit++;
+}
+
+/* Params used to set up a pool, this also dynamically allocates a BPID */
+static const struct bman_pool_params params_nocb = {
+ .flags = BMAN_POOL_FLAG_DYNAMIC_BPID | BMAN_POOL_FLAG_THRESH,
+ .thresholds = { TEST_ENTRY, TEST_EXIT, 0, 0 }
+};
+
+/* Params used to set up each cpu's pool with callbacks enabled */
+static struct bman_pool_params params_cb = {
+ .bpid = 0, /* will be replaced to match pool_nocb */
+ .flags = BMAN_POOL_FLAG_DEPLETION,
+ .cb = cb_depletion
+};
+
+static struct bman_pool *pool_nocb;
+static LIST_HEAD(threads);
+
+static int affine_test(void *__data)
+{
+ struct bman_pool *pool;
+ struct affine_test_data *data = __data;
+ struct bman_pool_params my_params = params_cb;
+
+ pr_info("thread %d: starting\n", data->cpu);
+ /* create the pool */
+ my_params.cb_ctx = data;
+ pool = bman_new_pool(&my_params);
+ BUG_ON(!pool);
+ complete(&data->wakeparent);
+ wait_for_completion(&data->wakethread);
+ init_completion(&data->wakethread);
+
+ /* if we're the drainer, we get signalled for that */
+ if (data->drain) {
+ struct bm_buffer buf;
+ int ret;
+ pr_info("thread %d: draining...\n", data->cpu);
+ do {
+ ret = bman_acquire(pool, &buf, 1, 0);
+ } while (ret > 0);
+ pr_info("thread %d: draining done.\n", data->cpu);
+ complete(&data->wakeparent);
+ wait_for_completion(&data->wakethread);
+ init_completion(&data->wakethread);
+ }
+
+ /* cleanup */
+ bman_free_pool(pool);
+ while (!kthread_should_stop())
+ cpu_relax();
+ pr_info("thread %d: exiting\n", data->cpu);
+ return 0;
+}
+
+static struct affine_test_data *start_affine_test(int cpu, int drain)
+{
+ struct affine_test_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
+
+ if (!data)
+ return NULL;
+ data->cpu = cpu;
+ data->expect_affinity = cpumask_test_cpu(cpu, bman_affine_cpus());
+ data->drain = drain;
+ data->num_enter = 0;
+ data->num_exit = 0;
+ init_completion(&data->wakethread);
+ init_completion(&data->wakeparent);
+ list_add_tail(&data->node, &threads);
+ data->t = kthread_create(affine_test, data, "threshtest%d", cpu);
+ BUG_ON(IS_ERR(data->t));
+ kthread_bind(data->t, cpu);
+ wake_up_process(data->t);
+ return data;
+}
+
+void bman_test_thresh(void)
+{
+ int loop = TEST_NUMBUFS;
+ int ret, num_cpus = 0;
+ struct affine_test_data *data, *drainer = NULL;
+
+ pr_info("bman_test_thresh: start\n");
+
+ /* allocate a BPID and seed it */
+ pool_nocb = bman_new_pool(&params_nocb);
+ BUG_ON(!pool_nocb);
+ while (loop--) {
+ struct bm_buffer buf;
+ bm_buffer_set64(&buf, 0x0badbeef + loop);
+ ret = bman_release(pool_nocb, &buf, 1,
+ BMAN_RELEASE_FLAG_WAIT);
+ BUG_ON(ret);
+ }
+ while (!bman_rcr_is_empty())
+ cpu_relax();
+ pr_info("bman_test_thresh: buffers are in\n");
+
+ /* create threads and wait for them to create pools */
+ params_cb.bpid = bman_get_params(pool_nocb)->bpid;
+ for_each_cpu(loop, cpu_online_mask) {
+ data = start_affine_test(loop, drainer ? 0 : 1);
+ BUG_ON(!data);
+ if (!drainer)
+ drainer = data;
+ num_cpus++;
+ wait_for_completion(&data->wakeparent);
+ }
+
+ /* signal the drainer to start draining */
+ complete(&drainer->wakethread);
+ wait_for_completion(&drainer->wakeparent);
+ init_completion(&drainer->wakeparent);
+
+ /* tear down */
+ list_for_each_entry_safe(data, drainer, &threads, node) {
+ complete(&data->wakethread);
+ ret = kthread_stop(data->t);
+ BUG_ON(ret);
+ list_del(&data->node);
+ /* check that we get the expected callbacks (and no others) */
+ BUG_ON(data->num_enter != 1);
+ BUG_ON(data->num_exit != 0);
+ kfree(data);
+ }
+ bman_free_pool(pool_nocb);
+
+ pr_info("bman_test_thresh: done\n");
+}
diff --git a/drivers/staging/fsl_qbman/dpa_alloc.c b/drivers/staging/fsl_qbman/dpa_alloc.c
new file mode 100644
index 000000000000..44db3e1ed5c2
--- /dev/null
+++ b/drivers/staging/fsl_qbman/dpa_alloc.c
@@ -0,0 +1,706 @@
+/* Copyright 2009-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "dpa_sys.h"
+#include <linux/fsl_qman.h>
+#include <linux/fsl_bman.h>
+
+/* Qman and Bman APIs are front-ends to the common code below. */
+
+static DECLARE_DPA_ALLOC(bpalloc); /* BPID allocator */
+static DECLARE_DPA_ALLOC(fqalloc); /* FQID allocator */
+static DECLARE_DPA_ALLOC(qpalloc); /* pool-channel allocator */
+static DECLARE_DPA_ALLOC(cgralloc); /* CGR ID allocator */
+static DECLARE_DPA_ALLOC(ceetm0_challoc); /* CEETM Channel ID allocator */
+static DECLARE_DPA_ALLOC(ceetm0_lfqidalloc); /* CEETM LFQID allocator */
+static DECLARE_DPA_ALLOC(ceetm1_challoc); /* CEETM Channel ID allocator */
+static DECLARE_DPA_ALLOC(ceetm1_lfqidalloc); /* CEETM LFQID allocator */
+
+/* This is a sort-of-conditional dpa_alloc_free() routine. Eg. when releasing
+ * FQIDs (probably from user-space), it can filter out those that aren't in the
+ * OOS state (better to leak a h/w resource than to crash). This function
+ * returns the number of invalid IDs that were not released. */
+static u32 release_id_range(struct dpa_alloc *alloc, u32 id, u32 count,
+ int (*is_valid)(u32 id))
+{
+ int valid_mode = 0;
+ u32 loop = id, total_invalid = 0;
+ while (loop < (id + count)) {
+ int isvalid = is_valid ? is_valid(loop) : 1;
+ if (!valid_mode) {
+ /* We're looking for a valid ID to terminate an invalid
+ * range */
+ if (isvalid) {
+ /* We finished a range of invalid IDs, a valid
+ * range is now underway */
+ valid_mode = 1;
+ count -= (loop - id);
+ id = loop;
+ } else
+ total_invalid++;
+ } else {
+ /* We're looking for an invalid ID to terminate a
+ * valid range */
+ if (!isvalid) {
+ /* Release the range of valid IDs, an invalid
+ * range is now underway */
+ if (loop > id)
+ dpa_alloc_free(alloc, id, loop - id);
+ valid_mode = 0;
+ }
+ }
+ loop++;
+ }
+ /* Release any unterminated range of valid IDs */
+ if (valid_mode && count)
+ dpa_alloc_free(alloc, id, count);
+ return total_invalid;
+}
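+
+/* Worked example of the above: releasing IDs 50..53 where only ID 51 fails
+ * is_valid() frees the sub-ranges {50} and {52..53} back to the allocator and
+ * returns 1, the number of leaked (invalid) IDs. */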
+
+/* BPID allocator front-end */
+
+int bman_alloc_bpid_range(u32 *result, u32 count, u32 align, int partial)
+{
+ return dpa_alloc_new(&bpalloc, result, count, align, partial);
+}
+EXPORT_SYMBOL(bman_alloc_bpid_range);
+
+static int bp_cleanup(u32 bpid)
+{
+ return bman_shutdown_pool(bpid) == 0;
+}
+void bman_release_bpid_range(u32 bpid, u32 count)
+{
+ u32 total_invalid = release_id_range(&bpalloc, bpid, count, bp_cleanup);
+ if (total_invalid)
+ pr_err("BPID range [%d..%d] (%d) had %d leaks\n",
+ bpid, bpid + count - 1, count, total_invalid);
+}
+EXPORT_SYMBOL(bman_release_bpid_range);
+
+void bman_seed_bpid_range(u32 bpid, u32 count)
+{
+ dpa_alloc_seed(&bpalloc, bpid, count);
+}
+EXPORT_SYMBOL(bman_seed_bpid_range);
+
+int bman_reserve_bpid_range(u32 bpid, u32 count)
+{
+ return dpa_alloc_reserve(&bpalloc, bpid, count);
+}
+EXPORT_SYMBOL(bman_reserve_bpid_range);
+
+
+/* FQID allocator front-end */
+
+int qman_alloc_fqid_range(u32 *result, u32 count, u32 align, int partial)
+{
+ return dpa_alloc_new(&fqalloc, result, count, align, partial);
+}
+EXPORT_SYMBOL(qman_alloc_fqid_range);
+
+static int fq_cleanup(u32 fqid)
+{
+ return qman_shutdown_fq(fqid) == 0;
+}
+void qman_release_fqid_range(u32 fqid, u32 count)
+{
+ u32 total_invalid = release_id_range(&fqalloc, fqid, count, fq_cleanup);
+ if (total_invalid)
+ pr_err("FQID range [%d..%d] (%d) had %d leaks\n",
+ fqid, fqid + count - 1, count, total_invalid);
+}
+EXPORT_SYMBOL(qman_release_fqid_range);
+
+int qman_reserve_fqid_range(u32 fqid, u32 count)
+{
+ return dpa_alloc_reserve(&fqalloc, fqid, count);
+}
+EXPORT_SYMBOL(qman_reserve_fqid_range);
+
+void qman_seed_fqid_range(u32 fqid, u32 count)
+{
+ dpa_alloc_seed(&fqalloc, fqid, count);
+}
+EXPORT_SYMBOL(qman_seed_fqid_range);
+
+/* Pool-channel allocator front-end */
+
+int qman_alloc_pool_range(u32 *result, u32 count, u32 align, int partial)
+{
+ return dpa_alloc_new(&qpalloc, result, count, align, partial);
+}
+EXPORT_SYMBOL(qman_alloc_pool_range);
+
+static int qpool_cleanup(u32 qp)
+{
+ /* We query all FQDs starting from FQID 1 until we get an "invalid FQID"
+ * error, looking for non-OOS FQDs whose destination channel is the
+ * pool-channel being released. When a non-OOS FQD is found we attempt to
+ * clean it up */
+ struct qman_fq fq = {
+ .fqid = 1
+ };
+ int err;
+ do {
+ struct qm_mcr_queryfq_np np;
+ err = qman_query_fq_np(&fq, &np);
+ if (err)
+ /* FQID range exceeded, found no problems */
+ return 1;
+ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+ struct qm_fqd fqd;
+ err = qman_query_fq(&fq, &fqd);
+ BUG_ON(err);
+ if (fqd.dest.channel == qp) {
+ /* The channel is the FQ's target, clean it */
+ if (qman_shutdown_fq(fq.fqid) != 0)
+ /* Couldn't shut down the FQ,
+ * so the pool must be leaked */
+ return 0;
+ }
+ }
+ /* Move to the next FQID */
+ fq.fqid++;
+ } while (1);
+}
+void qman_release_pool_range(u32 qp, u32 count)
+{
+ u32 total_invalid = release_id_range(&qpalloc, qp,
+ count, qpool_cleanup);
+ if (total_invalid) {
+ /* Pool channels are almost always used individually */
+ if (count == 1)
+ pr_err("Pool channel 0x%x had %d leaks\n",
+ qp, total_invalid);
+ else
+ pr_err("Pool channels [%d..%d] (%d) had %d leaks\n",
+ qp, qp + count - 1, count, total_invalid);
+ }
+}
+EXPORT_SYMBOL(qman_release_pool_range);
+
+
+void qman_seed_pool_range(u32 poolid, u32 count)
+{
+ dpa_alloc_seed(&qpalloc, poolid, count);
+}
+EXPORT_SYMBOL(qman_seed_pool_range);
+
+int qman_reserve_pool_range(u32 poolid, u32 count)
+{
+ return dpa_alloc_reserve(&qpalloc, poolid, count);
+}
+EXPORT_SYMBOL(qman_reserve_pool_range);
+
+
+/* CGR ID allocator front-end */
+
+int qman_alloc_cgrid_range(u32 *result, u32 count, u32 align, int partial)
+{
+ return dpa_alloc_new(&cgralloc, result, count, align, partial);
+}
+EXPORT_SYMBOL(qman_alloc_cgrid_range);
+
+static int cgr_cleanup(u32 cgrid)
+{
+ /* We query all FQDs starting from FQID 1 until we get an "invalid FQID"
+ * error, looking for non-OOS FQDs whose CGR is the CGR being released.
+ */
+ struct qman_fq fq = {
+ .fqid = 1
+ };
+ int err;
+ do {
+ struct qm_mcr_queryfq_np np;
+ err = qman_query_fq_np(&fq, &np);
+ if (err)
+ /* FQID range exceeded, found no problems */
+ return 1;
+ if ((np.state & QM_MCR_NP_STATE_MASK) != QM_MCR_NP_STATE_OOS) {
+ struct qm_fqd fqd;
+ err = qman_query_fq(&fq, &fqd);
+ BUG_ON(err);
+ if ((fqd.fq_ctrl & QM_FQCTRL_CGE) &&
+ (fqd.cgid == cgrid)) {
+ pr_err("CRGID 0x%x is being used by FQID 0x%x,"
+ " CGR will be leaked\n",
+ cgrid, fq.fqid);
+ return 1;
+ }
+ }
+ /* Move to the next FQID */
+ fq.fqid++;
+ } while (1);
+}
+
+void qman_release_cgrid_range(u32 cgrid, u32 count)
+{
+ u32 total_invalid = release_id_range(&cgralloc, cgrid,
+ count, cgr_cleanup);
+ if (total_invalid)
+ pr_err("CGRID range [%d..%d] (%d) had %d leaks\n",
+ cgrid, cgrid + count - 1, count, total_invalid);
+}
+EXPORT_SYMBOL(qman_release_cgrid_range);
+
+void qman_seed_cgrid_range(u32 cgrid, u32 count)
+{
+ dpa_alloc_seed(&cgralloc, cgrid, count);
+}
+EXPORT_SYMBOL(qman_seed_cgrid_range);
+
+/* CEETM CHANNEL ID allocator front-end */
+int qman_alloc_ceetm0_channel_range(u32 *result, u32 count, u32 align,
+ int partial)
+{
+ return dpa_alloc_new(&ceetm0_challoc, result, count, align, partial);
+}
+EXPORT_SYMBOL(qman_alloc_ceetm0_channel_range);
+
+int qman_alloc_ceetm1_channel_range(u32 *result, u32 count, u32 align,
+ int partial)
+{
+ return dpa_alloc_new(&ceetm1_challoc, result, count, align, partial);
+}
+EXPORT_SYMBOL(qman_alloc_ceetm1_channel_range);
+
+void qman_release_ceetm0_channel_range(u32 channelid, u32 count)
+{
+ u32 total_invalid;
+
+ total_invalid = release_id_range(&ceetm0_challoc, channelid, count,
+ NULL);
+ if (total_invalid)
+ pr_err("CEETM channel range [%d..%d] (%d) had %d leaks\n",
+ channelid, channelid + count - 1, count, total_invalid);
+}
+EXPORT_SYMBOL(qman_release_ceetm0_channel_range);
+
+void qman_seed_ceetm0_channel_range(u32 channelid, u32 count)
+{
+ dpa_alloc_seed(&ceetm0_challoc, channelid, count);
+}
+EXPORT_SYMBOL(qman_seed_ceetm0_channel_range);
+
+void qman_release_ceetm1_channel_range(u32 channelid, u32 count)
+{
+ u32 total_invalid;
+ total_invalid = release_id_range(&ceetm1_challoc, channelid, count,
+ NULL);
+ if (total_invalid)
+ pr_err("CEETM channel range [%d..%d] (%d) had %d leaks\n",
+ channelid, channelid + count - 1, count, total_invalid);
+}
+EXPORT_SYMBOL(qman_release_ceetm1_channel_range);
+
+void qman_seed_ceetm1_channel_range(u32 channelid, u32 count)
+{
+ dpa_alloc_seed(&ceetm1_challoc, channelid, count);
+}
+EXPORT_SYMBOL(qman_seed_ceetm1_channel_range);
+
+/* CEETM LFQID allocator front-end */
+int qman_alloc_ceetm0_lfqid_range(u32 *result, u32 count, u32 align,
+ int partial)
+{
+ return dpa_alloc_new(&ceetm0_lfqidalloc, result, count, align, partial);
+}
+EXPORT_SYMBOL(qman_alloc_ceetm0_lfqid_range);
+
+int qman_alloc_ceetm1_lfqid_range(u32 *result, u32 count, u32 align,
+ int partial)
+{
+ return dpa_alloc_new(&ceetm1_lfqidalloc, result, count, align, partial);
+}
+EXPORT_SYMBOL(qman_alloc_ceetm1_lfqid_range);
+
+void qman_release_ceetm0_lfqid_range(u32 lfqid, u32 count)
+{
+ u32 total_invalid;
+
+ total_invalid = release_id_range(&ceetm0_lfqidalloc, lfqid, count,
+ NULL);
+ if (total_invalid)
+ pr_err("CEETM LFQID range [0x%x..0x%x] (%d) had %d leaks\n",
+ lfqid, lfqid + count - 1, count, total_invalid);
+}
+EXPORT_SYMBOL(qman_release_ceetm0_lfqid_range);
+
+void qman_seed_ceetm0_lfqid_range(u32 lfqid, u32 count)
+{
+ dpa_alloc_seed(&ceetm0_lfqidalloc, lfqid, count);
+}
+EXPORT_SYMBOL(qman_seed_ceetm0_lfqid_range);
+
+void qman_release_ceetm1_lfqid_range(u32 lfqid, u32 count)
+{
+ u32 total_invalid;
+
+ total_invalid = release_id_range(&ceetm1_lfqidalloc, lfqid, count,
+ NULL);
+ if (total_invalid)
+ pr_err("CEETM LFQID range [0x%x..0x%x] (%d) had %d leaks\n",
+ lfqid, lfqid + count - 1, count, total_invalid);
+}
+EXPORT_SYMBOL(qman_release_ceetm1_lfqid_range);
+
+void qman_seed_ceetm1_lfqid_range(u32 lfqid, u32 count)
+{
+ dpa_alloc_seed(&ceetm1_lfqidalloc, lfqid, count);
+}
+EXPORT_SYMBOL(qman_seed_ceetm1_lfqid_range);
+
+
+/* Everything else is the common backend to all the allocators */
+
+/* The allocator is a (possibly-empty) list of these; */
+struct alloc_node {
+ struct list_head list;
+ u32 base;
+ u32 num;
+ /* refcount and is_alloced are only set
+ * when the node is in the used list */
+ unsigned int refcount;
+ int is_alloced;
+};
+
+/* #define DPA_ALLOC_DEBUG */
+
+#ifdef DPA_ALLOC_DEBUG
+#define DPRINT pr_info
+static void DUMP(struct dpa_alloc *alloc)
+{
+ int off = 0;
+ char buf[256];
+ struct alloc_node *p;
+ pr_info("Free Nodes\n");
+ list_for_each_entry(p, &alloc->free, list) {
+ if (off < 255)
+ off += snprintf(buf + off, 255-off, "{%d,%d}",
+ p->base, p->base + p->num - 1);
+ }
+ pr_info("%s\n", buf);
+
+ off = 0;
+ pr_info("Used Nodes\n");
+ list_for_each_entry(p, &alloc->used, list) {
+ if (off < 255)
+ off += snprintf(buf + off, 255-off, "{%d,%d}",
+ p->base, p->base + p->num - 1);
+ }
+ pr_info("%s\n", buf);
+}
+#else
+#define DPRINT(x...)
+#define DUMP(a)
+#endif
+
+int dpa_alloc_new(struct dpa_alloc *alloc, u32 *result, u32 count, u32 align,
+ int partial)
+{
+ struct alloc_node *i = NULL, *next_best = NULL, *used_node = NULL;
+ u32 base, next_best_base = 0, num = 0, next_best_num = 0;
+ struct alloc_node *margin_left, *margin_right;
+
+ *result = (u32)-1;
+ DPRINT("alloc_range(%d,%d,%d)\n", count, align, partial);
+ DUMP(alloc);
+ /* If 'align' is 0, it should behave as though it was 1 */
+ if (!align)
+ align = 1;
+ margin_left = kmalloc(sizeof(*margin_left), GFP_KERNEL);
+ if (!margin_left)
+ goto err;
+ margin_right = kmalloc(sizeof(*margin_right), GFP_KERNEL);
+ if (!margin_right) {
+ kfree(margin_left);
+ goto err;
+ }
+ spin_lock_irq(&alloc->lock);
+ list_for_each_entry(i, &alloc->free, list) {
+ base = (i->base + align - 1) / align;
+ base *= align;
+ if ((base - i->base) >= i->num)
+ /* alignment is impossible, regardless of count */
+ continue;
+ num = i->num - (base - i->base);
+ if (num >= count) {
+ /* this one will do nicely */
+ num = count;
+ goto done;
+ }
+ if (num > next_best_num) {
+ next_best = i;
+ next_best_base = base;
+ next_best_num = num;
+ }
+ }
+ if (partial && next_best) {
+ i = next_best;
+ base = next_best_base;
+ num = next_best_num;
+ } else
+ i = NULL;
+done:
+ if (i) {
+ if (base != i->base) {
+ margin_left->base = i->base;
+ margin_left->num = base - i->base;
+ list_add_tail(&margin_left->list, &i->list);
+ } else
+ kfree(margin_left);
+ if ((base + num) < (i->base + i->num)) {
+ margin_right->base = base + num;
+ margin_right->num = (i->base + i->num) -
+ (base + num);
+ list_add(&margin_right->list, &i->list);
+ } else
+ kfree(margin_right);
+ list_del(&i->list);
+ kfree(i);
+ *result = base;
+ } else {
+ spin_unlock_irq(&alloc->lock);
+ kfree(margin_left);
+ kfree(margin_right);
+ }
+
+err:
+ DPRINT("returning %d\n", i ? num : -ENOMEM);
+ DUMP(alloc);
+ if (!i)
+ return -ENOMEM;
+
+ /* Add the allocation to the used list with a refcount of 1. The lock is
+ * still held (with IRQs disabled), so the allocation must not sleep */
+ used_node = kmalloc(sizeof(*used_node), GFP_ATOMIC);
+ if (!used_node) {
+ spin_unlock_irq(&alloc->lock);
+ return -ENOMEM;
+ }
+ used_node->base = *result;
+ used_node->num = num;
+ used_node->refcount = 1;
+ used_node->is_alloced = 1;
+ list_add_tail(&used_node->list, &alloc->used);
+ spin_unlock_irq(&alloc->lock);
+ return (int)num;
+}
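+
+/* Worked example: given a free node spanning {5..14}, dpa_alloc_new() with
+ * count=2, align=4 picks base 8, carves margin nodes {5..7} (left) and
+ * {10..14} (right), and returns 2 with *result == 8. */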
+
+/* Allocate the list node using GFP_ATOMIC, because we *really* want to avoid
+ * forcing error-handling onto users in the deallocation path. */
+static void _dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count)
+{
+ struct alloc_node *i, *node = kmalloc(sizeof(*node), GFP_ATOMIC);
+ BUG_ON(!node);
+ DPRINT("release_range(%d,%d)\n", base_id, count);
+ DUMP(alloc);
+ BUG_ON(!count);
+ spin_lock_irq(&alloc->lock);
+ node->base = base_id;
+ node->num = count;
+ list_for_each_entry(i, &alloc->free, list) {
+ if (i->base >= node->base) {
+ /* BUG_ON(any overlapping) */
+ BUG_ON(i->base < (node->base + node->num));
+ list_add_tail(&node->list, &i->list);
+ goto done;
+ }
+ }
+ list_add_tail(&node->list, &alloc->free);
+done:
+ /* Merge to the left */
+ i = list_entry(node->list.prev, struct alloc_node, list);
+ if (node->list.prev != &alloc->free) {
+ BUG_ON((i->base + i->num) > node->base);
+ if ((i->base + i->num) == node->base) {
+ node->base = i->base;
+ node->num += i->num;
+ list_del(&i->list);
+ kfree(i);
+ }
+ }
+ /* Merge to the right */
+ i = list_entry(node->list.next, struct alloc_node, list);
+ if (node->list.next != &alloc->free) {
+ BUG_ON((node->base + node->num) > i->base);
+ if ((node->base + node->num) == i->base) {
+ node->num += i->num;
+ list_del(&i->list);
+ kfree(i);
+ }
+ }
+ spin_unlock_irq(&alloc->lock);
+ DUMP(alloc);
+}
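+
+/* Worked example: with free nodes {6..9} and {20..24}, freeing {10..13}
+ * inserts the new node between them and merges left (9 + 1 == 10) to give
+ * {6..13}; no right merge occurs because 13 + 1 != 20. */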
+
+
+void dpa_alloc_free(struct dpa_alloc *alloc, u32 base_id, u32 count)
+{
+ struct alloc_node *i = NULL;
+ spin_lock_irq(&alloc->lock);
+
+ /* First find the node in the used list and decrement its ref count */
+ list_for_each_entry(i, &alloc->used, list) {
+ if (i->base == base_id && i->num == count) {
+ --i->refcount;
+ if (i->refcount == 0) {
+ list_del(&i->list);
+ spin_unlock_irq(&alloc->lock);
+ if (i->is_alloced)
+ _dpa_alloc_free(alloc, base_id, count);
+ kfree(i);
+ return;
+ }
+ spin_unlock_irq(&alloc->lock);
+ return;
+ }
+ }
+ /* Couldn't find the allocation */
+ pr_err("Attempt to free ID 0x%x COUNT %d that wasn't alloc'd or reserved\n",
+ base_id, count);
+ spin_unlock_irq(&alloc->lock);
+}
+
+void dpa_alloc_seed(struct dpa_alloc *alloc, u32 base_id, u32 count)
+{
+ /* Same as free but no previous allocation checking is needed */
+ _dpa_alloc_free(alloc, base_id, count);
+}
+
+
+int dpa_alloc_reserve(struct dpa_alloc *alloc, u32 base, u32 num)
+{
+ struct alloc_node *i = NULL, *used_node;
+
+ DPRINT("alloc_reserve(%d,%d)\n", base, num);
+ DUMP(alloc);
+
+ spin_lock_irq(&alloc->lock);
+
+ /* Check for the node in the used list.
+ * If found, increase its refcount */
+ list_for_each_entry(i, &alloc->used, list) {
+ if ((i->base == base) && (i->num == num)) {
+ ++i->refcount;
+ spin_unlock_irq(&alloc->lock);
+ return 0;
+ }
+ if ((base >= i->base) && (base < (i->base + i->num))) {
+ /* This is an attempt to reserve a region that was
+ * already reserved or alloced with a different
+ * base or num */
+ pr_err("Cannot reserve %d - %d, it overlaps with"
+ " existing reservation from %d - %d\n",
+ base, base + num - 1, i->base,
+ i->base + i->num - 1);
+ spin_unlock_irq(&alloc->lock);
+ return -1;
+ }
+ }
+ /* Check to make sure this ID isn't in the free list */
+ list_for_each_entry(i, &alloc->free, list) {
+ if ((base >= i->base) && (base < (i->base + i->num))) {
+ /* yep, the reservation is within this node */
+ pr_err("Cannot reserve %d - %d, it overlaps with"
+ " free range %d - %d and must be alloced\n",
+ base, base + num - 1,
+ i->base, i->base + i->num - 1);
+ spin_unlock_irq(&alloc->lock);
+ return -1;
+ }
+ }
+ /* Add the reservation to the used list with a refcount of 1. The lock
+ * is held, so the allocation must not sleep */
+ used_node = kmalloc(sizeof(*used_node), GFP_ATOMIC);
+ if (!used_node) {
+ spin_unlock_irq(&alloc->lock);
+ return -ENOMEM;
+ }
+ used_node->base = base;
+ used_node->num = num;
+ used_node->refcount = 1;
+ used_node->is_alloced = 0;
+ list_add_tail(&used_node->list, &alloc->used);
+ spin_unlock_irq(&alloc->lock);
+ return 0;
+}
+
+
+int dpa_alloc_pop(struct dpa_alloc *alloc, u32 *result, u32 *count)
+{
+ struct alloc_node *i = NULL;
+ DPRINT("alloc_pop()\n");
+ DUMP(alloc);
+ spin_lock_irq(&alloc->lock);
+ if (!list_empty(&alloc->free)) {
+ i = list_entry(alloc->free.next, struct alloc_node, list);
+ list_del(&i->list);
+ }
+ spin_unlock_irq(&alloc->lock);
+ DPRINT("returning %d\n", i ? 0 : -ENOMEM);
+ DUMP(alloc);
+ if (!i)
+ return -ENOMEM;
+ *result = i->base;
+ *count = i->num;
+ kfree(i);
+ return 0;
+}
+
+int dpa_alloc_check(struct dpa_alloc *list_head, u32 item)
+{
+ struct alloc_node *i = NULL;
+ int res = 0;
+ DPRINT("alloc_check()\n");
+ spin_lock_irq(&list_head->lock);
+
+ list_for_each_entry(i, &list_head->free, list) {
+ if ((item >= i->base) && (item < (i->base + i->num))) {
+ res = 1;
+ break;
+ }
+ }
+ spin_unlock_irq(&list_head->lock);
+ return res;
+}
diff --git a/drivers/staging/fsl_qbman/dpa_sys.h b/drivers/staging/fsl_qbman/dpa_sys.h
new file mode 100644
index 000000000000..66699426b2cd
--- /dev/null
+++ b/drivers/staging/fsl_qbman/dpa_sys.h
@@ -0,0 +1,258 @@
+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DPA_SYS_H
+#define DPA_SYS_H
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/kthread.h>
+#include <linux/memblock.h>
+#include <linux/completion.h>
+#include <linux/log2.h>
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/device.h>
+#include <linux/uio_driver.h>
+#include <linux/smp.h>
+#include <linux/fsl_hypervisor.h>
+#include <linux/vmalloc.h>
+#include <linux/ctype.h>
+#include <linux/math64.h>
+#include <linux/bitops.h>
+
+#include <linux/fsl_usdpaa.h>
+
+/* When copying aligned words or shorts, try to avoid memcpy() */
+#define CONFIG_TRY_BETTER_MEMCPY
+
+/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
+#define DPA_PORTAL_CE 0
+#define DPA_PORTAL_CI 1
+
+/***********************/
+/* Misc inline assists */
+/***********************/
+
+#if defined CONFIG_PPC32
+#include "dpa_sys_ppc32.h"
+#elif defined CONFIG_PPC64
+#include "dpa_sys_ppc64.h"
+#elif defined CONFIG_ARM
+#include "dpa_sys_arm.h"
+#elif defined CONFIG_ARM64
+#include "dpa_sys_arm64.h"
+#endif
+
+
+#ifdef CONFIG_FSL_DPA_CHECKING
+#define DPA_ASSERT(x) \
+ do { \
+ if (!(x)) { \
+ pr_crit("ASSERT: (%s:%d) %s\n", __FILE__, __LINE__, \
+ __stringify_1(x)); \
+ dump_stack(); \
+ panic("assertion failure"); \
+ } \
+ } while (0)
+#else
+#define DPA_ASSERT(x)
+#endif
+
+/* memcpy() stuff - when you know alignments in advance */
+#ifdef CONFIG_TRY_BETTER_MEMCPY
+static inline void copy_words(void *dest, const void *src, size_t sz)
+{
+ u32 *__dest = dest;
+ const u32 *__src = src;
+ size_t __sz = sz >> 2;
+ BUG_ON((unsigned long)dest & 0x3);
+ BUG_ON((unsigned long)src & 0x3);
+ BUG_ON(sz & 0x3);
+ while (__sz--)
+ *(__dest++) = *(__src++);
+}
+static inline void copy_shorts(void *dest, const void *src, size_t sz)
+{
+ u16 *__dest = dest;
+ const u16 *__src = src;
+ size_t __sz = sz >> 1;
+ BUG_ON((unsigned long)dest & 0x1);
+ BUG_ON((unsigned long)src & 0x1);
+ BUG_ON(sz & 0x1);
+ while (__sz--)
+ *(__dest++) = *(__src++);
+}
+static inline void copy_bytes(void *dest, const void *src, size_t sz)
+{
+ u8 *__dest = dest;
+ const u8 *__src = src;
+ while (sz--)
+ *(__dest++) = *(__src++);
+}
+#else
+#define copy_words memcpy
+#define copy_shorts memcpy
+#define copy_bytes memcpy
+#endif
+
+/************/
+/* RB-trees */
+/************/
+
+/* We encapsulate RB-trees so that it's easier to use non-linux forms in
+ * non-linux systems. This also encapsulates the extra plumbing that linux code
+ * usually provides when using RB-trees. This encapsulation assumes that the
+ * data type held by the tree is u32. */
+
+struct dpa_rbtree {
+ struct rb_root root;
+};
+#define DPA_RBTREE { .root = RB_ROOT }
+
+static inline void dpa_rbtree_init(struct dpa_rbtree *tree)
+{
+ tree->root = RB_ROOT;
+}
+
+#define IMPLEMENT_DPA_RBTREE(name, type, node_field, val_field) \
+static inline int name##_push(struct dpa_rbtree *tree, type *obj) \
+{ \
+ struct rb_node *parent = NULL, **p = &tree->root.rb_node; \
+ while (*p) { \
+ u32 item; \
+ parent = *p; \
+ item = rb_entry(parent, type, node_field)->val_field; \
+ if (obj->val_field < item) \
+ p = &parent->rb_left; \
+ else if (obj->val_field > item) \
+ p = &parent->rb_right; \
+ else \
+ return -EBUSY; \
+ } \
+ rb_link_node(&obj->node_field, parent, p); \
+ rb_insert_color(&obj->node_field, &tree->root); \
+ return 0; \
+} \
+static inline void name##_del(struct dpa_rbtree *tree, type *obj) \
+{ \
+ rb_erase(&obj->node_field, &tree->root); \
+} \
+static inline type *name##_find(struct dpa_rbtree *tree, u32 val) \
+{ \
+ type *ret; \
+ struct rb_node *p = tree->root.rb_node; \
+ while (p) { \
+ ret = rb_entry(p, type, node_field); \
+ if (val < ret->val_field) \
+ p = p->rb_left; \
+ else if (val > ret->val_field) \
+ p = p->rb_right; \
+ else \
+ return ret; \
+ } \
+ return NULL; \
+}
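+
+/* Instantiation sketch (the type is hypothetical): an object keyed by a u32
+ * 'fqid' field gets its push/del/find helpers like so:
+ *
+ *	struct my_fq {
+ *		struct rb_node node;
+ *		u32 fqid;
+ *	};
+ *	IMPLEMENT_DPA_RBTREE(myfq, struct my_fq, node, fqid)
+ *
+ * myfq_push() then returns -EBUSY on a duplicate key, and myfq_find() returns
+ * NULL on a miss. */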
+
+/************/
+/* Bootargs */
+/************/
+
+/* Qman has "qportals=" and Bman has "bportals=", they use the same syntax
+ * though; a comma-separated list of items, each item being a cpu index and/or a
+ * range of cpu indices, and each item may optionally be prefixed by "s" to
+ * indicate that the portal associated with that cpu should be shared. See
+ * bman_driver.c for more specifics. */
+static int __parse_portals_cpu(const char **s, unsigned int *cpu)
+{
+ *cpu = 0;
+ if (!isdigit(**s))
+ return -EINVAL;
+ while (isdigit(**s))
+ *cpu = *cpu * 10 + (*((*s)++) - '0');
+ return 0;
+}
+static inline int parse_portals_bootarg(char *str, struct cpumask *want_shared,
+ struct cpumask *want_unshared,
+ const char *argname)
+{
+ const char *s = str;
+ unsigned int shared, cpu1, cpu2, loop;
+
+keep_going:
+ if (*s == 's') {
+ shared = 1;
+ s++;
+ } else
+ shared = 0;
+ if (__parse_portals_cpu(&s, &cpu1))
+ goto err;
+ if (*s == '-') {
+ s++;
+ if (__parse_portals_cpu(&s, &cpu2))
+ goto err;
+ if (cpu2 < cpu1)
+ goto err;
+ } else
+ cpu2 = cpu1;
+ for (loop = cpu1; loop <= cpu2; loop++)
+ cpumask_set_cpu(loop, shared ? want_shared : want_unshared);
+ if (*s == ',') {
+ s++;
+ goto keep_going;
+ } else if ((*s == '\0') || isspace(*s))
+ return 0;
+err:
+ pr_crit("Malformed %s argument: %s, offset: %lu\n", argname, str,
+ (unsigned long)s - (unsigned long)str);
+ return -EINVAL;
+}
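+
+/* Example of the accepted syntax: the (hypothetical) argument "s0,2-3" asks
+ * for a shared portal on cpu 0 and unshared portals on cpus 2 and 3:
+ *
+ *	char arg[] = "s0,2-3";
+ *	struct cpumask shared, unshared;
+ *
+ *	cpumask_clear(&shared);
+ *	cpumask_clear(&unshared);
+ *	if (!parse_portals_bootarg(arg, &shared, &unshared, "bportals"))
+ *		... cpu 0 is now set in 'shared', cpus 2-3 in 'unshared' ...
+ */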
+#ifdef CONFIG_FSL_USDPAA
+/* Hooks from fsl_usdpaa_irq.c to fsl_usdpaa.c */
+int usdpaa_get_portal_config(struct file *filp, void *cinh,
+ enum usdpaa_portal_type ptype, unsigned int *irq,
+ void **iir_reg);
+#endif
+#endif /* DPA_SYS_H */
diff --git a/drivers/staging/fsl_qbman/dpa_sys_arm.h b/drivers/staging/fsl_qbman/dpa_sys_arm.h
new file mode 100644
index 000000000000..17c5500ef760
--- /dev/null
+++ b/drivers/staging/fsl_qbman/dpa_sys_arm.h
@@ -0,0 +1,95 @@
+/* Copyright 2016 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DPA_SYS_ARM_H
+#define DPA_SYS_ARM_H
+
+#include <asm/cacheflush.h>
+#include <asm/barrier.h>
+
+/* Implementation of ARM specific routines */
+
+/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
+ * barriers and that dcb*() won't fall victim to compiler or execution
+ * reordering with respect to other code/instructions that manipulate the same
+ * cacheline. */
+#define hwsync() { asm volatile("dmb st" : : : "memory"); }
+#define lwsync() { asm volatile("dmb st" : : : "memory"); }
+#define dcbf(p) { asm volatile("mcr p15, 0, %0, c7, c10, 1" : : "r" (p) : "memory"); }
+#define dcbt_ro(p) { asm volatile("pld [%0, #64];": : "r" (p)); }
+#define dcbt_rw(p) { asm volatile("pldw [%0, #64];": : "r" (p)); }
+#define dcbi(p) { asm volatile("mcr p15, 0, %0, c7, c6, 1" : : "r" (p) : "memory"); }
+
+#define dcbz_64(p) { memset(p, 0, sizeof(*p)); }
+
+#define dcbf_64(p) \
+ do { \
+ dcbf((u32)p); \
+ } while (0)
+/* Commonly used combo */
+#define dcbit_ro(p) \
+ do { \
+ dcbi((u32)p); \
+ dcbt_ro((u32)p); \
+ } while (0)
+
+static inline u64 mfatb(void)
+{
+ return get_cycles();
+}
+
+static inline u32 in_be32(volatile void *addr)
+{
+ return be32_to_cpu(*((volatile u32 *) addr));
+}
+
+static inline void out_be32(void *addr, u32 val)
+{
+ *((u32 *) addr) = cpu_to_be32(val);
+}
+
+
+static inline void set_bits(unsigned long mask, volatile unsigned long *p)
+{
+ *p |= mask;
+}
+static inline void clear_bits(unsigned long mask, volatile unsigned long *p)
+{
+ *p &= ~mask;
+}
+
+static inline void flush_dcache_range(unsigned long start, unsigned long stop)
+{
+ __cpuc_flush_dcache_area((void *) start, stop - start);
+}
+
+#define hard_smp_processor_id() raw_smp_processor_id()
+#endif
diff --git a/drivers/staging/fsl_qbman/dpa_sys_arm64.h b/drivers/staging/fsl_qbman/dpa_sys_arm64.h
new file mode 100644
index 000000000000..247c8d97b6a6
--- /dev/null
+++ b/drivers/staging/fsl_qbman/dpa_sys_arm64.h
@@ -0,0 +1,102 @@
+/* Copyright 2014 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DPA_SYS_ARM64_H
+#define DPA_SYS_ARM64_H
+
+#include <asm/cacheflush.h>
+#include <asm/barrier.h>
+
+/* Implementation of ARM 64 bit specific routines */
+
+/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
+ * barriers and that dcb*() won't fall victim to compiler or execution
+ * reordering with respect to other code/instructions that manipulate the same
+ * cacheline. */
+#define hwsync() { asm volatile("dmb st" : : : "memory"); }
+#define lwsync() { asm volatile("dmb st" : : : "memory"); }
+#define dcbf(p) { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); }
+#define dcbt_ro(p) { asm volatile("prfm pldl1keep, [%0, #0]" : : "r" (p)); }
+#define dcbt_rw(p) { asm volatile("prfm pstl1keep, [%0, #0]" : : "r" (p)); }
+#define dcbi(p) { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); }
+#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); }
+
+#define dcbz_64(p) \
+ do { \
+ dcbz(p); \
+ } while (0)
+
+#define dcbf_64(p) \
+ do { \
+ dcbf(p); \
+ } while (0)
+/* Commonly used combo */
+#define dcbit_ro(p) \
+ do { \
+ dcbi(p); \
+ dcbt_ro(p); \
+ } while (0)
+
+static inline u64 mfatb(void)
+{
+ return get_cycles();
+}
+
+static inline u32 in_be32(volatile void *addr)
+{
+ return be32_to_cpu(*((volatile u32 *) addr));
+}
+
+static inline void out_be32(void *addr, u32 val)
+{
+ *((u32 *) addr) = cpu_to_be32(val);
+}
+
+
+static inline void set_bits(unsigned long mask, volatile unsigned long *p)
+{
+ *p |= mask;
+}
+static inline void clear_bits(unsigned long mask, volatile unsigned long *p)
+{
+ *p &= ~mask;
+}
+
+static inline void flush_dcache_range(unsigned long start, unsigned long stop)
+{
+ __flush_dcache_area((void *) start, stop - start);
+}
+
+#define hard_smp_processor_id() raw_smp_processor_id()
+
+#endif
diff --git a/drivers/staging/fsl_qbman/dpa_sys_ppc32.h b/drivers/staging/fsl_qbman/dpa_sys_ppc32.h
new file mode 100644
index 000000000000..874616dfdca6
--- /dev/null
+++ b/drivers/staging/fsl_qbman/dpa_sys_ppc32.h
@@ -0,0 +1,70 @@
+/* Copyright 2014 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DPA_SYS_PPC32_H
+#define DPA_SYS_PPC32_H
+
+/* Implementation of PowerPC 32 bit specific routines */
+
+/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
+ * barriers and that dcb*() won't fall victim to compiler or execution
+ * reordering with respect to other code/instructions that manipulate the same
+ * cacheline. */
+#define hwsync() __asm__ __volatile__ ("sync" : : : "memory")
+#define lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory")
+#define dcbf(p) __asm__ __volatile__ ("dcbf 0,%0" : : "r" (p) : "memory")
+#define dcbt_ro(p) __asm__ __volatile__ ("dcbt 0,%0" : : "r" (p))
+#define dcbt_rw(p) __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (p))
+#define dcbi(p) dcbf(p)
+
+#define dcbzl(p) __asm__ __volatile__ ("dcbzl 0,%0" : : "r" (p))
+#define dcbz_64(p) dcbzl(p)
+#define dcbf_64(p) dcbf(p)
+
+/* Commonly used combo */
+#define dcbit_ro(p) \
+ do { \
+ dcbi(p); \
+ dcbt_ro(p); \
+ } while (0)
+
+static inline u64 mfatb(void)
+{
+ u32 hi, lo, chk;
+ do {
+ hi = mfspr(SPRN_ATBU);
+ lo = mfspr(SPRN_ATBL);
+ chk = mfspr(SPRN_ATBU);
+ } while (unlikely(hi != chk));
+ return ((u64)hi << 32) | (u64)lo;
+}
+
+#endif
diff --git a/drivers/staging/fsl_qbman/dpa_sys_ppc64.h b/drivers/staging/fsl_qbman/dpa_sys_ppc64.h
new file mode 100644
index 000000000000..d9803199631c
--- /dev/null
+++ b/drivers/staging/fsl_qbman/dpa_sys_ppc64.h
@@ -0,0 +1,79 @@
+/* Copyright 2014 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DPA_SYS_PPC64_H
+#define DPA_SYS_PPC64_H
+
+/* Implementation of PowerPC 64 bit specific routines */
+
+/* TODO: NB, we currently assume that hwsync() and lwsync() imply compiler
+ * barriers and that dcb*() won't fall victim to compiler or execution
+ * reordering with respect to other code/instructions that manipulate the same
+ * cacheline. */
+#define hwsync() __asm__ __volatile__ ("sync" : : : "memory")
+#define lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : : "memory")
+#define dcbf(p) __asm__ __volatile__ ("dcbf 0,%0" : : "r" (p) : "memory")
+#define dcbt_ro(p) __asm__ __volatile__ ("dcbt 0,%0" : : "r" (p))
+#define dcbt_rw(p) __asm__ __volatile__ ("dcbtst 0,%0" : : "r" (p))
+#define dcbi(p) dcbf(p)
+
+#define dcbz(p) __asm__ __volatile__ ("dcbz 0,%0" : : "r" (p))
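+/* The *_64 variants below cover a 64-byte region as two 32-byte halves,
+ * issuing one cache instruction per half. */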
+#define dcbz_64(p) \
+ do { \
+ dcbz((void *)p + 32); \
+ dcbz(p); \
+ } while (0)
+#define dcbf_64(p) \
+ do { \
+ dcbf((void *)p + 32); \
+ dcbf(p); \
+ } while (0)
+/* Commonly used combo */
+#define dcbit_ro(p) \
+ do { \
+ dcbi(p); \
+ dcbi((void *)p + 32); \
+ dcbt_ro(p); \
+ dcbt_ro((void *)p + 32); \
+ } while (0)
+
+static inline u64 mfatb(void)
+{
+ u32 hi, lo, chk;
+ do {
+ hi = mfspr(SPRN_ATBU);
+ lo = mfspr(SPRN_ATBL);
+ chk = mfspr(SPRN_ATBU);
+ } while (unlikely(hi != chk));
+ return ((u64)hi << 32) | (u64)lo;
+}
+
+#endif
diff --git a/drivers/staging/fsl_qbman/fsl_usdpaa.c b/drivers/staging/fsl_qbman/fsl_usdpaa.c
new file mode 100644
index 000000000000..478962cfa773
--- /dev/null
+++ b/drivers/staging/fsl_qbman/fsl_usdpaa.c
@@ -0,0 +1,2284 @@
+/* Copyright (C) 2008-2012 Freescale Semiconductor, Inc.
+ * Authors: Andy Fleming <afleming@freescale.com>
+ * Timur Tabi <timur@freescale.com>
+ * Geoff Thorpe <Geoff.Thorpe@freescale.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/mman.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/eventfd.h>
+#include <linux/fdtable.h>
+
+#if !(defined(CONFIG_ARM) || defined(CONFIG_ARM64))
+#include <mm/mmu_decl.h>
+#endif
+
+#include "dpa_sys.h"
+#include <linux/fsl_usdpaa.h>
+#include "bman_low.h"
+#include "qman_low.h"
+/* Headers required for
+ * link status support
+ */
+#include <linux/device.h>
+#include <linux/of_mdio.h>
+#include "mac.h"
+#include "dpaa_eth_common.h"
+
+/* Private data for Proxy Interface */
+struct dpa_proxy_priv_s {
+ struct mac_device *mac_dev;
+ struct eventfd_ctx *efd_ctx;
+};
+/* Interface Helpers */
+static inline struct device *get_dev_ptr(char *if_name);
+static void phy_link_updates(struct net_device *net_dev);
+/* IOCTL handlers */
+static inline int ioctl_usdpaa_get_link_status(char *if_name);
+static int ioctl_en_if_link_status(struct usdpaa_ioctl_link_status *args);
+static int ioctl_disable_if_link_status(char *if_name);
+
+/* Physical address range of the memory reservation, exported for mm/mem.c */
+static u64 phys_start;
+static u64 phys_size;
+static u64 arg_phys_size;
+
+/* PFN versions of the above */
+static unsigned long pfn_start;
+static unsigned long pfn_size;
+
+/* Memory reservations are manipulated under this spinlock (which is why 'refs'
+ * isn't atomic_t). */
+static DEFINE_SPINLOCK(mem_lock);
+
+/* The range of TLB1 indices */
+static unsigned int first_tlb;
+static unsigned int num_tlb = 1;
+static unsigned int current_tlb; /* loops around for fault handling */
+
+/* Memory reservation is represented as a list of 'mem_fragment's, some of which
+ * may be mapped. Unmapped fragments are always merged where possible. */
+static LIST_HEAD(mem_list);
+
+struct mem_mapping;
+
+/* Memory fragments are in 'mem_list'. */
+struct mem_fragment {
+ u64 base;
+ u64 len;
+ unsigned long pfn_base; /* PFN version of 'base' */
+ unsigned long pfn_len; /* PFN version of 'len' */
+ unsigned int refs; /* zero if unmapped */
+ u64 root_len; /* Size of the original fragment */
+ unsigned long root_pfn; /* PFN of the original fragment */
+ struct list_head list;
+ /* if mapped, flags+name captured at creation time */
+ u32 flags;
+ char name[USDPAA_DMA_NAME_MAX];
+ u64 map_len;
+ /* support multi-process locks per-memory-fragment. */
+ int has_locking;
+ wait_queue_head_t wq;
+ struct mem_mapping *owner;
+};
+
+/* Mappings of memory fragments in 'struct ctx'. These are created from
+ * ioctl(USDPAA_IOCTL_DMA_MAP), though the actual mapping then happens via a
+ * mmap(). */
+struct mem_mapping {
+ struct mem_fragment *root_frag;
+ u32 frag_count;
+ u64 total_size;
+ struct list_head list;
+ int refs;
+ void *virt_addr;
+};
+
+struct portal_mapping {
+ struct usdpaa_ioctl_portal_map user;
+ union {
+ struct qm_portal_config *qportal;
+ struct bm_portal_config *bportal;
+ };
+ /* Declare space for the portals in case the process
+ exits unexpectedly and needs to be cleaned by the kernel */
+ union {
+ struct qm_portal qman_portal_low;
+ struct bm_portal bman_portal_low;
+ };
+ struct list_head list;
+ struct resource *phys;
+ struct iommu_domain *iommu_domain;
+};
+
+/* Track the DPAA resources the process is using */
+struct active_resource {
+ struct list_head list;
+ u32 id;
+ u32 num;
+ unsigned int refcount;
+};
+
+/* Per-FD state (which should also be per-process but we don't enforce that) */
+struct ctx {
+ /* Lock to protect the context */
+ spinlock_t lock;
+ /* Allocated resources get put here for accounting */
+ struct list_head resources[usdpaa_id_max];
+ /* list of DMA maps */
+ struct list_head maps;
+ /* list of portal maps */
+ struct list_head portals;
+};
+
+/* Different resource classes */
+static const struct alloc_backend {
+ enum usdpaa_id_type id_type;
+ int (*alloc)(u32 *, u32, u32, int);
+ void (*release)(u32 base, unsigned int count);
+ int (*reserve)(u32 base, unsigned int count);
+ const char *acronym;
+} alloc_backends[] = {
+ {
+ .id_type = usdpaa_id_fqid,
+ .alloc = qman_alloc_fqid_range,
+ .release = qman_release_fqid_range,
+ .reserve = qman_reserve_fqid_range,
+ .acronym = "FQID"
+ },
+ {
+ .id_type = usdpaa_id_bpid,
+ .alloc = bman_alloc_bpid_range,
+ .release = bman_release_bpid_range,
+ .reserve = bman_reserve_bpid_range,
+ .acronym = "BPID"
+ },
+ {
+ .id_type = usdpaa_id_qpool,
+ .alloc = qman_alloc_pool_range,
+ .release = qman_release_pool_range,
+ .reserve = qman_reserve_pool_range,
+ .acronym = "QPOOL"
+ },
+ {
+ .id_type = usdpaa_id_cgrid,
+ .alloc = qman_alloc_cgrid_range,
+ .release = qman_release_cgrid_range,
+ .acronym = "CGRID"
+ },
+ {
+ .id_type = usdpaa_id_ceetm0_lfqid,
+ .alloc = qman_alloc_ceetm0_lfqid_range,
+ .release = qman_release_ceetm0_lfqid_range,
+ .acronym = "CEETM0_LFQID"
+ },
+ {
+ .id_type = usdpaa_id_ceetm0_channelid,
+ .alloc = qman_alloc_ceetm0_channel_range,
+ .release = qman_release_ceetm0_channel_range,
+ .acronym = "CEETM0_LFQID"
+ },
+ {
+ .id_type = usdpaa_id_ceetm1_lfqid,
+ .alloc = qman_alloc_ceetm1_lfqid_range,
+ .release = qman_release_ceetm1_lfqid_range,
+ .acronym = "CEETM1_LFQID"
+ },
+ {
+ .id_type = usdpaa_id_ceetm1_channelid,
+ .alloc = qman_alloc_ceetm1_channel_range,
+ .release = qman_release_ceetm1_channel_range,
+ .acronym = "CEETM1_LFQID"
+ },
+ {
+ /* This terminates the array */
+ .id_type = usdpaa_id_max
+ }
+};
+
+/* Determine the largest acceptable page size for a given size.
+ The candidates are the page sizes that TLB1 accepts */
+static u32 largest_page_size(u32 size)
+{
+ int shift = 30; /* Start at 1G size */
+ if (size < 4096)
+ return 0;
+ do {
+ if (size >= (1<<shift))
+ return 1<<shift;
+ shift -= 2;
+ } while (shift >= 12); /* Down to 4k */
+ return 0;
+}
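+/* Example: largest_page_size(0x500000) tries 1G, 256M, 64M and 16M before
+ * settling on 4M, the biggest TLB1 page size that fits in 5M. */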
+
+/* Determine if value is power of 4 */
+static inline bool is_power_of_4(u64 x)
+{
+ if (x == 0 || ((x & (x - 1)) != 0))
+ return false;
+ return !!(x & 0x5555555555555555ull);
+}
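+/* A power of 4 is a power of 2 whose single set bit sits in an even
+ * position, which is exactly what the 0x5555... mask selects: 0x4000
+ * (bit 14) passes, 0x8000 (bit 15) does not. */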
+
+/* Helper for ioctl_dma_map() when we have a larger fragment than we need. This
+ * splits the fragment into 4 and returns the upper-most. (The caller can loop
+ * until it has a suitable fragment size.) */
+static struct mem_fragment *split_frag(struct mem_fragment *frag)
+{
+ struct mem_fragment *x[3];
+
+ x[0] = kmalloc(sizeof(struct mem_fragment), GFP_ATOMIC);
+ x[1] = kmalloc(sizeof(struct mem_fragment), GFP_ATOMIC);
+ x[2] = kmalloc(sizeof(struct mem_fragment), GFP_ATOMIC);
+ if (!x[0] || !x[1] || !x[2]) {
+ kfree(x[0]);
+ kfree(x[1]);
+ kfree(x[2]);
+ return NULL;
+ }
+ BUG_ON(frag->refs);
+ frag->len >>= 2;
+ frag->pfn_len >>= 2;
+ x[0]->base = frag->base + frag->len;
+ x[1]->base = x[0]->base + frag->len;
+ x[2]->base = x[1]->base + frag->len;
+ x[0]->len = x[1]->len = x[2]->len = frag->len;
+ x[0]->pfn_base = frag->pfn_base + frag->pfn_len;
+ x[1]->pfn_base = x[0]->pfn_base + frag->pfn_len;
+ x[2]->pfn_base = x[1]->pfn_base + frag->pfn_len;
+ x[0]->pfn_len = x[1]->pfn_len = x[2]->pfn_len = frag->pfn_len;
+ x[0]->refs = x[1]->refs = x[2]->refs = 0;
+ x[0]->root_len = x[1]->root_len = x[2]->root_len = frag->root_len;
+ x[0]->root_pfn = x[1]->root_pfn = x[2]->root_pfn = frag->root_pfn;
+ x[0]->name[0] = x[1]->name[0] = x[2]->name[0] = 0;
+ list_add_tail(&x[0]->list, &frag->list);
+ list_add_tail(&x[1]->list, &x[0]->list);
+ list_add_tail(&x[2]->list, &x[1]->list);
+ return x[2];
+}
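+/* Note: split_frag() shrinks 'frag' to its lowest quarter in place and
+ * links the three upper quarters next to it on the list; neighbouring
+ * fragments are reached via list.prev elsewhere in this file. */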
+
+static __maybe_unused void dump_frags(void)
+{
+ struct mem_fragment *frag;
+ int i = 0;
+ list_for_each_entry(frag, &mem_list, list) {
+ pr_info("FRAG %d: base 0x%llx pfn_base 0x%lx len 0x%llx root_len 0x%llx root_pfn 0x%lx refs %d name %s\n",
+ i, frag->base, frag->pfn_base,
+ frag->len, frag->root_len, frag->root_pfn,
+ frag->refs, frag->name);
+ ++i;
+ }
+}
+
+/* Walk the list of fragments and adjoin neighbouring segments if possible */
+static void compress_frags(void)
+{
+ /* Walk the fragment list and combine fragments */
+ struct mem_fragment *frag, *nxtfrag;
+ u64 len = 0;
+
+ int i, numfrags;
+
+
+ frag = list_entry(mem_list.next, struct mem_fragment, list);
+
+ while (&frag->list != &mem_list) {
+ /* Must combine consecutive fragments with the
+ same root_pfn such that the result is a power of 4 */
+ if (frag->refs != 0) {
+ frag = list_entry(frag->list.next,
+ struct mem_fragment, list);
+ continue; /* Not this window */
+ }
+ len = frag->len;
+ numfrags = 0;
+ nxtfrag = list_entry(frag->list.next,
+ struct mem_fragment, list);
+ while (true) {
+ if (&nxtfrag->list == &mem_list) {
+ numfrags = 0;
+ break; /* End of list */
+ }
+ if (nxtfrag->refs) {
+ numfrags = 0;
+ break; /* In use still */
+ }
+ if (nxtfrag->root_pfn != frag->root_pfn) {
+ numfrags = 0;
+ break; /* Crosses root fragment boundary */
+ }
+ len += nxtfrag->len;
+ numfrags++;
+ if (is_power_of_4(len)) {
+ /* These fragments can be combined */
+ break;
+ }
+ nxtfrag = list_entry(nxtfrag->list.next,
+ struct mem_fragment, list);
+ }
+ if (numfrags == 0) {
+ frag = list_entry(frag->list.next,
+ struct mem_fragment, list);
+ continue; /* try the next window */
+ }
+ for (i = 0; i < numfrags; i++) {
+ struct mem_fragment *todel =
+ list_entry(nxtfrag->list.prev,
+ struct mem_fragment, list);
+ nxtfrag->len += todel->len;
+ nxtfrag->pfn_len += todel->pfn_len;
+ list_del(&todel->list);
+ }
+ /* Re-evaluate the list, things may merge now */
+ frag = list_entry(mem_list.next, struct mem_fragment, list);
+ }
+}
+
+/* Hook from arch/powerpc/mm/mem.c */
+int usdpaa_test_fault(unsigned long pfn, u64 *phys_addr, u64 *size)
+{
+ struct mem_fragment *frag;
+ int idx = -1;
+ if ((pfn < pfn_start) || (pfn >= (pfn_start + pfn_size)))
+ return -1;
+ /* It's in-range, we need to find the fragment */
+ spin_lock(&mem_lock);
+ list_for_each_entry(frag, &mem_list, list) {
+ if ((pfn >= frag->pfn_base) && (pfn < (frag->pfn_base +
+ frag->pfn_len))) {
+ *phys_addr = frag->base;
+ *size = frag->len;
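+ /* Hand out the reserved TLB1 indices round-robin so
+ repeated faults recycle the oldest entry */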
+ idx = current_tlb++;
+ if (current_tlb >= (first_tlb + num_tlb))
+ current_tlb = first_tlb;
+ break;
+ }
+ }
+ spin_unlock(&mem_lock);
+ return idx;
+}
+
+static int usdpaa_open(struct inode *inode, struct file *filp)
+{
+ const struct alloc_backend *backend = &alloc_backends[0];
+ struct ctx *ctx = kmalloc(sizeof(struct ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ filp->private_data = ctx;
+
+ while (backend->id_type != usdpaa_id_max) {
+ INIT_LIST_HEAD(&ctx->resources[backend->id_type]);
+ backend++;
+ }
+
+ INIT_LIST_HEAD(&ctx->maps);
+ INIT_LIST_HEAD(&ctx->portals);
+ spin_lock_init(&ctx->lock);
+
+ //filp->f_mapping->backing_dev_info = &directly_mappable_cdev_bdi;
+
+ return 0;
+}
+
+#define DQRR_MAXFILL 15
+
+
+/* Invalidate a portal */
+void dbci_portal(void *addr)
+{
+ int i;
+
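+ /* Invalidate the 16 KB (0x4000) cache-enabled portal area one
+ 64-byte cacheline at a time */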
+ for (i = 0; i < 0x4000; i += 64)
+ dcbi(addr + i);
+}
+
+/* Reset a QMan portal to its default state */
+static int init_qm_portal(struct qm_portal_config *config,
+ struct qm_portal *portal)
+{
+ const struct qm_dqrr_entry *dqrr = NULL;
+ int i;
+
+ portal->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
+ portal->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
+
+ /* Make sure interrupts are inhibited */
+ qm_out(IIR, 1);
+
+ /*
+ * Invalidate the entire CE portal area to ensure no stale
+ * cachelines are present. This should be done on all
+ * cores as the portal is mapped as M=0 (non-coherent).
+ */
+ on_each_cpu(dbci_portal, portal->addr.addr_ce, 1);
+
+ /* Initialize the DQRR. This will stop any dequeue
+ commands that are in progress */
+ if (qm_dqrr_init(portal, config, qm_dqrr_dpush, qm_dqrr_pvb,
+ qm_dqrr_cdc, DQRR_MAXFILL)) {
+ pr_err("qm_dqrr_init() failed when trying to"
+ " recover portal, portal will be leaked\n");
+ return 1;
+ }
+
+ /* Discard any entries on the DQRR */
+ /* If we consume the ring twice something is wrong */
+ for (i = 0; i < DQRR_MAXFILL * 2; i++) {
+ qm_dqrr_pvb_update(portal);
+ dqrr = qm_dqrr_current(portal);
+ if (!dqrr)
+ break;
+ qm_dqrr_cdc_consume_1ptr(portal, dqrr, 0);
+ qm_dqrr_pvb_update(portal);
+ qm_dqrr_next(portal);
+ }
+ /* Initialize the EQCR */
+ if (qm_eqcr_init(portal, qm_eqcr_pvb,
+ qm_eqcr_get_ci_stashing(portal), 1)) {
+ pr_err("Qman EQCR initialisation failed\n");
+ return 1;
+ }
+ /* initialize the MR */
+ if (qm_mr_init(portal, qm_mr_pvb, qm_mr_cci)) {
+ pr_err("Qman MR initialisation failed\n");
+ return 1;
+ }
+ qm_mr_pvb_update(portal);
+ while (qm_mr_current(portal)) {
+ qm_mr_next(portal);
+ qm_mr_cci_consume_to_current(portal);
+ qm_mr_pvb_update(portal);
+ }
+
+ if (qm_mc_init(portal)) {
+ pr_err("Qman MC initialisation failed\n");
+ return 1;
+ }
+ return 0;
+}
+
+static int init_bm_portal(struct bm_portal_config *config,
+ struct bm_portal *portal)
+{
+ portal->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
+ portal->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
+
+ /*
+ * Invalidate the entire CE portal area to ensure no stale
+ * cachelines are present. This should be done on all
+ * cores as the portal is mapped as M=0 (non-coherent).
+ */
+ on_each_cpu(dbci_portal, portal->addr.addr_ce, 1);
+
+ if (bm_rcr_init(portal, bm_rcr_pvb, bm_rcr_cce)) {
+ pr_err("Bman RCR initialisation failed\n");
+ return 1;
+ }
+ if (bm_mc_init(portal)) {
+ pr_err("Bman MC initialisation failed\n");
+ return 1;
+ }
+ return 0;
+}
+
+/* Function that will scan all FQs in the system. For each FQ that is not
+ OOS it will call the check_channel helper to determine if the FQ should
+ be torn down. If the check_channel helper returns true the FQ will be
+ transitioned to the OOS state */
+static int qm_check_and_destroy_fqs(struct qm_portal *portal, void *ctx,
+ bool (*check_channel)(void*, u32))
+{
+ u32 fq_id = 0;
+ while (1) {
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ u8 state;
+ u32 channel;
+
+ /* Determine the channel for the FQID */
+ mcc = qm_mc_start(portal);
+ mcc->queryfq.fqid = fq_id;
+ qm_mc_commit(portal, QM_MCC_VERB_QUERYFQ);
+ while (!(mcr = qm_mc_result(portal)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK)
+ == QM_MCR_VERB_QUERYFQ);
+ if (mcr->result != QM_MCR_RESULT_OK)
+ break; /* End of valid FQIDs */
+
+ channel = mcr->queryfq.fqd.dest.channel;
+ /* Determine the state of the FQID */
+ mcc = qm_mc_start(portal);
+ mcc->queryfq_np.fqid = fq_id;
+ qm_mc_commit(portal, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(portal)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK)
+ == QM_MCR_VERB_QUERYFQ_NP);
+ state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
+ if (state == QM_MCR_NP_STATE_OOS)
+ /* Already OOS, no need to do any more checks */
+ goto next;
+
+ if (check_channel(ctx, channel))
+ qm_shutdown_fq(&portal, 1, fq_id);
+ next:
+ ++fq_id;
+ }
+ return 0;
+}
+
+static bool check_channel_device(void *_ctx, u32 channel)
+{
+ struct ctx *ctx = _ctx;
+ struct portal_mapping *portal, *tmpportal;
+ struct active_resource *res;
+
+ /* See if the FQ is destined for one of the portals we're cleaning up */
+ list_for_each_entry_safe(portal, tmpportal, &ctx->portals, list) {
+ if (portal->user.type == usdpaa_portal_qman) {
+ if (portal->qportal->public_cfg.channel == channel) {
+ /* This FQ's destination is a portal
+ we're cleaning, send a retire */
+ return true;
+ }
+ }
+ }
+
+ /* Check the pool channels that will be released as well */
+ list_for_each_entry(res, &ctx->resources[usdpaa_id_qpool], list) {
+ if ((res->id >= channel) &&
+ ((res->id + res->num - 1) <= channel))
+ return true;
+ }
+ return false;
+}
+
+static bool check_portal_channel(void *ctx, u32 channel)
+{
+ u32 portal_channel = *(u32 *)ctx;
+ if (portal_channel == channel) {
+ /* This FQ's destination is a portal
+ we're cleaning, send a retire */
+ return true;
+ }
+ return false;
+}
+
+
+
+static int usdpaa_release(struct inode *inode, struct file *filp)
+{
+ int err = 0;
+ struct ctx *ctx = filp->private_data;
+ struct mem_mapping *map, *tmpmap;
+ struct portal_mapping *portal, *tmpportal;
+ const struct alloc_backend *backend = &alloc_backends[0];
+ struct active_resource *res;
+ struct qm_portal *qm_cleanup_portal = NULL;
+ struct bm_portal *bm_cleanup_portal = NULL;
+ struct qm_portal_config *qm_alloced_portal = NULL;
+ struct bm_portal_config *bm_alloced_portal = NULL;
+
+ struct qm_portal **portal_array;
+ int portal_count = 0;
+
+ portal_array = kmalloc_array(qman_portal_max,
+ sizeof(struct qm_portal *), GFP_KERNEL);
+ if (!portal_array)
+ return -ENOMEM;
+
+ /* Ensure the release operation cannot be migrated to another
+ CPU as CPU specific variables may be needed during cleanup */
+#ifdef CONFIG_PREEMPT_RT_FULL
+ migrate_disable();
+#endif
+ /* The following logic is used to recover resources that were not
+ correctly released by the process that is closing the FD.
+ Step 1: synchronize the HW with the qm_portal/bm_portal structures
+ in the kernel
+ */
+
+ list_for_each_entry_safe(portal, tmpportal, &ctx->portals, list) {
+ /* Try to recover any portals that weren't shut down */
+ if (portal->user.type == usdpaa_portal_qman) {
+ portal_array[portal_count] = &portal->qman_portal_low;
+ ++portal_count;
+ init_qm_portal(portal->qportal,
+ &portal->qman_portal_low);
+ if (!qm_cleanup_portal) {
+ qm_cleanup_portal = &portal->qman_portal_low;
+ } else {
+ /* Clean FQs on the dedicated channel */
+ u32 chan = portal->qportal->public_cfg.channel;
+ qm_check_and_destroy_fqs(
+ &portal->qman_portal_low, &chan,
+ check_portal_channel);
+ }
+ } else {
+ /* BMAN */
+ init_bm_portal(portal->bportal,
+ &portal->bman_portal_low);
+ if (!bm_cleanup_portal)
+ bm_cleanup_portal = &portal->bman_portal_low;
+ }
+ }
+ /* If no portal was found, allocate one for cleanup */
+ if (!qm_cleanup_portal) {
+ qm_alloced_portal = qm_get_unused_portal();
+ if (!qm_alloced_portal) {
+ pr_crit("No QMan portal avalaible for cleanup\n");
+ err = -1;
+ goto done;
+ }
+ qm_cleanup_portal = kmalloc(sizeof(struct qm_portal),
+ GFP_KERNEL);
+ if (!qm_cleanup_portal) {
+ err = -ENOMEM;
+ goto done;
+ }
+ init_qm_portal(qm_alloced_portal, qm_cleanup_portal);
+ portal_array[portal_count] = qm_cleanup_portal;
+ ++portal_count;
+ }
+ if (!bm_cleanup_portal) {
+ bm_alloced_portal = bm_get_unused_portal();
+ if (!bm_alloced_portal) {
+ pr_crit("No BMan portal avalaible for cleanup\n");
+ err = -1;
+ goto done;
+ }
+ bm_cleanup_portal = kmalloc(sizeof(struct bm_portal),
+ GFP_KERNEL);
+ if (!bm_cleanup_portal) {
+ err = -ENOMEM;
+ goto done;
+ }
+ init_bm_portal(bm_alloced_portal, bm_cleanup_portal);
+ }
+
+ /* OOS the FQs associated with this process */
+ qm_check_and_destroy_fqs(qm_cleanup_portal, ctx, check_channel_device);
+
+ while (backend->id_type != usdpaa_id_max) {
+ int leaks = 0;
+ list_for_each_entry(res, &ctx->resources[backend->id_type],
+ list) {
+ if (backend->id_type == usdpaa_id_fqid) {
+ int i = 0;
+ for (; i < res->num; i++) {
+ /* Clean FQs with the cleanup portal */
+ qm_shutdown_fq(portal_array,
+ portal_count,
+ res->id + i);
+ }
+ }
+ leaks += res->num;
+ backend->release(res->id, res->num);
+ }
+ if (leaks)
+ pr_crit("USDPAA process leaking %d %s%s\n", leaks,
+ backend->acronym, (leaks > 1) ? "s" : "");
+ backend++;
+ }
+ /* Release any DMA regions */
+ spin_lock(&mem_lock);
+ list_for_each_entry_safe(map, tmpmap, &ctx->maps, list) {
+ struct mem_fragment *current_frag = map->root_frag;
+ int i;
+ if (map->root_frag->has_locking &&
+ (map->root_frag->owner == map)) {
+ map->root_frag->owner = NULL;
+ wake_up(&map->root_frag->wq);
+ }
+ /* Check each fragment and merge if the ref count is 0 */
+ for (i = 0; i < map->frag_count; i++) {
+ --current_frag->refs;
+ current_frag = list_entry(current_frag->list.prev,
+ struct mem_fragment, list);
+ }
+
+ compress_frags();
+ list_del(&map->list);
+ kfree(map);
+ }
+ spin_unlock(&mem_lock);
+
+ /* Return portals */
+ list_for_each_entry_safe(portal, tmpportal, &ctx->portals, list) {
+ if (portal->user.type == usdpaa_portal_qman) {
+ /* Give the portal back to the allocator */
+ init_qm_portal(portal->qportal,
+ &portal->qman_portal_low);
+ qm_put_unused_portal(portal->qportal);
+ } else {
+ init_bm_portal(portal->bportal,
+ &portal->bman_portal_low);
+ bm_put_unused_portal(portal->bportal);
+ }
+ list_del(&portal->list);
+ kfree(portal);
+ }
+ if (qm_alloced_portal) {
+ qm_put_unused_portal(qm_alloced_portal);
+ kfree(qm_cleanup_portal);
+ }
+ if (bm_alloced_portal) {
+ bm_put_unused_portal(bm_alloced_portal);
+ kfree(bm_cleanup_portal);
+ }
+
+ kfree(ctx);
+done:
+#ifdef CONFIG_PREEMPT_RT_FULL
+ migrate_enable();
+#endif
+ kfree(portal_array);
+ return err;
+}
+
+static int check_mmap_dma(struct ctx *ctx, struct vm_area_struct *vma,
+ int *match, unsigned long *pfn)
+{
+ struct mem_mapping *map;
+
+ list_for_each_entry(map, &ctx->maps, list) {
+ int i;
+ struct mem_fragment *frag = map->root_frag;
+
+ for (i = 0; i < map->frag_count; i++) {
+ if (frag->pfn_base == vma->vm_pgoff) {
+ *match = 1;
+ *pfn = frag->pfn_base;
+ return 0;
+ }
+ frag = list_entry(frag->list.next, struct mem_fragment,
+ list);
+ }
+ }
+ *match = 0;
+ return 0;
+}
+
+static int check_mmap_resource(struct resource *res, struct vm_area_struct *vma,
+ int *match, unsigned long *pfn)
+{
+ *pfn = res->start >> PAGE_SHIFT;
+ if (*pfn == vma->vm_pgoff) {
+ *match = 1;
+ if ((vma->vm_end - vma->vm_start) != resource_size(res))
+ return -EINVAL;
+ } else
+ *match = 0;
+ return 0;
+}
+
+static int check_mmap_portal(struct ctx *ctx, struct vm_area_struct *vma,
+ int *match, unsigned long *pfn)
+{
+ struct portal_mapping *portal;
+ int ret;
+
+ list_for_each_entry(portal, &ctx->portals, list) {
+ ret = check_mmap_resource(&portal->phys[DPA_PORTAL_CE], vma,
+ match, pfn);
+ if (*match) {
+ vma->vm_page_prot =
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ pgprot_cached_ns(vma->vm_page_prot);
+#else
+ pgprot_cached_noncoherent(vma->vm_page_prot);
+#endif
+ return ret;
+ }
+ ret = check_mmap_resource(&portal->phys[DPA_PORTAL_CI], vma,
+ match, pfn);
+ if (*match) {
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ return ret;
+ }
+ }
+ *match = 0;
+ return 0;
+}
+
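+/* The mmap() offset is a physical PFN: it must match either a DMA fragment
+ * created by USDPAA_IOCTL_DMA_MAP or one of this FD's portal regions, and a
+ * portal match also selects the page protection for the mapping. */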
+static int usdpaa_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ struct ctx *ctx = filp->private_data;
+ unsigned long pfn = 0;
+ int match, ret;
+
+ spin_lock(&mem_lock);
+ ret = check_mmap_dma(ctx, vma, &match, &pfn);
+ if (!match)
+ ret = check_mmap_portal(ctx, vma, &match, &pfn);
+ spin_unlock(&mem_lock);
+ if (!match)
+ return -EINVAL;
+ if (!ret)
+ ret = remap_pfn_range(vma, vma->vm_start, pfn,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot);
+ return ret;
+}
+
+/* Return the nearest rounded-up address >= 'addr' that is 'sz'-aligned. 'sz'
+ * must be a power of 2, but both 'addr' and 'sz' can be expressions. */
+#define USDPAA_MEM_ROUNDUP(addr, sz) \
+ ({ \
+ unsigned long foo_align = (sz) - 1; \
+ ((addr) + foo_align) & ~foo_align; \
+ })
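+/* Example: USDPAA_MEM_ROUNDUP(0x12345, 0x10000) == 0x20000. */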
+/* Searching for a size-aligned virtual address range starting from 'addr' */
+static unsigned long usdpaa_get_unmapped_area(struct file *file,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags)
+{
+ struct vm_area_struct *vma;
+
+ if (len % PAGE_SIZE)
+ return -EINVAL;
+ if (!len)
+ return -EINVAL;
+
+ /* Need to align the address to the largest pagesize of the mapping
+ * because the MMU requires the virtual address to have the same
+ * alignment as the physical address */
+ addr = USDPAA_MEM_ROUNDUP(addr, largest_page_size(len));
+ vma = find_vma(current->mm, addr);
+ /* Keep searching until we reach the end of currently-used virtual
+ * address-space or we find a big enough gap. */
+ while (vma) {
+ if ((addr + len) < vma->vm_start)
+ return addr;
+
+ addr = USDPAA_MEM_ROUNDUP(vma->vm_end, largest_page_size(len));
+ vma = vma->vm_next;
+ }
+ if ((TASK_SIZE - len) < addr)
+ return -ENOMEM;
+ return addr;
+}
+
+static long ioctl_id_alloc(struct ctx *ctx, void __user *arg)
+{
+ struct usdpaa_ioctl_id_alloc i;
+ const struct alloc_backend *backend;
+ struct active_resource *res;
+ int ret = copy_from_user(&i, arg, sizeof(i));
+ if (ret)
+ return -EFAULT;
+ if ((i.id_type >= usdpaa_id_max) || !i.num)
+ return -EINVAL;
+ backend = &alloc_backends[i.id_type];
+ /* Allocate the required resource type */
+ ret = backend->alloc(&i.base, i.num, i.align, i.partial);
+ if (ret < 0)
+ return ret;
+ i.num = ret;
+ /* Copy the result to user-space */
+ ret = copy_to_user(arg, &i, sizeof(i));
+ if (ret) {
+ backend->release(i.base, i.num);
+ return -EFAULT;
+ }
+ /* Assign the allocated range to the FD accounting */
+ res = kmalloc(sizeof(*res), GFP_KERNEL);
+ if (!res) {
+ backend->release(i.base, i.num);
+ return -ENOMEM;
+ }
+ spin_lock(&ctx->lock);
+ res->id = i.base;
+ res->num = i.num;
+ res->refcount = 1;
+ list_add(&res->list, &ctx->resources[i.id_type]);
+ spin_unlock(&ctx->lock);
+ return 0;
+}
+
+static long ioctl_id_release(struct ctx *ctx, void __user *arg)
+{
+ struct usdpaa_ioctl_id_release i;
+ const struct alloc_backend *backend;
+ struct active_resource *tmp, *pos;
+
+ int ret = copy_from_user(&i, arg, sizeof(i));
+ if (ret)
+ return -EFAULT;
+ if ((i.id_type >= usdpaa_id_max) || !i.num)
+ return -EINVAL;
+ backend = &alloc_backends[i.id_type];
+ /* Pull the range out of the FD accounting - the range is valid iff this
+ * succeeds. */
+ spin_lock(&ctx->lock);
+ list_for_each_entry_safe(pos, tmp, &ctx->resources[i.id_type], list) {
+ if (pos->id == i.base && pos->num == i.num) {
+ pos->refcount--;
+ if (pos->refcount) {
+ spin_unlock(&ctx->lock);
+ return 0; /* Still being used */
+ }
+ list_del(&pos->list);
+ kfree(pos);
+ spin_unlock(&ctx->lock);
+ goto found;
+ }
+ }
+ /* Failed to find the resource */
+ spin_unlock(&ctx->lock);
+ pr_err("Couldn't find resource type %d base 0x%x num %d\n",
+ i.id_type, i.base, i.num);
+ return -EINVAL;
+found:
+ /* Release the resource to the backend */
+ backend->release(i.base, i.num);
+ return 0;
+}
+
+static long ioctl_id_reserve(struct ctx *ctx, void __user *arg)
+{
+ struct usdpaa_ioctl_id_reserve i;
+ const struct alloc_backend *backend;
+ struct active_resource *tmp, *pos;
+
+ int ret = copy_from_user(&i, arg, sizeof(i));
+ if (ret)
+ return -EFAULT;
+ if ((i.id_type >= usdpaa_id_max) || !i.num)
+ return -EINVAL;
+ backend = &alloc_backends[i.id_type];
+ if (!backend->reserve)
+ return -EINVAL;
+ /* Pull the range out of the FD accounting - the range is valid iff this
+ * succeeds. */
+ spin_lock(&ctx->lock);
+ list_for_each_entry_safe(pos, tmp, &ctx->resources[i.id_type], list) {
+ if (pos->id == i.base && pos->num == i.num) {
+ pos->refcount++;
+ spin_unlock(&ctx->lock);
+ return 0;
+ }
+ }
+
+ /* Failed to find the resource */
+ spin_unlock(&ctx->lock);
+
+ /* Reserve the resource in the backend */
+ ret = backend->reserve(i.base, i.num);
+ if (ret)
+ return ret;
+ /* Assign the reserved range to the FD accounting */
+ pos = kmalloc(sizeof(*pos), GFP_KERNEL);
+ if (!pos) {
+ backend->release(i.base, i.num);
+ return -ENOMEM;
+ }
+ spin_lock(&ctx->lock);
+ pos->id = i.base;
+ pos->num = i.num;
+ pos->refcount = 1;
+ list_add(&pos->list, &ctx->resources[i.id_type]);
+ spin_unlock(&ctx->lock);
+ return 0;
+}
+
+static long ioctl_dma_map(struct file *fp, struct ctx *ctx,
+ struct usdpaa_ioctl_dma_map *i)
+{
+ struct mem_fragment *frag, *start_frag, *next_frag;
+ struct mem_mapping *map, *tmp;
+ int ret = 0;
+ u32 largest_page, so_far = 0;
+ int frag_count = 0;
+ unsigned long next_addr = PAGE_SIZE, populate;
+
+ /* error checking to ensure values copied from user space are valid */
+ if (i->len % PAGE_SIZE)
+ return -EINVAL;
+
+ map = kmalloc(sizeof(*map), GFP_KERNEL);
+ if (!map)
+ return -ENOMEM;
+
+ spin_lock(&mem_lock);
+ if (i->flags & USDPAA_DMA_FLAG_SHARE) {
+ list_for_each_entry(frag, &mem_list, list) {
+ if (frag->refs && (frag->flags &
+ USDPAA_DMA_FLAG_SHARE) &&
+ !strncmp(i->name, frag->name,
+ USDPAA_DMA_NAME_MAX)) {
+ /* Matching entry */
+ if ((i->flags & USDPAA_DMA_FLAG_CREATE) &&
+ !(i->flags & USDPAA_DMA_FLAG_LAZY)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ /* Check to ensure size matches record */
+ if (i->len != frag->map_len && i->len) {
+ pr_err("ioctl_dma_map() Size requested does not match %s and is non-zero\n",
+ frag->name);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Check if this has already been mapped
+ to this process */
+ list_for_each_entry(tmp, &ctx->maps, list)
+ if (tmp->root_frag == frag) {
+ /* Already mapped, just need to
+ inc ref count */
+ tmp->refs++;
+ kfree(map);
+ i->did_create = 0;
+ i->len = tmp->total_size;
+ i->phys_addr = frag->base;
+ i->ptr = tmp->virt_addr;
+ spin_unlock(&mem_lock);
+ return 0;
+ }
+ /* Matching entry - just need to map */
+ i->has_locking = frag->has_locking;
+ i->did_create = 0;
+ i->len = frag->map_len;
+ start_frag = frag;
+ goto do_map;
+ }
+ }
+ /* No matching entry */
+ if (!(i->flags & USDPAA_DMA_FLAG_CREATE)) {
+ pr_err("ioctl_dma_map() No matching entry\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+ }
+ /* New fragment required, size must be provided. */
+ if (!i->len) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* Find one or more contiguous fragments that satisfy the total length,
+ trying to minimize the number of fragments;
+ compute the largest page size that the allocation could use */
+ largest_page = largest_page_size(i->len);
+ start_frag = NULL;
+ while (largest_page &&
+ largest_page <= largest_page_size(phys_size) &&
+ start_frag == NULL) {
+ /* Search the list for a frag of that size */
+ list_for_each_entry(frag, &mem_list, list) {
+ if (!frag->refs && (frag->len == largest_page)) {
+ /* See if the next x fragments are free
+ and can accommodate the size */
+ u32 found_size = largest_page;
+ next_frag = list_entry(frag->list.prev,
+ struct mem_fragment,
+ list);
+ /* If the fragment is too small, check
+ if the neighbours can support it */
+ while (found_size < i->len) {
+ if (&mem_list == &next_frag->list)
+ break; /* End of list */
+ if (next_frag->refs != 0 ||
+ next_frag->len == 0)
+ break; /* not enough space */
+ found_size += next_frag->len;
+ next_frag = list_entry(
+ next_frag->list.prev,
+ struct mem_fragment,
+ list);
+ }
+ if (found_size >= i->len) {
+ /* Success! There is enough contiguous
+ free space */
+ start_frag = frag;
+ break;
+ }
+ }
+ } /* next frag loop */
+ /* Couldn't satisfy the request with this
+ page size, try a larger one */
+ largest_page <<= 2;
+ }
+ if (start_frag == NULL) {
+ /* Couldn't find proper amount of space */
+ ret = -ENOMEM;
+ goto out;
+ }
+ i->did_create = 1;
+do_map:
+ /* Verify there is sufficient space to do the mapping */
+ down_write(&current->mm->mmap_sem);
+ next_addr = usdpaa_get_unmapped_area(fp, next_addr, i->len, 0, 0);
+ up_write(&current->mm->mmap_sem);
+
+ if (next_addr & ~PAGE_MASK) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* We may need to divide the final fragment to accommodate the mapping */
+ next_frag = start_frag;
+ while (so_far != i->len) {
+ BUG_ON(next_frag->len == 0);
+ while ((next_frag->len + so_far) > i->len) {
+ /* Split frag until they match */
+ split_frag(next_frag);
+ }
+ so_far += next_frag->len;
+ next_frag->refs++;
+ ++frag_count;
+ next_frag = list_entry(next_frag->list.prev,
+ struct mem_fragment, list);
+ }
+ if (i->did_create) {
+ size_t name_len = 0;
+ start_frag->flags = i->flags;
+ strncpy(start_frag->name, i->name, USDPAA_DMA_NAME_MAX);
+ name_len = strnlen(start_frag->name, USDPAA_DMA_NAME_MAX);
+ if (name_len >= USDPAA_DMA_NAME_MAX) {
+ ret = -EFAULT;
+ goto out;
+ }
+ start_frag->map_len = i->len;
+ start_frag->has_locking = i->has_locking;
+ init_waitqueue_head(&start_frag->wq);
+ start_frag->owner = NULL;
+ }
+
+ /* Setup the map entry */
+ map->root_frag = start_frag;
+ map->total_size = i->len;
+ map->frag_count = frag_count;
+ map->refs = 1;
+ list_add(&map->list, &ctx->maps);
+ i->phys_addr = start_frag->base;
+out:
+ spin_unlock(&mem_lock);
+
+ if (!ret) {
+ unsigned long longret;
+ down_write(&current->mm->mmap_sem);
+ longret = do_mmap_pgoff(fp, next_addr, map->total_size,
+ PROT_READ |
+ (i->flags &
+ USDPAA_DMA_FLAG_RDONLY ? 0
+ : PROT_WRITE),
+ MAP_SHARED,
+ start_frag->pfn_base,
+ &populate,
+ NULL);
+ up_write(&current->mm->mmap_sem);
+ if (longret & ~PAGE_MASK) {
+ ret = (int)longret;
+ } else {
+ i->ptr = (void *)longret;
+ map->virt_addr = i->ptr;
+ }
+ } else
+ kfree(map);
+ return ret;
+}
+
+static long ioctl_dma_unmap(struct ctx *ctx, void __user *arg)
+{
+ struct mem_mapping *map;
+ struct vm_area_struct *vma;
+ int ret, i;
+ struct mem_fragment *current_frag;
+ size_t sz;
+ unsigned long base;
+ unsigned long vaddr;
+
+ down_write(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, (unsigned long)arg);
+ if (!vma || (vma->vm_start > (unsigned long)arg)) {
+ up_write(&current->mm->mmap_sem);
+ return -EFAULT;
+ }
+ spin_lock(&mem_lock);
+ list_for_each_entry(map, &ctx->maps, list) {
+ if (map->root_frag->pfn_base == vma->vm_pgoff) {
+ /* Drop the map lock if we hold it */
+ if (map->root_frag->has_locking &&
+ (map->root_frag->owner == map)) {
+ map->root_frag->owner = NULL;
+ wake_up(&map->root_frag->wq);
+ }
+ goto map_match;
+ }
+ }
+ /* Failed to find a matching mapping for this process */
+ ret = -EFAULT;
+ spin_unlock(&mem_lock);
+ goto out;
+map_match:
+ map->refs--;
+ if (map->refs != 0) {
+ /* Another call to dma_map is referencing this */
+ ret = 0;
+ spin_unlock(&mem_lock);
+ goto out;
+ }
+
+ current_frag = map->root_frag;
+ vaddr = (unsigned long) map->virt_addr;
+ for (i = 0; i < map->frag_count; i++) {
+ DPA_ASSERT(current_frag->refs > 0);
+ --current_frag->refs;
+#if !(defined(CONFIG_ARM) || defined(CONFIG_ARM64))
+ /*
+ * Make sure we invalidate the TLB entry for
+ * this fragment, otherwise a remap of a different
+ * page to this vaddr would give access to an
+ * incorrect piece of memory
+ */
+ cleartlbcam(vaddr, mfspr(SPRN_PID));
+#endif
+ vaddr += current_frag->len;
+ current_frag = list_entry(current_frag->list.prev,
+ struct mem_fragment, list);
+ }
+ map->root_frag->name[0] = 0;
+ list_del(&map->list);
+ compress_frags();
+ spin_unlock(&mem_lock);
+
+ base = vma->vm_start;
+ sz = vma->vm_end - vma->vm_start;
+ do_munmap(current->mm, base, sz, NULL);
+ ret = 0;
+ out:
+ up_write(&current->mm->mmap_sem);
+ return ret;
+}
+
+static long ioctl_dma_stats(struct ctx *ctx, void __user *arg)
+{
+ struct mem_fragment *frag;
+ struct usdpaa_ioctl_dma_used result;
+
+ result.free_bytes = 0;
+ result.total_bytes = phys_size;
+
+ list_for_each_entry(frag, &mem_list, list) {
+ if (frag->refs == 0)
+ result.free_bytes += frag->len;
+ }
+
+ return copy_to_user(arg, &result, sizeof(result)); }
+
+static int test_lock(struct mem_mapping *map)
+{
+ int ret = 0;
+ spin_lock(&mem_lock);
+ if (!map->root_frag->owner) {
+ map->root_frag->owner = map;
+ ret = 1;
+ }
+ spin_unlock(&mem_lock);
+ return ret;
+}
+
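+/* test_lock() is the wait_event_interruptible() predicate used by
+ * ioctl_dma_lock() below: it takes ownership of the root fragment iff no
+ * other mapping currently owns it. */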
+static long ioctl_dma_lock(struct ctx *ctx, void __user *arg)
+{
+ struct mem_mapping *map;
+ struct vm_area_struct *vma;
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, (unsigned long)arg);
+ if (!vma || (vma->vm_start > (unsigned long)arg)) {
+ up_read(&current->mm->mmap_sem);
+ return -EFAULT;
+ }
+ spin_lock(&mem_lock);
+ list_for_each_entry(map, &ctx->maps, list) {
+ if (map->root_frag->pfn_base == vma->vm_pgoff)
+ goto map_match;
+ }
+ map = NULL;
+map_match:
+ spin_unlock(&mem_lock);
+ up_read(&current->mm->mmap_sem);
+
+ if (!map)
+ return -EFAULT;
+ if (!map->root_frag->has_locking)
+ return -ENODEV;
+ return wait_event_interruptible(map->root_frag->wq, test_lock(map));
+}
+
+static long ioctl_dma_unlock(struct ctx *ctx, void __user *arg)
+{
+ struct mem_mapping *map;
+ struct vm_area_struct *vma;
+ int ret;
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, (unsigned long)arg);
+ if (!vma || (vma->vm_start > (unsigned long)arg))
+ ret = -EFAULT;
+ else {
+ spin_lock(&mem_lock);
+ list_for_each_entry(map, &ctx->maps, list) {
+ if (map->root_frag->pfn_base == vma->vm_pgoff) {
+ if (!map->root_frag->has_locking)
+ ret = -ENODEV;
+ else if (map->root_frag->owner == map) {
+ map->root_frag->owner = NULL;
+ wake_up(&map->root_frag->wq);
+ ret = 0;
+ } else
+ ret = -EBUSY;
+ goto map_match;
+ }
+ }
+ ret = -EINVAL;
+map_match:
+ spin_unlock(&mem_lock);
+ }
+ up_read(&current->mm->mmap_sem);
+ return ret;
+}
+
+static int portal_mmap(struct file *fp, struct resource *res, void **ptr)
+{
+ unsigned long longret = 0, populate;
+ resource_size_t len;
+
+ down_write(&current->mm->mmap_sem);
+ len = resource_size(res);
+ if (len != (unsigned long)len)
+ return -EINVAL;
+ longret = do_mmap_pgoff(fp, PAGE_SIZE, (unsigned long)len,
+ PROT_READ | PROT_WRITE, MAP_SHARED,
+ res->start >> PAGE_SHIFT, &populate, NULL);
+ up_write(&current->mm->mmap_sem);
+
+ if (longret & ~PAGE_MASK)
+ return (int)longret;
+
+ *ptr = (void *) longret;
+ return 0;
+}
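+/* The PAGE_SIZE hint keeps the search away from address zero; the actual
+ * placement comes from usdpaa_get_unmapped_area(), assuming this file wires
+ * it up as the driver's get_unmapped_area hook. */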
+
+static void portal_munmap(struct resource *res, void *ptr)
+{
+ down_write(&current->mm->mmap_sem);
+ do_munmap(current->mm, (unsigned long)ptr, resource_size(res), NULL);
+ up_write(&current->mm->mmap_sem);
+}
+
+static long ioctl_portal_map(struct file *fp, struct ctx *ctx,
+ struct usdpaa_ioctl_portal_map *arg)
+{
+ struct portal_mapping *mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
+ int ret;
+
+ if (!mapping)
+ return -ENOMEM;
+
+ mapping->user = *arg;
+ mapping->iommu_domain = NULL;
+
+ if (mapping->user.type == usdpaa_portal_qman) {
+ mapping->qportal =
+ qm_get_unused_portal_idx(mapping->user.index);
+ if (!mapping->qportal) {
+ ret = -ENODEV;
+ goto err_get_portal;
+ }
+ mapping->phys = &mapping->qportal->addr_phys[0];
+ mapping->user.channel = mapping->qportal->public_cfg.channel;
+ mapping->user.pools = mapping->qportal->public_cfg.pools;
+ mapping->user.index = mapping->qportal->public_cfg.index;
+ } else if (mapping->user.type == usdpaa_portal_bman) {
+ mapping->bportal =
+ bm_get_unused_portal_idx(mapping->user.index);
+ if (!mapping->bportal) {
+ ret = -ENODEV;
+ goto err_get_portal;
+ }
+ mapping->phys = &mapping->bportal->addr_phys[0];
+ mapping->user.index = mapping->bportal->public_cfg.index;
+ } else {
+ ret = -EINVAL;
+ goto err_copy_from_user;
+ }
+ /* Need to put pcfg in ctx's list before the mmaps because the mmap
+ * handlers look it up. */
+ spin_lock(&mem_lock);
+ list_add(&mapping->list, &ctx->portals);
+ spin_unlock(&mem_lock);
+ ret = portal_mmap(fp, &mapping->phys[DPA_PORTAL_CE],
+ &mapping->user.addr.cena);
+ if (ret)
+ goto err_mmap_cena;
+ ret = portal_mmap(fp, &mapping->phys[DPA_PORTAL_CI],
+ &mapping->user.addr.cinh);
+ if (ret)
+ goto err_mmap_cinh;
+ *arg = mapping->user;
+ return ret;
+
+err_mmap_cinh:
+ portal_munmap(&mapping->phys[DPA_PORTAL_CE], mapping->user.addr.cena);
+err_mmap_cena:
+ if ((mapping->user.type == usdpaa_portal_qman) && mapping->qportal)
+ qm_put_unused_portal(mapping->qportal);
+ else if ((mapping->user.type == usdpaa_portal_bman) && mapping->bportal)
+ bm_put_unused_portal(mapping->bportal);
+ spin_lock(&mem_lock);
+ list_del(&mapping->list);
+ spin_unlock(&mem_lock);
+err_get_portal:
+err_copy_from_user:
+ kfree(mapping);
+ return ret;
+}
+
+static long ioctl_portal_unmap(struct ctx *ctx, struct usdpaa_portal_map *i)
+{
+ struct portal_mapping *mapping;
+ struct vm_area_struct *vma;
+ unsigned long pfn;
+ u32 channel;
+
+ /* Get the PFN corresponding to one of the virt addresses */
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm, (unsigned long)i->cinh);
+ if (!vma || (vma->vm_start > (unsigned long)i->cinh)) {
+ up_read(&current->mm->mmap_sem);
+ return -EFAULT;
+ }
+ pfn = vma->vm_pgoff;
+ up_read(&current->mm->mmap_sem);
+
+ /* Find the corresponding portal */
+ spin_lock(&mem_lock);
+ list_for_each_entry(mapping, &ctx->portals, list) {
+ if (pfn == (mapping->phys[DPA_PORTAL_CI].start >> PAGE_SHIFT))
+ goto found;
+ }
+ mapping = NULL;
+found:
+ if (mapping)
+ list_del(&mapping->list);
+ spin_unlock(&mem_lock);
+ if (!mapping)
+ return -ENODEV;
+ portal_munmap(&mapping->phys[DPA_PORTAL_CI], mapping->user.addr.cinh);
+ portal_munmap(&mapping->phys[DPA_PORTAL_CE], mapping->user.addr.cena);
+ if (mapping->user.type == usdpaa_portal_qman) {
+ init_qm_portal(mapping->qportal,
+ &mapping->qman_portal_low);
+
+ /* Tear down any FQs this portal is referencing */
+ channel = mapping->qportal->public_cfg.channel;
+ qm_check_and_destroy_fqs(&mapping->qman_portal_low,
+ &channel,
+ check_portal_channel);
+ qm_put_unused_portal(mapping->qportal);
+ } else if (mapping->user.type == usdpaa_portal_bman) {
+ init_bm_portal(mapping->bportal,
+ &mapping->bman_portal_low);
+ bm_put_unused_portal(mapping->bportal);
+ }
+ kfree(mapping);
+ return 0;
+}
+
+static void portal_config_pamu(struct qm_portal_config *pcfg, uint8_t sdest,
+ uint32_t cpu, uint32_t cache, uint32_t window)
+{
+#ifdef CONFIG_FSL_PAMU
+ int ret;
+ int window_count = 1;
+ struct iommu_domain_geometry geom_attr;
+ struct pamu_stash_attribute stash_attr;
+
+ pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
+ if (!pcfg->iommu_domain) {
+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_alloc() failed",
+ __func__);
+ goto _no_iommu;
+ }
+ geom_attr.aperture_start = 0;
+ geom_attr.aperture_end =
+ ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
+ geom_attr.force_aperture = true;
+ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
+ &geom_attr);
+ if (ret < 0) {
+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
+ __func__, ret);
+ goto _iommu_domain_free;
+ }
+ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
+ &window_count);
+ if (ret < 0) {
+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
+ __func__, ret);
+ goto _iommu_domain_free;
+ }
+ stash_attr.cpu = cpu;
+ stash_attr.cache = cache;
+ /* set stash information for the window */
+ stash_attr.window = 0;
+
+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
+ DOMAIN_ATTR_FSL_PAMU_STASH,
+ &stash_attr);
+ if (ret < 0) {
+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
+ __func__, ret);
+ goto _iommu_domain_free;
+ }
+ ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret < 0) {
+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_window_enable() = %d",
+ __func__, ret);
+ goto _iommu_domain_free;
+ }
+ ret = iommu_attach_device(pcfg->iommu_domain, &pcfg->dev);
+ if (ret < 0) {
+ pr_err(KBUILD_MODNAME ":%s(): iommu_device_attach() = %d",
+ __func__, ret);
+ goto _iommu_domain_free;
+ }
+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
+ DOMAIN_ATTR_FSL_PAMU_ENABLE,
+ &window_count);
+ if (ret < 0) {
+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
+ __func__, ret);
+ goto _iommu_detach_device;
+ }
+_no_iommu:
+#endif
+
+#ifdef CONFIG_FSL_QMAN_CONFIG
+ if (qman_set_sdest(pcfg->public_cfg.channel, sdest))
+#endif
+ pr_warn("Failed to set QMan portal's stash request queue\n");
+
+ return;
+
+#ifdef CONFIG_FSL_PAMU
+_iommu_detach_device:
+ iommu_detach_device(pcfg->iommu_domain, NULL);
+_iommu_domain_free:
+ iommu_domain_free(pcfg->iommu_domain);
+#endif
+}
+
+static long ioctl_allocate_raw_portal(struct file *fp, struct ctx *ctx,
+ struct usdpaa_ioctl_raw_portal *arg)
+{
+ struct portal_mapping *mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
+ int ret;
+
+ if (!mapping)
+ return -ENOMEM;
+
+ mapping->user.type = arg->type;
+ mapping->iommu_domain = NULL;
+ if (arg->type == usdpaa_portal_qman) {
+ mapping->qportal = qm_get_unused_portal_idx(arg->index);
+ if (!mapping->qportal) {
+ ret = -ENODEV;
+ goto err;
+ }
+ mapping->phys = &mapping->qportal->addr_phys[0];
+ arg->index = mapping->qportal->public_cfg.index;
+ arg->cinh = mapping->qportal->addr_phys[DPA_PORTAL_CI].start;
+ arg->cena = mapping->qportal->addr_phys[DPA_PORTAL_CE].start;
+ if (arg->enable_stash) {
+ /* Setup the PAMU with the supplied parameters */
+ portal_config_pamu(mapping->qportal, arg->sdest,
+ arg->cpu, arg->cache, arg->window);
+ }
+ } else if (mapping->user.type == usdpaa_portal_bman) {
+ mapping->bportal =
+ bm_get_unused_portal_idx(arg->index);
+ if (!mapping->bportal) {
+ ret = -ENODEV;
+ goto err;
+ }
+ mapping->phys = &mapping->bportal->addr_phys[0];
+ arg->index = mapping->bportal->public_cfg.index;
+ arg->cinh = mapping->bportal->addr_phys[DPA_PORTAL_CI].start;
+ arg->cena = mapping->bportal->addr_phys[DPA_PORTAL_CE].start;
+ } else {
+ ret = -EINVAL;
+ goto err;
+ }
+ /* Need to put pcfg in ctx's list before the mmaps because the mmap
+ * handlers look it up. */
+ spin_lock(&mem_lock);
+ list_add(&mapping->list, &ctx->portals);
+ spin_unlock(&mem_lock);
+ return 0;
+err:
+ kfree(mapping);
+ return ret;
+}
+
+static long ioctl_free_raw_portal(struct file *fp, struct ctx *ctx,
+ struct usdpaa_ioctl_raw_portal *arg)
+{
+ struct portal_mapping *mapping;
+ u32 channel;
+
+ /* Find the corresponding portal */
+ spin_lock(&mem_lock);
+ list_for_each_entry(mapping, &ctx->portals, list) {
+ if (mapping->phys[DPA_PORTAL_CI].start == arg->cinh)
+ goto found;
+ }
+ mapping = NULL;
+found:
+ if (mapping)
+ list_del(&mapping->list);
+ spin_unlock(&mem_lock);
+ if (!mapping)
+ return -ENODEV;
+ if (mapping->user.type == usdpaa_portal_qman) {
+ init_qm_portal(mapping->qportal,
+ &mapping->qman_portal_low);
+
+ /* Tear down any FQs this portal is referencing */
+ channel = mapping->qportal->public_cfg.channel;
+ qm_check_and_destroy_fqs(&mapping->qman_portal_low,
+ &channel,
+ check_portal_channel);
+ qm_put_unused_portal(mapping->qportal);
+ } else if (mapping->user.type == usdpaa_portal_bman) {
+ init_bm_portal(mapping->bportal,
+ &mapping->bman_portal_low);
+ bm_put_unused_portal(mapping->bportal);
+ }
+ kfree(mapping);
+ return 0;
+}
+
+
+static inline struct device *get_dev_ptr(char *if_name)
+{
+ struct device *dev;
+ char node[NODE_NAME_LEN];
+
+ snprintf(node, sizeof(node), "soc:fsl,dpaa:%s", if_name);
+ dev = bus_find_device_by_name(&platform_bus_type, NULL, node);
+ if (dev == NULL) {
+ pr_err(KBUILD_MODNAME "IF %s not found\n", if_name);
+ return NULL;
+ }
+ pr_debug("%s: found dev 0x%lX for If %s ,dev->platform_data %p\n",
+ __func__, (unsigned long)dev,
+ if_name, dev->platform_data);
+
+ return dev;
+}
+
+/* Return the current link status of the device:
+ * 1 if the link is up, 0 if it is down, -ENODEV if the
+ * interface cannot be found.
+ *
+ * Input parameter:
+ * if_name: Interface node name
+ *
+ */
+static inline int ioctl_usdpaa_get_link_status(char *if_name)
+{
+ struct net_device *net_dev = NULL;
+ struct device *dev;
+
+ dev = get_dev_ptr(if_name);
+ if (dev == NULL)
+ return -ENODEV;
+ net_dev = dev->platform_data;
+ if (net_dev == NULL)
+ return -ENODEV;
+
+ if (test_bit(__LINK_STATE_NOCARRIER, &net_dev->state))
+ return 0; /* Link is DOWN */
+ else
+ return 1; /* Link is UP */
+}
+
+
+/* Link Status Callback Function
+ * This function is registered with the PHY framework to receive
+ * link update notifications and is responsible for waking up the
+ * user-space task when a link update notification arrives.
+ */
+static void phy_link_updates(struct net_device *net_dev)
+{
+ struct dpa_proxy_priv_s *priv = NULL;
+
+ pr_debug("%s: Link '%s': Speed '%d-Mbps': Autoneg '%d': Duplex '%d'\n",
+ net_dev->name,
+ ioctl_usdpaa_get_link_status(net_dev->name)?"UP":"DOWN",
+ net_dev->phydev->speed,
+ net_dev->phydev->autoneg,
+ net_dev->phydev->duplex);
+
+ /* Wake up the user space context to notify PHY update */
+ priv = netdev_priv(net_dev);
+ eventfd_signal(priv->efd_ctx, 1);
+}
+
+
+/* IOCTL handler for enabling Link status request for a given interface
+ * Input parameters:
+ * args->if_name: This is the network interface node name as defined
+ * in the device tree. Currently it has the format
+ * "ethernet@x" for each interface.
+ * args->efd: The eventfd which should be signalled when any
+ * link update is received.
+ */
+static int ioctl_en_if_link_status(struct usdpaa_ioctl_link_status *args)
+{
+ struct net_device *net_dev = NULL;
+ struct dpa_proxy_priv_s *priv = NULL;
+ struct device *dev;
+ struct mac_device *mac_dev;
+ struct proxy_device *proxy_dev;
+ struct task_struct *userspace_task = NULL;
+ struct file *efd_file = NULL;
+
+ dev = get_dev_ptr(args->if_name);
+ if (dev == NULL)
+ return -ENODEV;
+ /* Utilize dev->platform_data to save netdevice
+ pointer as it will not be registered */
+ if (dev->platform_data) {
+ pr_debug("%s: IF %s already initialized\n",
+ __func__, args->if_name);
+ /* This will happen when the application was not able to initiate
+ * cleanup in its last run. We still need to save the new
+ * eventfd context.
+ */
+ net_dev = dev->platform_data;
+ priv = netdev_priv(net_dev);
+
+ /* Get current task context from which IOCTL was called */
+ userspace_task = current;
+
+ rcu_read_lock();
+ efd_file = fcheck_files(userspace_task->files, args->efd);
+ rcu_read_unlock();
+
+ priv->efd_ctx = eventfd_ctx_fileget(efd_file);
+ if (!priv->efd_ctx) {
+ pr_err(KBUILD_MODNAME "get eventfd context failed\n");
+ /* Free the allocated memory for net device */
+ dev->platform_data = NULL;
+ free_netdev(net_dev);
+ return -EINVAL;
+ }
+ /* Since there will be no PHY update as the link is already set
+ * up, wake the user context once so that the current PHY status
+ * can be fetched.
+ */
+ phy_link_updates(net_dev);
+ return 0;
+ }
+
+ proxy_dev = dev_get_drvdata(dev);
+ mac_dev = proxy_dev->mac_dev;
+ /* Allocate a dummy net device for the proxy interface */
+ net_dev = alloc_etherdev(sizeof(*priv));
+ if (!net_dev) {
+ pr_err(KBUILD_MODNAME "alloc_etherdev failed\n");
+ return -ENOMEM;
+ } else {
+ SET_NETDEV_DEV(net_dev, dev);
+ priv = netdev_priv(net_dev);
+ priv->mac_dev = mac_dev;
+ /* Get current task context from which IOCTL was called */
+ userspace_task = current;
+
+ rcu_read_lock();
+ efd_file = fcheck_files(userspace_task->files, args->efd);
+ rcu_read_unlock();
+
+ priv->efd_ctx = eventfd_ctx_fileget(efd_file);
+
+ if (!priv->efd_ctx) {
+ pr_err(KBUILD_MODNAME "get eventfd context failed\n");
+ /* Free the allocated memory for net device */
+ free_netdev(net_dev);
+ return -EINVAL;
+ }
+ strncpy(net_dev->name, args->if_name, IF_NAME_MAX_LEN);
+ dev->platform_data = net_dev;
+ }
+
+ pr_debug("%s: mac_dev %p cell_index %d\n",
+ __func__, mac_dev, mac_dev->cell_index);
+ mac_dev->phy_dev = of_phy_connect(net_dev, mac_dev->phy_node,
+ phy_link_updates, 0, mac_dev->phy_if);
+ if (unlikely(mac_dev->phy_dev == NULL) || IS_ERR(mac_dev->phy_dev)) {
+ pr_err("%s: --------Error in PHY Connect\n", __func__);
+ /* Free the allocated memory for net device */
+ free_netdev(net_dev);
+ return -ENODEV;
+ }
+ net_dev->phydev = mac_dev->phy_dev;
+ mac_dev->start(mac_dev);
+ pr_debug("%s: --- PHY connected for %s\n", __func__, args->if_name);
+
+ return 0;
+}
+
+/* IOCTL handler for disabling Link status for a given interface
+ * Input parameters:
+ * if_name: This is the network interface node name as defined
+ * in the device tree. Currently it has the format
+ * "ethernet@x" for each interface.
+ */
+static int ioctl_disable_if_link_status(char *if_name)
+{
+ struct net_device *net_dev = NULL;
+ struct device *dev;
+ struct mac_device *mac_dev;
+ struct proxy_device *proxy_dev;
+ struct dpa_proxy_priv_s *priv = NULL;
+
+ dev = get_dev_ptr(if_name);
+ if (dev == NULL)
+ return -ENODEV;
+ /* Utilize dev->platform_data to save netdevice
+ pointer as it will not be registered */
+ if (!dev->platform_data) {
+ pr_debug("%s: IF %s already Disabled for Link status\n",
+ __func__, if_name);
+ return 0;
+ }
+
+ net_dev = dev->platform_data;
+ proxy_dev = dev_get_drvdata(dev);
+ mac_dev = proxy_dev->mac_dev;
+ mac_dev->stop(mac_dev);
+
+ priv = netdev_priv(net_dev);
+ eventfd_ctx_put(priv->efd_ctx);
+
+ /* This will also deregister the callback */
+ phy_disconnect(mac_dev->phy_dev);
+ phy_resume(mac_dev->phy_dev);
+
+ free_netdev(net_dev);
+ dev->platform_data = NULL;
+
+ pr_debug("%s: Link status Disabled for %s\n", __func__, if_name);
+ return 0;
+}
+
+static long usdpaa_ioctl(struct file *fp, unsigned int cmd, unsigned long arg)
+{
+ struct ctx *ctx = fp->private_data;
+ void __user *a = (void __user *)arg;
+ switch (cmd) {
+ case USDPAA_IOCTL_ID_ALLOC:
+ return ioctl_id_alloc(ctx, a);
+ case USDPAA_IOCTL_ID_RELEASE:
+ return ioctl_id_release(ctx, a);
+ case USDPAA_IOCTL_ID_RESERVE:
+ return ioctl_id_reserve(ctx, a);
+ case USDPAA_IOCTL_DMA_MAP:
+ {
+ struct usdpaa_ioctl_dma_map input;
+ int ret;
+ if (copy_from_user(&input, a, sizeof(input)))
+ return -EFAULT;
+ ret = ioctl_dma_map(fp, ctx, &input);
+ if (copy_to_user(a, &input, sizeof(input)))
+ return -EFAULT;
+ return ret;
+ }
+ case USDPAA_IOCTL_DMA_UNMAP:
+ return ioctl_dma_unmap(ctx, a);
+ case USDPAA_IOCTL_DMA_LOCK:
+ return ioctl_dma_lock(ctx, a);
+ case USDPAA_IOCTL_DMA_UNLOCK:
+ return ioctl_dma_unlock(ctx, a);
+ case USDPAA_IOCTL_PORTAL_MAP:
+ {
+ struct usdpaa_ioctl_portal_map input;
+ int ret;
+ if (copy_from_user(&input, a, sizeof(input)))
+ return -EFAULT;
+ ret = ioctl_portal_map(fp, ctx, &input);
+ if (copy_to_user(a, &input, sizeof(input)))
+ return -EFAULT;
+ return ret;
+ }
+ case USDPAA_IOCTL_PORTAL_UNMAP:
+ {
+ struct usdpaa_portal_map input;
+ if (copy_from_user(&input, a, sizeof(input)))
+ return -EFAULT;
+ return ioctl_portal_unmap(ctx, &input);
+ }
+ case USDPAA_IOCTL_DMA_USED:
+ return ioctl_dma_stats(ctx, a);
+ case USDPAA_IOCTL_ALLOC_RAW_PORTAL:
+ {
+ struct usdpaa_ioctl_raw_portal input;
+ int ret;
+ if (copy_from_user(&input, a, sizeof(input)))
+ return -EFAULT;
+ ret = ioctl_allocate_raw_portal(fp, ctx, &input);
+ if (copy_to_user(a, &input, sizeof(input)))
+ return -EFAULT;
+ return ret;
+ }
+ case USDPAA_IOCTL_FREE_RAW_PORTAL:
+ {
+ struct usdpaa_ioctl_raw_portal input;
+ if (copy_from_user(&input, a, sizeof(input)))
+ return -EFAULT;
+ return ioctl_free_raw_portal(fp, ctx, &input);
+ }
+ case USDPAA_IOCTL_ENABLE_LINK_STATUS_INTERRUPT:
+ {
+ struct usdpaa_ioctl_link_status input;
+ int ret;
+
+ if (copy_from_user(&input, a, sizeof(input)))
+ return -EFAULT;
+ ret = ioctl_en_if_link_status(&input);
+ if (ret)
+ pr_err("Error(%d) enable link interrupt:IF: %s\n",
+ ret, input.if_name);
+ return ret;
+ }
+ case USDPAA_IOCTL_DISABLE_LINK_STATUS_INTERRUPT:
+ {
+ char input[IF_NAME_MAX_LEN];
+ int ret;
+
+ /* Copy the interface name itself, not a user pointer */
+ if (copy_from_user(input, a, sizeof(input)))
+ return -EFAULT;
+ input[sizeof(input) - 1] = '\0';
+ ret = ioctl_disable_if_link_status(input);
+ if (ret)
+ pr_err("Error(%d) Disabling link interrupt:IF: %s\n",
+ ret, input);
+ return ret;
+ }
+ case USDPAA_IOCTL_GET_LINK_STATUS:
+ {
+ struct usdpaa_ioctl_link_status_args input;
+
+ if (copy_from_user(&input, a, sizeof(input)))
+ return -EFAULT;
+
+ input.link_status = ioctl_usdpaa_get_link_status(input.if_name);
+ if (input.link_status < 0)
+ return input.link_status;
+ if (copy_to_user(a, &input, sizeof(input)))
+ return -EFAULT;
+
+ return 0;
+ }
+ }
+ return -EINVAL;
+}
+
+static long usdpaa_ioctl_compat(struct file *fp, unsigned int cmd,
+ unsigned long arg)
+{
+#ifdef CONFIG_COMPAT
+ struct ctx *ctx = fp->private_data;
+ void __user *a = (void __user *)arg;
+#endif
+ switch (cmd) {
+#ifdef CONFIG_COMPAT
+ case USDPAA_IOCTL_DMA_MAP_COMPAT:
+ {
+ int ret;
+ struct usdpaa_ioctl_dma_map_compat input;
+ struct usdpaa_ioctl_dma_map converted;
+
+ if (copy_from_user(&input, a, sizeof(input)))
+ return -EFAULT;
+
+ converted.ptr = compat_ptr(input.ptr);
+ converted.phys_addr = input.phys_addr;
+ converted.len = input.len;
+ converted.flags = input.flags;
+ strncpy(converted.name, input.name, USDPAA_DMA_NAME_MAX);
+ converted.has_locking = input.has_locking;
+ converted.did_create = input.did_create;
+
+ ret = ioctl_dma_map(fp, ctx, &converted);
+ input.ptr = ptr_to_compat(converted.ptr);
+ input.phys_addr = converted.phys_addr;
+ input.len = converted.len;
+ input.flags = converted.flags;
+ strncpy(input.name, converted.name, USDPAA_DMA_NAME_MAX);
+ input.has_locking = converted.has_locking;
+ input.did_create = converted.did_create;
+ if (copy_to_user(a, &input, sizeof(input)))
+ return -EFAULT;
+ return ret;
+ }
+ case USDPAA_IOCTL_PORTAL_MAP_COMPAT:
+ {
+ int ret;
+ struct compat_usdpaa_ioctl_portal_map input;
+ struct usdpaa_ioctl_portal_map converted;
+ if (copy_from_user(&input, a, sizeof(input)))
+ return -EFAULT;
+ converted.type = input.type;
+ converted.index = input.index;
+ ret = ioctl_portal_map(fp, ctx, &converted);
+ input.addr.cinh = ptr_to_compat(converted.addr.cinh);
+ input.addr.cena = ptr_to_compat(converted.addr.cena);
+ input.channel = converted.channel;
+ input.pools = converted.pools;
+ input.index = converted.index;
+ if (copy_to_user(a, &input, sizeof(input)))
+ return -EFAULT;
+ return ret;
+ }
+ case USDPAA_IOCTL_PORTAL_UNMAP_COMPAT:
+ {
+ struct usdpaa_portal_map_compat input;
+ struct usdpaa_portal_map converted;
+
+ if (copy_from_user(&input, a, sizeof(input)))
+ return -EFAULT;
+ converted.cinh = compat_ptr(input.cinh);
+ converted.cena = compat_ptr(input.cena);
+ return ioctl_portal_unmap(ctx, &converted);
+ }
+ case USDPAA_IOCTL_ALLOC_RAW_PORTAL_COMPAT:
+ {
+ int ret;
+ struct usdpaa_ioctl_raw_portal converted;
+ struct compat_ioctl_raw_portal input;
+ if (copy_from_user(&input, a, sizeof(input)))
+ return -EFAULT;
+ converted.type = input.type;
+ converted.index = input.index;
+ converted.enable_stash = input.enable_stash;
+ converted.cpu = input.cpu;
+ converted.cache = input.cache;
+ converted.window = input.window;
+ converted.sdest = input.sdest;
+ ret = ioctl_allocate_raw_portal(fp, ctx, &converted);
+
+ input.cinh = converted.cinh;
+ input.cena = converted.cena;
+ input.index = converted.index;
+
+ if (copy_to_user(a, &input, sizeof(input)))
+ return -EFAULT;
+ return ret;
+ }
+ case USDPAA_IOCTL_FREE_RAW_PORTAL_COMPAT:
+ {
+ struct usdpaa_ioctl_raw_portal converted;
+ struct compat_ioctl_raw_portal input;
+ if (copy_from_user(&input, a, sizeof(input)))
+ return -EFAULT;
+ converted.type = input.type;
+ converted.index = input.index;
+ converted.cinh = input.cinh;
+ converted.cena = input.cena;
+ return ioctl_free_raw_portal(fp, ctx, &converted);
+ }
+#endif
+ default:
+ return usdpaa_ioctl(fp, cmd, arg);
+ }
+}
+
+int usdpaa_get_portal_config(struct file *filp, void *cinh,
+ enum usdpaa_portal_type ptype, unsigned int *irq,
+ void **iir_reg)
+{
+ /* Walk the list of portals for filp and return the config for the
+ * portal that matches the hint */
+ struct ctx *context;
+ struct portal_mapping *portal;
+
+ /* First sanitize the filp */
+ if (filp->f_op->open != usdpaa_open)
+ return -ENODEV;
+ context = filp->private_data;
+ spin_lock(&context->lock);
+ list_for_each_entry(portal, &context->portals, list) {
+ if (portal->user.type == ptype &&
+ portal->user.addr.cinh == cinh) {
+ if (ptype == usdpaa_portal_qman) {
+ *irq = portal->qportal->public_cfg.irq;
+ *iir_reg = portal->qportal->addr_virt[1] +
+ QM_REG_IIR;
+ } else {
+ *irq = portal->bportal->public_cfg.irq;
+ *iir_reg = portal->bportal->addr_virt[1] +
+ BM_REG_IIR;
+ }
+ spin_unlock(&context->lock);
+ return 0;
+ }
+ }
+ spin_unlock(&context->lock);
+ return -EINVAL;
+}
+
+static const struct file_operations usdpaa_fops = {
+ .open = usdpaa_open,
+ .release = usdpaa_release,
+ .mmap = usdpaa_mmap,
+ .get_unmapped_area = usdpaa_get_unmapped_area,
+ .unlocked_ioctl = usdpaa_ioctl,
+ .compat_ioctl = usdpaa_ioctl_compat
+};
+
+static struct miscdevice usdpaa_miscdev = {
+ .name = "fsl-usdpaa",
+ .fops = &usdpaa_fops,
+ .minor = MISC_DYNAMIC_MINOR,
+};
+
+/* Early-boot memory allocation. The boot-arg "usdpaa_mem=<x>" is used to
+ * indicate how much memory (if any) to allocate during early boot. If the
+ * format "usdpaa_mem=<x>,<y>" is used, then <y> will be interpreted as the
+ * number of TLB1 entries to reserve (default is 1). If there are more mappings
+ * than there are TLB1 entries, fault-handling will occur. */
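+/* Illustrative only: booting with "usdpaa_mem=256M,4" would reserve 256 MiB
+ * backed by up to four TLB1 entries; the values here are examples, not
+ * requirements. */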
+
+static __init int usdpaa_mem(char *arg)
+{
+ pr_warn("uspdaa_mem argument is depracated\n");
+ arg_phys_size = memparse(arg, &arg);
+ num_tlb = 1;
+ if (*arg == ',') {
+ unsigned long ul;
+ int err = kstrtoul(arg + 1, 0, &ul);
+ if (err < 0) {
+ num_tlb = 1;
+ pr_warn("ERROR, usdpaa_mem arg is invalid\n");
+ } else
+ num_tlb = (unsigned int)ul;
+ }
+ return 0;
+}
+early_param("usdpaa_mem", usdpaa_mem);
+
+static int usdpaa_mem_init(struct reserved_mem *rmem)
+{
+ phys_start = rmem->base;
+ phys_size = rmem->size;
+
+ WARN_ON(!(phys_start && phys_size));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(usdpaa_mem_init, "fsl,usdpaa-mem", usdpaa_mem_init);
+
+__init int fsl_usdpaa_init_early(void)
+{
+ if (!phys_size || !phys_start) {
+ pr_info("No USDPAA memory, no 'fsl,usdpaa-mem' in device-tree\n");
+ return 0;
+ }
+ if (phys_size % PAGE_SIZE) {
+ pr_err("'fsl,usdpaa-mem' size must be a multiple of page size\n");
+ phys_size = 0;
+ return 0;
+ }
+ if (arg_phys_size && phys_size != arg_phys_size) {
+ pr_err("'usdpaa_mem argument size (0x%llx) does not match device tree size (0x%llx)\n",
+ arg_phys_size, phys_size);
+ phys_size = 0;
+ return 0;
+ }
+ pfn_start = phys_start >> PAGE_SHIFT;
+ pfn_size = phys_size >> PAGE_SHIFT;
+#ifdef CONFIG_PPC
+ first_tlb = current_tlb = tlbcam_index;
+ tlbcam_index += num_tlb;
+#endif
+ pr_info("USDPAA region at %llx:%llx(%lx:%lx), %d TLB1 entries)\n",
+ phys_start, phys_size, pfn_start, pfn_size, num_tlb);
+ return 0;
+}
+subsys_initcall(fsl_usdpaa_init_early);
+
+
+static int __init usdpaa_init(void)
+{
+ struct mem_fragment *frag;
+ int ret;
+ u64 tmp_size = phys_size;
+ u64 tmp_start = phys_start;
+ u64 tmp_pfn_size = pfn_size;
+ u64 tmp_pfn_start = pfn_start;
+
+ pr_info("Freescale USDPAA process driver\n");
+ if (!phys_start) {
+ pr_warn("fsl-usdpaa: no region found\n");
+ return 0;
+ }
+
+ while (tmp_size != 0) {
+ u32 frag_size = largest_page_size(tmp_size);
+ frag = kmalloc(sizeof(*frag), GFP_KERNEL);
+ if (!frag) {
+ pr_err("Failed to setup USDPAA memory accounting\n");
+ return -ENOMEM;
+ }
+ frag->base = tmp_start;
+ frag->len = frag->root_len = frag_size;
+ frag->root_pfn = tmp_pfn_start;
+ frag->pfn_base = tmp_pfn_start;
+ frag->pfn_len = frag_size / PAGE_SIZE;
+ frag->refs = 0;
+ init_waitqueue_head(&frag->wq);
+ frag->owner = NULL;
+ list_add(&frag->list, &mem_list);
+
+ /* Adjust for this frag */
+ tmp_start += frag_size;
+ tmp_size -= frag_size;
+ tmp_pfn_start += frag_size / PAGE_SIZE;
+ tmp_pfn_size -= frag_size / PAGE_SIZE;
+ }
+ ret = misc_register(&usdpaa_miscdev);
+ if (ret)
+ pr_err("fsl-usdpaa: failed to register misc device\n");
+ return ret;
+}
+
+static void __exit usdpaa_exit(void)
+{
+ misc_deregister(&usdpaa_miscdev);
+}
+
+module_init(usdpaa_init);
+module_exit(usdpaa_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Freescale Semiconductor");
+MODULE_DESCRIPTION("Freescale USDPAA process driver");
diff --git a/drivers/staging/fsl_qbman/fsl_usdpaa_irq.c b/drivers/staging/fsl_qbman/fsl_usdpaa_irq.c
new file mode 100644
index 000000000000..6bb589a04264
--- /dev/null
+++ b/drivers/staging/fsl_qbman/fsl_usdpaa_irq.c
@@ -0,0 +1,289 @@
+/* Copyright (c) 2013 Freescale Semiconductor, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/* Define a device that allows USDPAA processes to open a file
+ * descriptor and specify which IRQ they want to monitor using an ioctl().
+ * When an IRQ is received, the device becomes readable so that a process
+ * can use read() or select() type calls to wait for IRQs.
+ */
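+/* A minimal sketch of the expected userspace sequence (illustrative, not a
+ * normative ABI description; 'ufd' is assumed to be a previously opened
+ * /dev/fsl-usdpaa fd and 'map.addr.cinh' to come from a prior
+ * USDPAA_IOCTL_PORTAL_MAP):
+ *
+ *	int ifd = open("/dev/fsl-usdpaa-irq", O_RDONLY);
+ *	struct usdpaa_ioctl_irq_map irq_map = {
+ *		.type = usdpaa_portal_qman,
+ *		.fd = ufd,
+ *		.portal_cinh = map.addr.cinh,
+ *	};
+ *	ioctl(ifd, USDPAA_IOCTL_PORTAL_IRQ_MAP, &irq_map);
+ *	uint32_t count;
+ *	read(ifd, &count, sizeof(count));	// blocks until an IRQ fires
+ */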
+
+#include <linux/miscdevice.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/poll.h>
+#include <linux/uaccess.h>
+#include <linux/fsl_usdpaa.h>
+#include <linux/module.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+
+#include "qman_low.h"
+#include "bman_low.h"
+
+struct usdpaa_irq_ctx {
+ int irq_set; /* Set to true once the irq is set via ioctl */
+ unsigned int irq_num;
+ u32 last_irq_count; /* Last value returned from read */
+ u32 irq_count; /* Number of irqs since last read */
+ wait_queue_head_t wait_queue; /* Waiting processes */
+ spinlock_t lock;
+ void *inhibit_addr; /* inhibit register address */
+ struct file *usdpaa_filp;
+ char irq_name[128];
+};
+
+static int usdpaa_irq_open(struct inode *inode, struct file *filp)
+{
+ struct usdpaa_irq_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->irq_set = 0;
+ ctx->irq_count = 0;
+ ctx->last_irq_count = 0;
+ init_waitqueue_head(&ctx->wait_queue);
+ spin_lock_init(&ctx->lock);
+ filp->private_data = ctx;
+ return 0;
+}
+
+static int usdpaa_irq_release(struct inode *inode, struct file *filp)
+{
+ struct usdpaa_irq_ctx *ctx = filp->private_data;
+ if (ctx->irq_set) {
+ /* Inhibit the IRQ */
+ out_be32(ctx->inhibit_addr, 0x1);
+ irq_set_affinity_hint(ctx->irq_num, NULL);
+ free_irq(ctx->irq_num, ctx);
+ ctx->irq_set = 0;
+ fput(ctx->usdpaa_filp);
+ }
+ kfree(filp->private_data);
+ return 0;
+}
+
+static irqreturn_t usdpaa_irq_handler(int irq, void *_ctx)
+{
+ unsigned long flags;
+ struct usdpaa_irq_ctx *ctx = _ctx;
+ spin_lock_irqsave(&ctx->lock, flags);
+ ++ctx->irq_count;
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ wake_up_all(&ctx->wait_queue);
+ /* Set the inhibit register; the IRQ is re-enabled
+ * once the USDPAA code has handled it */
+ out_be32(ctx->inhibit_addr, 0x1);
+ pr_debug("Inhibit at %p count %d\n", ctx->inhibit_addr, ctx->irq_count);
+ return IRQ_HANDLED;
+}
+
+static int map_irq(struct file *fp, struct usdpaa_ioctl_irq_map *irq_map)
+{
+ struct usdpaa_irq_ctx *ctx = fp->private_data;
+ int ret;
+
+ if (ctx->irq_set) {
+ pr_debug("Setting USDPAA IRQ when it was already set!\n");
+ return -EBUSY;
+ }
+
+ ctx->usdpaa_filp = fget(irq_map->fd);
+ if (!ctx->usdpaa_filp) {
+ pr_debug("USDPAA fget(%d) returned NULL\n", irq_map->fd);
+ return -EINVAL;
+ }
+
+ ret = usdpaa_get_portal_config(ctx->usdpaa_filp, irq_map->portal_cinh,
+ irq_map->type, &ctx->irq_num,
+ &ctx->inhibit_addr);
+ if (ret) {
+ pr_debug("USDPAA IRQ couldn't identify portal\n");
+ fput(ctx->usdpaa_filp);
+ return ret;
+ }
+
+ ctx->irq_set = 1;
+
+ snprintf(ctx->irq_name, sizeof(ctx->irq_name),
+ "usdpaa_irq %d", ctx->irq_num);
+
+ ret = request_irq(ctx->irq_num, usdpaa_irq_handler, 0,
+ ctx->irq_name, ctx);
+ if (ret) {
+ pr_err("USDPAA request_irq(%d) failed, ret= %d\n",
+ ctx->irq_num, ret);
+ ctx->irq_set = 0;
+ fput(ctx->usdpaa_filp);
+ return ret;
+ }
+ ret = irq_set_affinity(ctx->irq_num, &current->cpus_mask);
+ if (ret)
+ pr_err("USDPAA irq_set_affinity() failed, ret= %d\n", ret);
+
+ ret = irq_set_affinity_hint(ctx->irq_num, &current->cpus_mask);
+ if (ret)
+ pr_err("USDPAA irq_set_affinity_hint() failed, ret= %d\n", ret);
+
+ return 0;
+}
+
+static long usdpaa_irq_ioctl(struct file *fp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct usdpaa_ioctl_irq_map irq_map;
+
+ if (cmd != USDPAA_IOCTL_PORTAL_IRQ_MAP) {
+ pr_debug("USDPAA IRQ unknown command 0x%x\n", cmd);
+ return -EINVAL;
+ }
+
+ /* copy_from_user() returns the number of uncopied bytes, not an errno */
+ if (copy_from_user(&irq_map, (void __user *)arg, sizeof(irq_map)))
+ return -EFAULT;
+ return map_irq(fp, &irq_map);
+}
+
+static ssize_t usdpaa_irq_read(struct file *filp, char __user *buff,
+ size_t count, loff_t *offp)
+{
+ struct usdpaa_irq_ctx *ctx = filp->private_data;
+ int ret;
+
+ if (!ctx->irq_set) {
+ pr_debug("Reading USDPAA IRQ before it was set\n");
+ return -EINVAL;
+ }
+
+ if (count < sizeof(ctx->irq_count)) {
+ pr_debug("USDPAA IRQ Read too small\n");
+ return -EINVAL;
+ }
+ if (ctx->irq_count == ctx->last_irq_count) {
+ if (filp->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ ret = wait_event_interruptible(ctx->wait_queue,
+ ctx->irq_count != ctx->last_irq_count);
+ if (ret == -ERESTARTSYS)
+ return ret;
+ }
+
+ ctx->last_irq_count = ctx->irq_count;
+
+ if (copy_to_user(buff, &ctx->last_irq_count,
+ sizeof(ctx->last_irq_count)))
+ return -EFAULT;
+ return sizeof(ctx->irq_count);
+}
+
+static unsigned int usdpaa_irq_poll(struct file *filp, poll_table *wait)
+{
+ struct usdpaa_irq_ctx *ctx = filp->private_data;
+ unsigned int ret = 0;
+ unsigned long flags;
+
+ if (!ctx->irq_set)
+ return POLLHUP;
+
+ poll_wait(filp, &ctx->wait_queue, wait);
+
+ spin_lock_irqsave(&ctx->lock, flags);
+ if (ctx->irq_count != ctx->last_irq_count)
+ ret |= POLLIN | POLLRDNORM;
+ spin_unlock_irqrestore(&ctx->lock, flags);
+ return ret;
+}
+
+static long usdpaa_irq_ioctl_compat(struct file *fp, unsigned int cmd,
+ unsigned long arg)
+{
+#ifdef CONFIG_COMPAT
+ void __user *a = (void __user *)arg;
+#endif
+ switch (cmd) {
+#ifdef CONFIG_COMPAT
+ case USDPAA_IOCTL_PORTAL_IRQ_MAP_COMPAT:
+ {
+ struct compat_ioctl_irq_map input;
+ struct usdpaa_ioctl_irq_map converted;
+ if (copy_from_user(&input, a, sizeof(input)))
+ return -EFAULT;
+ converted.type = input.type;
+ converted.fd = input.fd;
+ converted.portal_cinh = compat_ptr(input.portal_cinh);
+ return map_irq(fp, &converted);
+ }
+#endif
+ default:
+ return usdpaa_irq_ioctl(fp, cmd, arg);
+ }
+}
+
+static const struct file_operations usdpaa_irq_fops = {
+ .open = usdpaa_irq_open,
+ .release = usdpaa_irq_release,
+ .unlocked_ioctl = usdpaa_irq_ioctl,
+ .compat_ioctl = usdpaa_irq_ioctl_compat,
+ .read = usdpaa_irq_read,
+ .poll = usdpaa_irq_poll
+};
+
+static struct miscdevice usdpaa_miscdev = {
+ .name = "fsl-usdpaa-irq",
+ .fops = &usdpaa_irq_fops,
+ .minor = MISC_DYNAMIC_MINOR,
+};
+
+static int __init usdpaa_irq_init(void)
+{
+ int ret;
+
+ pr_info("Freescale USDPAA process IRQ driver\n");
+ ret = misc_register(&usdpaa_miscdev);
+ if (ret)
+ pr_err("fsl-usdpaa-irq: failed to register misc device\n");
+ return ret;
+}
+
+static void __exit usdpaa_irq_exit(void)
+{
+ misc_deregister(&usdpaa_miscdev);
+}
+
+module_init(usdpaa_irq_init);
+module_exit(usdpaa_irq_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Freescale Semiconductor");
+MODULE_DESCRIPTION("Freescale USDPAA process IRQ driver");
diff --git a/drivers/staging/fsl_qbman/qbman_driver.c b/drivers/staging/fsl_qbman/qbman_driver.c
new file mode 100644
index 000000000000..78fc8ea33cfc
--- /dev/null
+++ b/drivers/staging/fsl_qbman/qbman_driver.c
@@ -0,0 +1,91 @@
+/* Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/time.h>
+#include "qman_private.h"
+#include "bman_private.h"
+__init void qman_init_early(void);
+__init void bman_init_early(void);
+
+static __init int qbman_init(void)
+{
+ struct device_node *dn;
+ u32 is_portal_available;
+
+ bman_init();
+ qman_init();
+
+ is_portal_available = 0;
+ for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
+ if (of_device_is_available(dn)) {
+ is_portal_available = 1;
+ of_node_put(dn);
+ break;
+ }
+ }
+
+ if (!qman_have_ccsr() && is_portal_available) {
+ struct qman_fq fq = {
+ .fqid = 1
+ };
+ struct qm_mcr_queryfq_np np;
+ int err, retry = CONFIG_FSL_QMAN_INIT_TIMEOUT;
+ struct timespec64 nowts, diffts, startts;
+
+ ktime_get_coarse_real_ts64(&startts);
+
+ /* Loop until querying the given FQID succeeds or we time out
+ * (-ERANGE means the control-plane has not yet configured QMan) */
+ while (1) {
+ err = qman_query_fq_np(&fq, &np);
+ if (!err) {
+ /* success, control-plane has configured QMan */
+ break;
+ } else if (err != -ERANGE) {
+ pr_err("QMan: I/O error, continuing anyway\n");
+ break;
+ }
+ ktime_get_coarse_real_ts64(&nowts);
+ diffts = timespec64_sub(nowts, startts);
+ if (diffts.tv_sec > 0) {
+ if (!retry--) {
+ pr_err("QMan: time out, control-plane"
+ " dead?\n");
+ break;
+ }
+ pr_warn("QMan: polling for the control-plane"
+ " (%d)\n", retry);
+ }
+ }
+ }
+ bman_resource_init();
+ qman_resource_init();
+ return 0;
+}
+subsys_initcall(qbman_init);
diff --git a/drivers/staging/fsl_qbman/qman_config.c b/drivers/staging/fsl_qbman/qman_config.c
new file mode 100644
index 000000000000..529f840117ea
--- /dev/null
+++ b/drivers/staging/fsl_qbman/qman_config.c
@@ -0,0 +1,1224 @@
+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/cacheflush.h>
+#include "qman_private.h"
+#include <linux/highmem.h>
+#include <linux/of_reserved_mem.h>
+
+/* Last updated for v00.800 of the BG */
+
+/* Register offsets */
+#define REG_QCSP_LIO_CFG(n) (0x0000 + ((n) * 0x10))
+#define REG_QCSP_IO_CFG(n) (0x0004 + ((n) * 0x10))
+#define REG_QCSP_DD_CFG(n) (0x000c + ((n) * 0x10))
+#define REG_DD_CFG 0x0200
+#define REG_DCP_CFG(n) (0x0300 + ((n) * 0x10))
+#define REG_DCP_DD_CFG(n) (0x0304 + ((n) * 0x10))
+#define REG_DCP_DLM_AVG(n) (0x030c + ((n) * 0x10))
+#define REG_PFDR_FPC 0x0400
+#define REG_PFDR_FP_HEAD 0x0404
+#define REG_PFDR_FP_TAIL 0x0408
+#define REG_PFDR_FP_LWIT 0x0410
+#define REG_PFDR_CFG 0x0414
+#define REG_SFDR_CFG 0x0500
+#define REG_SFDR_IN_USE 0x0504
+#define REG_WQ_CS_CFG(n) (0x0600 + ((n) * 0x04))
+#define REG_WQ_DEF_ENC_WQID 0x0630
+#define REG_WQ_SC_DD_CFG(n) (0x640 + ((n) * 0x04))
+#define REG_WQ_PC_DD_CFG(n) (0x680 + ((n) * 0x04))
+#define REG_WQ_DC0_DD_CFG(n) (0x6c0 + ((n) * 0x04))
+#define REG_WQ_DC1_DD_CFG(n) (0x700 + ((n) * 0x04))
+#define REG_WQ_DCn_DD_CFG(n) (0x6c0 + ((n) * 0x40)) /* n=2,3 */
+#define REG_CM_CFG 0x0800
+#define REG_ECSR 0x0a00
+#define REG_ECIR 0x0a04
+#define REG_EADR 0x0a08
+#define REG_ECIR2 0x0a0c
+#define REG_EDATA(n) (0x0a10 + ((n) * 0x04))
+#define REG_SBEC(n) (0x0a80 + ((n) * 0x04))
+#define REG_MCR 0x0b00
+#define REG_MCP(n) (0x0b04 + ((n) * 0x04))
+#define REG_MISC_CFG 0x0be0
+#define REG_HID_CFG 0x0bf0
+#define REG_IDLE_STAT 0x0bf4
+#define REG_IP_REV_1 0x0bf8
+#define REG_IP_REV_2 0x0bfc
+#define REG_FQD_BARE 0x0c00
+#define REG_PFDR_BARE 0x0c20
+#define REG_offset_BAR 0x0004 /* relative to REG_[FQD|PFDR]_BARE */
+#define REG_offset_AR 0x0010 /* relative to REG_[FQD|PFDR]_BARE */
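+/* e.g. the FQD BAR register lives at REG_FQD_BARE + REG_offset_BAR and its
+ * AR register at REG_FQD_BARE + REG_offset_AR; see qm_set_memory() below. */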
+#define REG_QCSP_BARE 0x0c80
+#define REG_QCSP_BAR 0x0c84
+#define REG_CI_SCHED_CFG 0x0d00
+#define REG_SRCIDR 0x0d04
+#define REG_LIODNR 0x0d08
+#define REG_CI_RLM_AVG 0x0d14
+#define REG_ERR_ISR 0x0e00 /* + "enum qm_isr_reg" */
+#define REG_REV3_QCSP_LIO_CFG(n) (0x1000 + ((n) * 0x10))
+#define REG_REV3_QCSP_IO_CFG(n) (0x1004 + ((n) * 0x10))
+#define REG_REV3_QCSP_DD_CFG(n) (0x100c + ((n) * 0x10))
+#define REG_CEETM_CFG_IDX 0x900
+#define REG_CEETM_CFG_PRES 0x904
+#define REG_CEETM_XSFDR_IN_USE 0x908
+
+/* Assists for QMAN_MCR */
+#define MCR_INIT_PFDR 0x01000000
+#define MCR_get_rslt(v) ((u8)((v) >> 24))
+#define MCR_rslt_idle(r) (!(r) || ((r) >= 0xf0))
+#define MCR_rslt_ok(r) ((r) == 0xf0)
+#define MCR_rslt_eaccess(r) ((r) == 0xf8)
+#define MCR_rslt_inval(r) ((r) == 0xff)
+
+struct qman;
+
+/* Follows WQ_CS_CFG0-5 */
+enum qm_wq_class {
+ qm_wq_portal = 0,
+ qm_wq_pool = 1,
+ qm_wq_fman0 = 2,
+ qm_wq_fman1 = 3,
+ qm_wq_caam = 4,
+ qm_wq_pme = 5,
+ qm_wq_first = qm_wq_portal,
+ qm_wq_last = qm_wq_pme
+};
+
+/* Follows FQD_[BARE|BAR|AR] and PFDR_[BARE|BAR|AR] */
+enum qm_memory {
+ qm_memory_fqd,
+ qm_memory_pfdr
+};
+
+/* Used by all error interrupt registers except 'inhibit' */
+#define QM_EIRQ_CIDE 0x20000000 /* Corenet Initiator Data Error */
+#define QM_EIRQ_CTDE 0x10000000 /* Corenet Target Data Error */
+#define QM_EIRQ_CITT 0x08000000 /* Corenet Invalid Target Transaction */
+#define QM_EIRQ_PLWI 0x04000000 /* PFDR Low Watermark */
+#define QM_EIRQ_MBEI 0x02000000 /* Multi-bit ECC Error */
+#define QM_EIRQ_SBEI 0x01000000 /* Single-bit ECC Error */
+#define QM_EIRQ_PEBI 0x00800000 /* PFDR Enqueues Blocked Interrupt */
+#define QM_EIRQ_IFSI 0x00020000 /* Invalid FQ Flow Control State */
+#define QM_EIRQ_ICVI 0x00010000 /* Invalid Command Verb */
+#define QM_EIRQ_IDDI 0x00000800 /* Invalid Dequeue (Direct-connect) */
+#define QM_EIRQ_IDFI 0x00000400 /* Invalid Dequeue FQ */
+#define QM_EIRQ_IDSI 0x00000200 /* Invalid Dequeue Source */
+#define QM_EIRQ_IDQI 0x00000100 /* Invalid Dequeue Queue */
+#define QM_EIRQ_IECE 0x00000010 /* Invalid Enqueue Configuration */
+#define QM_EIRQ_IEOI 0x00000008 /* Invalid Enqueue Overflow */
+#define QM_EIRQ_IESI 0x00000004 /* Invalid Enqueue State */
+#define QM_EIRQ_IECI 0x00000002 /* Invalid Enqueue Channel */
+#define QM_EIRQ_IEQI 0x00000001 /* Invalid Enqueue Queue */
+
+/* QMAN_ECIR valid error bit */
+#define PORTAL_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IESI | QM_EIRQ_IEOI | \
+ QM_EIRQ_IDQI | QM_EIRQ_IDSI | QM_EIRQ_IDFI | \
+ QM_EIRQ_IDDI | QM_EIRQ_ICVI | QM_EIRQ_IFSI)
+#define FQID_ECSR_ERR (QM_EIRQ_IEQI | QM_EIRQ_IECI | QM_EIRQ_IESI | \
+ QM_EIRQ_IEOI | QM_EIRQ_IDQI | QM_EIRQ_IDFI | \
+ QM_EIRQ_IFSI)
+
+union qman_ecir {
+ u32 ecir_raw;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 __reserved:2;
+ u32 portal_type:1;
+ u32 portal_num:5;
+ u32 fqid:24;
+#else
+ u32 fqid:24;
+ u32 portal_num:5;
+ u32 portal_type:1;
+ u32 __reserved:2;
+#endif
+ } __packed info;
+};
+
+union qman_ecir2 {
+ u32 ecir2_raw;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 portal_type:1;
+ u32 __reserved:21;
+ u32 portal_num:10;
+#else
+ u32 portal_num:10;
+ u32 __reserved:21;
+ u32 portal_type:1;
+#endif
+ } __packed info;
+};
+
+union qman_eadr {
+ u32 eadr_raw;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 __reserved1:4;
+ u32 memid:4;
+ u32 __reserved2:12;
+ u32 eadr:12;
+#else
+ u32 eadr:12;
+ u32 __reserved2:12;
+ u32 memid:4;
+ u32 __reserved1:4;
+#endif
+ } __packed info;
+ struct {
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ u32 __reserved1:3;
+ u32 memid:5;
+ u32 __reserved:8;
+ u32 eadr:16;
+#else
+ u32 eadr:16;
+ u32 __reserved:8;
+ u32 memid:5;
+ u32 __reserved1:3;
+#endif
+ } __packed info_rev3;
+};
+
+struct qman_hwerr_txt {
+ u32 mask;
+ const char *txt;
+};
+
+#define QMAN_HWE_TXT(a, b) { .mask = QM_EIRQ_##a, .txt = b }
+
+static const struct qman_hwerr_txt qman_hwerr_txts[] = {
+ QMAN_HWE_TXT(CIDE, "Corenet Initiator Data Error"),
+ QMAN_HWE_TXT(CTDE, "Corenet Target Data Error"),
+ QMAN_HWE_TXT(CITT, "Corenet Invalid Target Transaction"),
+ QMAN_HWE_TXT(PLWI, "PFDR Low Watermark"),
+ QMAN_HWE_TXT(MBEI, "Multi-bit ECC Error"),
+ QMAN_HWE_TXT(SBEI, "Single-bit ECC Error"),
+ QMAN_HWE_TXT(PEBI, "PFDR Enqueues Blocked Interrupt"),
+ QMAN_HWE_TXT(ICVI, "Invalid Command Verb"),
+ QMAN_HWE_TXT(IFSI, "Invalid Flow Control State"),
+ QMAN_HWE_TXT(IDDI, "Invalid Dequeue (Direct-connect)"),
+ QMAN_HWE_TXT(IDFI, "Invalid Dequeue FQ"),
+ QMAN_HWE_TXT(IDSI, "Invalid Dequeue Source"),
+ QMAN_HWE_TXT(IDQI, "Invalid Dequeue Queue"),
+ QMAN_HWE_TXT(IECE, "Invalid Enqueue Configuration"),
+ QMAN_HWE_TXT(IEOI, "Invalid Enqueue Overflow"),
+ QMAN_HWE_TXT(IESI, "Invalid Enqueue State"),
+ QMAN_HWE_TXT(IECI, "Invalid Enqueue Channel"),
+ QMAN_HWE_TXT(IEQI, "Invalid Enqueue Queue")
+};
+#define QMAN_HWE_COUNT ARRAY_SIZE(qman_hwerr_txts)
+
+struct qman_error_info_mdata {
+ u16 addr_mask;
+ u16 bits;
+ const char *txt;
+};
+
+#define QMAN_ERR_MDATA(a, b, c) { .addr_mask = a, .bits = b, .txt = c}
+static const struct qman_error_info_mdata error_mdata[] = {
+ QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 0"),
+ QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 1"),
+ QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 2"),
+ QMAN_ERR_MDATA(0x01FF, 24, "FQD cache tag memory 3"),
+ QMAN_ERR_MDATA(0x0FFF, 512, "FQD cache memory"),
+ QMAN_ERR_MDATA(0x07FF, 128, "SFDR memory"),
+ QMAN_ERR_MDATA(0x01FF, 72, "WQ context memory"),
+ QMAN_ERR_MDATA(0x00FF, 240, "CGR memory"),
+ QMAN_ERR_MDATA(0x00FF, 302, "Internal Order Restoration List memory"),
+ QMAN_ERR_MDATA(0x7FFF, 256, "SW portal ring memory"),
+ QMAN_ERR_MDATA(0x07FF, 181, "CEETM class queue descriptor memory"),
+ QMAN_ERR_MDATA(0x0FFF, 140, "CEETM extended SFDR memory"),
+ QMAN_ERR_MDATA(0x0FFF, 25, "CEETM logical FQ mapping memory"),
+ QMAN_ERR_MDATA(0x0FFF, 96, "CEETM dequeue context memory"),
+ QMAN_ERR_MDATA(0x07FF, 396, "CEETM ccgr memory"),
+ QMAN_ERR_MDATA(0x00FF, 146, "CEETM CQ channel shaping memory"),
+ QMAN_ERR_MDATA(0x007F, 256, "CEETM CQ channel scheduling memory"),
+ QMAN_ERR_MDATA(0x01FF, 88, "CEETM dequeue statistics memory"),
+};
+#define QMAN_ERR_MDATA_COUNT ARRAY_SIZE(error_mdata)
+
+/* TODO: make this configurable via Kconfig */
+#define QMAN_ERRS_TO_UNENABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)
+
+/**
+ * qm_err_isr_<reg>_<verb> - Manipulate global interrupt registers
+ * @v: for accessors that write values, this is the 32-bit value
+ *
+ * Manipulates QMAN_ERR_ISR, QMAN_ERR_IER, QMAN_ERR_ISDR, QMAN_ERR_IIR. All
+ * manipulations except qm_err_isr_[un]inhibit() use 32-bit masks composed of
+ * the QM_EIRQ_*** definitions. Note that "qm_err_isr_enable_write" means
+ * "write the enable register" rather than "enable the write register"!
+ */
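+/* For example, qm_err_isr_enable_write(qm, QM_EIRQ_MBEI | QM_EIRQ_SBEI)
+ * replaces the enable mask so that only the ECC error interrupts remain
+ * enabled (illustrative). */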
+#define qm_err_isr_status_read(qm) \
+ __qm_err_isr_read(qm, qm_isr_status)
+#define qm_err_isr_status_clear(qm, m) \
+ __qm_err_isr_write(qm, qm_isr_status, m)
+#define qm_err_isr_enable_read(qm) \
+ __qm_err_isr_read(qm, qm_isr_enable)
+#define qm_err_isr_enable_write(qm, v) \
+ __qm_err_isr_write(qm, qm_isr_enable, v)
+#define qm_err_isr_disable_read(qm) \
+ __qm_err_isr_read(qm, qm_isr_disable)
+#define qm_err_isr_disable_write(qm, v) \
+ __qm_err_isr_write(qm, qm_isr_disable, v)
+#define qm_err_isr_inhibit(qm) \
+ __qm_err_isr_write(qm, qm_isr_inhibit, 1)
+#define qm_err_isr_uninhibit(qm) \
+ __qm_err_isr_write(qm, qm_isr_inhibit, 0)
+
+/*
+ * TODO: unimplemented registers
+ *
+ * Keeping a list here of Qman registers I have not yet covered;
+ * QCSP_DD_IHRSR, QCSP_DD_IHRFR, QCSP_DD_HASR,
+ * DCP_DD_IHRSR, DCP_DD_IHRFR, DCP_DD_HASR, CM_CFG,
+ * QMAN_EECC, QMAN_SBET, QMAN_EINJ, QMAN_SBEC0-12
+ */
+
+/* Encapsulate "struct qman *" as a cast of the register space address. */
+
+static struct qman *qm_create(void *regs)
+{
+ return (struct qman *)regs;
+}
+
+static inline u32 __qm_in(struct qman *qm, u32 offset)
+{
+ return in_be32((void *)qm + offset);
+}
+static inline void __qm_out(struct qman *qm, u32 offset, u32 val)
+{
+ out_be32((void *)qm + offset, val);
+}
+#define qm_in(reg) __qm_in(qm, REG_##reg)
+#define qm_out(reg, val) __qm_out(qm, REG_##reg, val)
+
+static u32 __qm_err_isr_read(struct qman *qm, enum qm_isr_reg n)
+{
+ return __qm_in(qm, REG_ERR_ISR + (n << 2));
+}
+
+static void __qm_err_isr_write(struct qman *qm, enum qm_isr_reg n, u32 val)
+{
+ __qm_out(qm, REG_ERR_ISR + (n << 2), val);
+}
+
+static void qm_set_dc(struct qman *qm, enum qm_dc_portal portal,
+ int ed, u8 sernd)
+{
+ DPA_ASSERT(!ed || (portal == qm_dc_portal_fman0) ||
+ (portal == qm_dc_portal_fman1));
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ qm_out(DCP_CFG(portal), (ed ? 0x1000 : 0) | (sernd & 0x3ff));
+ else
+ qm_out(DCP_CFG(portal), (ed ? 0x100 : 0) | (sernd & 0x1f));
+}
+
+static void qm_set_wq_scheduling(struct qman *qm, enum qm_wq_class wq_class,
+ u8 cs_elev, u8 csw2, u8 csw3, u8 csw4, u8 csw5,
+ u8 csw6, u8 csw7)
+{
+ qm_out(WQ_CS_CFG(wq_class), ((cs_elev & 0xff) << 24) |
+ ((csw2 & 0x7) << 20) | ((csw3 & 0x7) << 16) |
+ ((csw4 & 0x7) << 12) | ((csw5 & 0x7) << 8) |
+ ((csw6 & 0x7) << 4) | (csw7 & 0x7));
+}
+
+static void qm_set_hid(struct qman *qm)
+{
+ qm_out(HID_CFG, 0);
+}
+
+static void qm_set_corenet_initiator(struct qman *qm)
+{
+ qm_out(CI_SCHED_CFG,
+ 0x80000000 | /* write srcciv enable */
+ (CONFIG_FSL_QMAN_CI_SCHED_CFG_SRCCIV << 24) |
+ (CONFIG_FSL_QMAN_CI_SCHED_CFG_SRQ_W << 8) |
+ (CONFIG_FSL_QMAN_CI_SCHED_CFG_RW_W << 4) |
+ CONFIG_FSL_QMAN_CI_SCHED_CFG_BMAN_W);
+}
+
+static void qm_get_version(struct qman *qm, u16 *id, u8 *major, u8 *minor,
+ u8 *cfg)
+{
+ u32 v = qm_in(IP_REV_1);
+ u32 v2 = qm_in(IP_REV_2);
+ *id = (v >> 16);
+ *major = (v >> 8) & 0xff;
+ *minor = v & 0xff;
+ *cfg = v2 & 0xff;
+}
+
+static void qm_set_memory(struct qman *qm, enum qm_memory memory, u64 ba,
+ int enable, int prio, int stash, u32 size)
+{
+ u32 offset = (memory == qm_memory_fqd) ? REG_FQD_BARE : REG_PFDR_BARE;
+ u32 exp = ilog2(size);
+ /* choke if size isn't within range */
+ DPA_ASSERT((size >= 4096) && (size <= 1073741824) &&
+ is_power_of_2(size));
+ /* choke if 'ba' has lower-alignment than 'size' */
+ DPA_ASSERT(!(ba & (size - 1)));
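+ /* Note: the AR size field encodes log2(size) - 1; e.g. a 1 MiB window
+ * (exp = 20) is programmed as 19 (illustrative value). */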
+ __qm_out(qm, offset, upper_32_bits(ba));
+ __qm_out(qm, offset + REG_offset_BAR, lower_32_bits(ba));
+ __qm_out(qm, offset + REG_offset_AR,
+ (enable ? 0x80000000 : 0) |
+ (prio ? 0x40000000 : 0) |
+ (stash ? 0x20000000 : 0) |
+ (exp - 1));
+}
+
+static void qm_set_pfdr_threshold(struct qman *qm, u32 th, u8 k)
+{
+ qm_out(PFDR_FP_LWIT, th & 0xffffff);
+ qm_out(PFDR_CFG, k);
+}
+
+static void qm_set_sfdr_threshold(struct qman *qm, u16 th)
+{
+ qm_out(SFDR_CFG, th & 0x3ff);
+}
+
+static int qm_init_pfdr(struct qman *qm, u32 pfdr_start, u32 num)
+{
+ u8 rslt = MCR_get_rslt(qm_in(MCR));
+
+ DPA_ASSERT(pfdr_start && !(pfdr_start & 7) && !(num & 7) && num);
+ /* Make sure the command interface is 'idle' */
+ if (!MCR_rslt_idle(rslt))
+ panic("QMAN_MCR isn't idle");
+
+ /* Write the MCR command params then the verb */
+ qm_out(MCP(0), pfdr_start);
+ /* TODO: remove this - it's a workaround for a model bug that is
+ * corrected in more recent versions. We use the workaround until
+ * everyone has upgraded. */
+ qm_out(MCP(1), (pfdr_start + num - 16));
+ lwsync();
+ qm_out(MCR, MCR_INIT_PFDR);
+ /* Poll for the result */
+ do {
+ rslt = MCR_get_rslt(qm_in(MCR));
+ } while (!MCR_rslt_idle(rslt));
+ if (MCR_rslt_ok(rslt))
+ return 0;
+ if (MCR_rslt_eaccess(rslt))
+ return -EACCES;
+ if (MCR_rslt_inval(rslt))
+ return -EINVAL;
+ pr_crit("Unexpected result from MCR_INIT_PFDR: %02x\n", rslt);
+ return -ENOSYS;
+}
+
+/*****************/
+/* Config driver */
+/*****************/
+
+#define DEFAULT_FQD_SZ (PAGE_SIZE << CONFIG_FSL_QMAN_FQD_SZ)
+#define DEFAULT_PFDR_SZ (PAGE_SIZE << CONFIG_FSL_QMAN_PFDR_SZ)
+
+/* We support only one of these */
+static struct qman *qm;
+static struct device_node *qm_node;
+
+/* And this state belongs to 'qm'. It is set during fsl_qman_init(), but used
+ * during qman_init_ccsr(). */
+static dma_addr_t fqd_a, pfdr_a;
+static size_t fqd_sz = DEFAULT_FQD_SZ, pfdr_sz = DEFAULT_PFDR_SZ;
+
+static int qman_fqd(struct reserved_mem *rmem)
+{
+ fqd_a = rmem->base;
+ fqd_sz = rmem->size;
+
+ WARN_ON(!(fqd_a && fqd_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_fqd, "fsl,qman-fqd", qman_fqd);
+
+static int qman_pfdr(struct reserved_mem *rmem)
+{
+ pfdr_a = rmem->base;
+ pfdr_sz = rmem->size;
+
+ WARN_ON(!(pfdr_a && pfdr_sz));
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(qman_fbpr, "fsl,qman-pfdr", qman_pfdr);
+
+size_t get_qman_fqd_size(void)
+{
+ return fqd_sz;
+}
+
+/* The FQD/PFDR regions are reserved via the reserved-memory handlers above;
+ * here we only (optionally) zero the region and flush it from the data cache
+ * so that QMan-originated transactions for this memory region can be marked
+ * non-coherent.
+ */
+static __init int parse_mem_property(struct device_node *node, const char *name,
+ dma_addr_t *addr, size_t *sz, int zero)
+{
+ int ret;
+
+ /* If using a "zero-pma", don't try to zero it, even if you asked */
+ if (zero && of_find_property(node, "zero-pma", &ret)) {
+ pr_info(" it's a 'zero-pma', not zeroing from s/w\n");
+ zero = 0;
+ }
+
+ if (zero) {
+ /* map as cacheable, non-guarded */
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ void __iomem *tmpp = ioremap_cache(*addr, *sz);
+#else
+ void __iomem *tmpp = ioremap(*addr, *sz);
+#endif
+
+ if (!tmpp)
+ return -ENOMEM;
+ memset_io(tmpp, 0, *sz);
+ flush_dcache_range((unsigned long)tmpp,
+ (unsigned long)tmpp + *sz);
+ iounmap(tmpp);
+ }
+
+ return 0;
+}
+
+/* TODO:
+ * - there is obviously no handling of errors,
+ * - the calls to qm_set_memory() hard-code the priority and CPC-stashing for
+ * both memory resources to zero.
+ */
+static int __init fsl_qman_init(struct device_node *node)
+{
+ struct resource res;
+ resource_size_t len;
+ u32 __iomem *regs;
+ const char *s;
+ int ret, standby = 0;
+ u16 id;
+ u8 major, minor, cfg;
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret) {
+ pr_err("Can't get %s property '%s'\n", node->full_name, "reg");
+ return ret;
+ }
+ s = of_get_property(node, "fsl,hv-claimable", &ret);
+ if (s && !strcmp(s, "standby"))
+ standby = 1;
+ if (!standby) {
+ ret = parse_mem_property(node, "fsl,qman-fqd",
+ &fqd_a, &fqd_sz, 1);
+ pr_info("qman-fqd addr %pad size 0x%zx\n", &fqd_a, fqd_sz);
+ BUG_ON(ret);
+ ret = parse_mem_property(node, "fsl,qman-pfdr",
+ &pfdr_a, &pfdr_sz, 0);
+ pr_info("qman-pfdr addr %pad size 0x%zx\n", &pfdr_a, pfdr_sz);
+ BUG_ON(ret);
+ }
+ /* Global configuration */
+ len = resource_size(&res);
+ if (len != (unsigned long)len)
+ return -EINVAL;
+ regs = ioremap(res.start, (unsigned long)len);
+ qm = qm_create(regs);
+ qm_node = node;
+ qm_get_version(qm, &id, &major, &minor, &cfg);
+ pr_info("Qman ver:%04x,%02x,%02x,%02x\n", id, major, minor, cfg);
+ if (!qman_ip_rev) {
+ if ((major == 1) && (minor == 0)) {
+ pr_err("QMAN rev1.0 on P4080 rev1 is not supported!\n");
+ iounmap(regs);
+ return -ENODEV;
+ } else if ((major == 1) && (minor == 1))
+ qman_ip_rev = QMAN_REV11;
+ else if ((major == 1) && (minor == 2))
+ qman_ip_rev = QMAN_REV12;
+ else if ((major == 2) && (minor == 0))
+ qman_ip_rev = QMAN_REV20;
+ else if ((major == 3) && (minor == 0))
+ qman_ip_rev = QMAN_REV30;
+ else if ((major == 3) && (minor == 1))
+ qman_ip_rev = QMAN_REV31;
+ else if ((major == 3) && (minor == 2))
+ qman_ip_rev = QMAN_REV32;
+ else {
+ pr_warn("unknown Qman version, default to rev1.1\n");
+ qman_ip_rev = QMAN_REV11;
+ }
+ qman_ip_cfg = cfg;
+ }
+
+ if (standby) {
+ pr_info(" -> in standby mode\n");
+ return 0;
+ }
+ return 0;
+}
+
+int qman_have_ccsr(void)
+{
+ return qm ? 1 : 0;
+}
+
+__init int qman_init_early(void)
+{
+ struct device_node *dn;
+ int ret;
+
+ for_each_compatible_node(dn, NULL, "fsl,qman") {
+ if (qm)
+ pr_err("%s: only one 'fsl,qman' allowed\n",
+ dn->full_name);
+ else {
+ if (!of_device_is_available(dn))
+ continue;
+
+ ret = fsl_qman_init(dn);
+ BUG_ON(ret);
+ }
+ }
+ return 0;
+}
+postcore_initcall_sync(qman_init_early);
+
+static void log_edata_bits(u32 bit_count)
+{
+ u32 i, j, mask = 0xffffffff;
+
+ pr_warn("Qman ErrInt, EDATA:\n");
+ i = bit_count / 32;
+ if (bit_count % 32) {
+ i++;
+ mask = ~(mask << (bit_count % 32));
+ }
+ j = 16 - i;
+ pr_warn(" 0x%08x\n", qm_in(EDATA(j)) & mask);
+ j++;
+ for (; j < 16; j++)
+ pr_warn(" 0x%08x\n", qm_in(EDATA(j)));
+}
+
+static void log_additional_error_info(u32 isr_val, u32 ecsr_val)
+{
+ union qman_ecir ecir_val;
+ union qman_eadr eadr_val;
+
+ ecir_val.ecir_raw = qm_in(ECIR);
+ /* Is portal info valid */
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+ union qman_ecir2 ecir2_val;
+ ecir2_val.ecir2_raw = qm_in(ECIR2);
+ if (ecsr_val & PORTAL_ECSR_ERR) {
+ pr_warn("Qman ErrInt: %s id %d\n",
+ (ecir2_val.info.portal_type) ?
+ "DCP" : "SWP", ecir2_val.info.portal_num);
+ }
+ if (ecsr_val & (FQID_ECSR_ERR | QM_EIRQ_IECE)) {
+ pr_warn("Qman ErrInt: ecir.fqid 0x%x\n",
+ ecir_val.info.fqid);
+ }
+ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+ eadr_val.eadr_raw = qm_in(EADR);
+ pr_warn("Qman ErrInt: EADR Memory: %s, 0x%x\n",
+ error_mdata[eadr_val.info_rev3.memid].txt,
+ error_mdata[eadr_val.info_rev3.memid].addr_mask
+ & eadr_val.info_rev3.eadr);
+ log_edata_bits(
+ error_mdata[eadr_val.info_rev3.memid].bits);
+ }
+ } else {
+ if (ecsr_val & PORTAL_ECSR_ERR) {
+ pr_warn("Qman ErrInt: %s id %d\n",
+ (ecir_val.info.portal_type) ?
+ "DCP" : "SWP", ecir_val.info.portal_num);
+ }
+ if (ecsr_val & FQID_ECSR_ERR) {
+ pr_warn("Qman ErrInt: ecir.fqid 0x%x\n",
+ ecir_val.info.fqid);
+ }
+ if (ecsr_val & (QM_EIRQ_SBEI|QM_EIRQ_MBEI)) {
+ eadr_val.eadr_raw = qm_in(EADR);
+ pr_warn("Qman ErrInt: EADR Memory: %s, 0x%x\n",
+ error_mdata[eadr_val.info.memid].txt,
+ error_mdata[eadr_val.info.memid].addr_mask
+ & eadr_val.info.eadr);
+ log_edata_bits(error_mdata[eadr_val.info.memid].bits);
+ }
+ }
+}
+
+/* Qman interrupt handler */
+static irqreturn_t qman_isr(int irq, void *ptr)
+{
+ u32 isr_val, ier_val, ecsr_val, isr_mask, i;
+
+ ier_val = qm_err_isr_enable_read(qm);
+ isr_val = qm_err_isr_status_read(qm);
+ ecsr_val = qm_in(ECSR);
+ isr_mask = isr_val & ier_val;
+
+ if (!isr_mask)
+ return IRQ_NONE;
+ for (i = 0; i < QMAN_HWE_COUNT; i++) {
+ if (qman_hwerr_txts[i].mask & isr_mask) {
+ pr_warn("Qman ErrInt: %s\n", qman_hwerr_txts[i].txt);
+ if (qman_hwerr_txts[i].mask & ecsr_val) {
+ log_additional_error_info(isr_mask, ecsr_val);
+ /* Re-arm error capture registers */
+ qm_out(ECSR, ecsr_val);
+ }
+ if (qman_hwerr_txts[i].mask & QMAN_ERRS_TO_UNENABLE) {
+ pr_devel("Qman un-enabling error 0x%x\n",
+ qman_hwerr_txts[i].mask);
+ ier_val &= ~qman_hwerr_txts[i].mask;
+ qm_err_isr_enable_write(qm, ier_val);
+ }
+ }
+ }
+ qm_err_isr_status_clear(qm, isr_val);
+ return IRQ_HANDLED;
+}
+
+static int __bind_irq(void)
+{
+ int ret, err_irq;
+
+ err_irq = of_irq_to_resource(qm_node, 0, NULL);
+ if (err_irq == 0) {
+ pr_info("Can't get %s property '%s'\n", qm_node->full_name,
+ "interrupts");
+ return -ENODEV;
+ }
+ ret = request_irq(err_irq, qman_isr, IRQF_SHARED, "qman-err", qm_node);
+ if (ret) {
+ pr_err("request_irq() failed %d for '%s'\n", ret,
+ qm_node->full_name);
+ return -ENODEV;
+ }
+ /* Write-to-clear any stale bits, (eg. starvation being asserted prior
+ * to resource allocation during driver init). */
+ qm_err_isr_status_clear(qm, 0xffffffff);
+ /* Enable Error Interrupts */
+ qm_err_isr_enable_write(qm, 0xffffffff);
+ return 0;
+}
+
+int qman_init_ccsr(struct device_node *node)
+{
+ int ret;
+ if (!qman_have_ccsr())
+ return 0;
+ if (node != qm_node)
+ return -EINVAL;
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ /* TEMP for LS1043A: should be done in U-Boot */
+ qm_out(QCSP_BARE, 0x5);
+ qm_out(QCSP_BAR, 0x0);
+#endif
+ /* FQD memory */
+ qm_set_memory(qm, qm_memory_fqd, fqd_a, 1, 0, 0, fqd_sz);
+ /* PFDR memory */
+ qm_set_memory(qm, qm_memory_pfdr, pfdr_a, 1, 0, 0, pfdr_sz);
+ qm_init_pfdr(qm, 8, pfdr_sz / 64 - 8);
+ /* thresholds */
+ qm_set_pfdr_threshold(qm, 512, 64);
+ qm_set_sfdr_threshold(qm, 128);
+ /* clear stale PEBI bit from interrupt status register */
+ qm_err_isr_status_clear(qm, QM_EIRQ_PEBI);
+ /* corenet initiator settings */
+ qm_set_corenet_initiator(qm);
+ /* HID settings */
+ qm_set_hid(qm);
+ /* Set scheduling weights to defaults */
+ for (ret = qm_wq_first; ret <= qm_wq_last; ret++)
+ qm_set_wq_scheduling(qm, ret, 0, 0, 0, 0, 0, 0, 0);
+ /* We are not prepared to accept ERNs for hardware enqueues */
+ qm_set_dc(qm, qm_dc_portal_fman0, 1, 0);
+ qm_set_dc(qm, qm_dc_portal_fman1, 1, 0);
+ /* Initialise Error Interrupt Handler */
+ ret = __bind_irq();
+ if (ret)
+ return ret;
+ return 0;
+}
+
+#define LIO_CFG_LIODN_MASK 0x0fff0000
+void qman_liodn_fixup(u16 channel)
+{
+ static int done;
+ static u32 liodn_offset;
+ u32 before, after;
+ int idx = channel - QM_CHANNEL_SWPORTAL0;
+
+ if (!qman_have_ccsr())
+ return;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ before = qm_in(REV3_QCSP_LIO_CFG(idx));
+ else
+ before = qm_in(QCSP_LIO_CFG(idx));
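+ /* The first call records the LIODN offset found on the first portal;
+ * subsequent calls propagate that same offset to every other portal,
+ * keeping the software-portal LIODNs consistent. */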
+ if (!done) {
+ liodn_offset = before & LIO_CFG_LIODN_MASK;
+ done = 1;
+ return;
+ }
+ after = (before & (~LIO_CFG_LIODN_MASK)) | liodn_offset;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ qm_out(REV3_QCSP_LIO_CFG(idx), after);
+ else
+ qm_out(QCSP_LIO_CFG(idx), after);
+}
+
+#define IO_CFG_SDEST_MASK 0x00ff0000
+int qman_set_sdest(u16 channel, unsigned int cpu_idx)
+{
+ int idx = channel - QM_CHANNEL_SWPORTAL0;
+ u32 before, after;
+
+ if (!qman_have_ccsr())
+ return -ENODEV;
+ if ((qman_ip_rev & 0xFFFF) == QMAN_REV31) {
+ /* LS1043A - only one L2 cache */
+ cpu_idx = 0;
+ }
+
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+ before = qm_in(REV3_QCSP_IO_CFG(idx));
+ /* Each pair of vCPUs shares the same SRQ (SDEST) */
+ cpu_idx /= 2;
+ after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+ qm_out(REV3_QCSP_IO_CFG(idx), after);
+ } else {
+ before = qm_in(QCSP_IO_CFG(idx));
+ after = (before & (~IO_CFG_SDEST_MASK)) | (cpu_idx << 16);
+ qm_out(QCSP_IO_CFG(idx), after);
+ }
+ return 0;
+}
+
+#define MISC_CFG_WPM_MASK 0x00000002
+int qm_set_wpm(int wpm)
+{
+ u32 before;
+ u32 after;
+
+ if (!qman_have_ccsr())
+ return -ENODEV;
+
+ before = qm_in(MISC_CFG);
+ after = (before & (~MISC_CFG_WPM_MASK)) | (wpm << 1);
+ qm_out(MISC_CFG, after);
+ return 0;
+}
+
+int qm_get_wpm(int *wpm)
+{
+ u32 before;
+
+ if (!qman_have_ccsr())
+ return -ENODEV;
+
+ before = qm_in(MISC_CFG);
+ *wpm = (before & MISC_CFG_WPM_MASK) >> 1;
+ return 0;
+}
+
+/* The CEETM_CFG_PRES register has a PRES field which is calculated as:
+ * PRES = (2^22 / credit update reference period) * QMan clock period
+ * = ((2^22 * 10^9) / CONFIG_QMAN_CEETM_UPDATE_PERIOD) / qman_clk
+ */
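+/* Worked example (illustrative values only): with qman_clk = 600000000 and
+ * CONFIG_QMAN_CEETM_UPDATE_PERIOD = 1000, PRES = ((2^22 * 10^9) / 1000)
+ * / 600000000, which truncates to 6990. */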
+
+int qman_ceetm_set_prescaler(enum qm_dc_portal portal)
+{
+ u64 temp;
+ u16 pres;
+
+ if (!qman_have_ccsr())
+ return -ENODEV;
+
+ temp = 0x400000 * 100;
+ do_div(temp, CONFIG_QMAN_CEETM_UPDATE_PERIOD);
+ temp *= 10000000;
+ do_div(temp, qman_clk);
+ pres = (u16) temp;
+ qm_out(CEETM_CFG_IDX, portal);
+ qm_out(CEETM_CFG_PRES, pres);
+ return 0;
+}
+
+int qman_ceetm_get_prescaler(u16 *pres)
+{
+ if (!qman_have_ccsr())
+ return -ENODEV;
+ *pres = (u16)qm_in(CEETM_CFG_PRES);
+ return 0;
+}
+
+#define DCP_CFG_CEETME_MASK 0xFFFF0000
+#define QM_SP_ENABLE_CEETM(n) (0x80000000 >> (n))
+int qman_sp_enable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal)
+{
+ u32 dcp_cfg;
+
+ if (!qman_have_ccsr())
+ return -ENODEV;
+
+ dcp_cfg = qm_in(DCP_CFG(portal));
+ dcp_cfg |= QM_SP_ENABLE_CEETM(sub_portal);
+ qm_out(DCP_CFG(portal), dcp_cfg);
+ return 0;
+}
+
+int qman_sp_disable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal)
+{
+ u32 dcp_cfg;
+
+ if (!qman_have_ccsr())
+ return -ENODEV;
+ dcp_cfg = qm_in(DCP_CFG(portal));
+ dcp_cfg &= ~(QM_SP_ENABLE_CEETM(sub_portal));
+ qm_out(DCP_CFG(portal), dcp_cfg);
+ return 0;
+}
+
+int qman_ceetm_get_xsfdr(enum qm_dc_portal portal, unsigned int *num)
+{
+ if (!qman_have_ccsr())
+ return -ENODEV;
+ *num = qm_in(CEETM_XSFDR_IN_USE);
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_get_xsfdr);
+
+#ifdef CONFIG_SYSFS
+
+#define DRV_NAME "fsl-qman"
+#define DCP_MAX_ID 3
+#define DCP_MIN_ID 0
+
+static ssize_t show_pfdr_fpc(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_FPC));
+};
+
+static ssize_t show_dlm_avg(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ u32 data;
+ int i;
+
+ if (!sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i))
+ return -EINVAL;
+ if (i < DCP_MIN_ID || i > DCP_MAX_ID)
+ return -EINVAL;
+ data = qm_in(DCP_DLM_AVG(i));
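+ /* The register is fixed-point: the low 8 bits are a fraction in 1/256
+ * steps (1/256 = 0.00390625, hence the multiply by 390625 against the
+ * %08d format). */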
+ return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8,
+ (data & 0x000000ff)*390625);
+};
+
+static ssize_t set_dlm_avg(struct device *dev,
+ struct device_attribute *dev_attr, const char *buf, size_t count)
+{
+ unsigned long val;
+ int i;
+
+ if (!sscanf(dev_attr->attr.name, "dcp%d_dlm_avg", &i))
+ return -EINVAL;
+ if (i < DCP_MIN_ID || i > DCP_MAX_ID)
+ return -EINVAL;
+ if (kstrtoul(buf, 0, &val)) {
+ dev_dbg(dev, "invalid input %s\n", buf);
+ return -EINVAL;
+ }
+ qm_out(DCP_DLM_AVG(i), val);
+ return count;
+};
+
+static ssize_t show_pfdr_cfg(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(PFDR_CFG));
+};
+
+static ssize_t set_pfdr_cfg(struct device *dev,
+ struct device_attribute *dev_attr, const char *buf, size_t count)
+{
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val)) {
+ dev_dbg(dev, "invalid input %s\n", buf);
+ return -EINVAL;
+ }
+ qm_out(PFDR_CFG, val);
+ return count;
+};
+
+static ssize_t show_sfdr_in_use(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SFDR_IN_USE));
+};
+
+static ssize_t show_idle_stat(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(IDLE_STAT));
+};
+
+static ssize_t show_ci_rlm_avg(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ u32 data = qm_in(CI_RLM_AVG);
+ return snprintf(buf, PAGE_SIZE, "%d.%08d\n", data>>8,
+ (data & 0x000000ff)*390625);
+};
+
+static ssize_t set_ci_rlm_avg(struct device *dev,
+ struct device_attribute *dev_attr, const char *buf, size_t count)
+{
+ unsigned long val;
+
+ if (kstrtoul(buf, 0, &val)) {
+ dev_dbg(dev, "invalid input %s\n", buf);
+ return -EINVAL;
+ }
+ qm_out(CI_RLM_AVG, val);
+ return count;
+};
+
+static ssize_t show_err_isr(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "0x%08x\n", qm_in(ERR_ISR));
+};
+
+#define SBEC_MAX_ID 14
+#define SBEC_MIN_ID 0
+
+static ssize_t show_sbec(struct device *dev,
+ struct device_attribute *dev_attr, char *buf)
+{
+ int i;
+
+ if (!sscanf(dev_attr->attr.name, "sbec_%d", &i))
+ return -EINVAL;
+ if (i < SBEC_MIN_ID || i > SBEC_MAX_ID)
+ return -EINVAL;
+ return snprintf(buf, PAGE_SIZE, "%u\n", qm_in(SBEC(i)));
+};
+
+static DEVICE_ATTR(pfdr_fpc, S_IRUSR, show_pfdr_fpc, NULL);
+static DEVICE_ATTR(pfdr_cfg, S_IRUSR, show_pfdr_cfg, set_pfdr_cfg);
+static DEVICE_ATTR(idle_stat, S_IRUSR, show_idle_stat, NULL);
+static DEVICE_ATTR(ci_rlm_avg, (S_IRUSR|S_IWUSR),
+ show_ci_rlm_avg, set_ci_rlm_avg);
+static DEVICE_ATTR(err_isr, S_IRUSR, show_err_isr, NULL);
+static DEVICE_ATTR(sfdr_in_use, S_IRUSR, show_sfdr_in_use, NULL);
+
+static DEVICE_ATTR(dcp0_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
+static DEVICE_ATTR(dcp1_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
+static DEVICE_ATTR(dcp2_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
+static DEVICE_ATTR(dcp3_dlm_avg, (S_IRUSR|S_IWUSR), show_dlm_avg, set_dlm_avg);
+
+static DEVICE_ATTR(sbec_0, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_1, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_2, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_3, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_4, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_5, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_6, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_7, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_8, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_9, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_10, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_11, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_12, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_13, S_IRUSR, show_sbec, NULL);
+static DEVICE_ATTR(sbec_14, S_IRUSR, show_sbec, NULL);
+
+static struct attribute *qman_dev_attributes[] = {
+ &dev_attr_pfdr_fpc.attr,
+ &dev_attr_pfdr_cfg.attr,
+ &dev_attr_idle_stat.attr,
+ &dev_attr_ci_rlm_avg.attr,
+ &dev_attr_err_isr.attr,
+ &dev_attr_dcp0_dlm_avg.attr,
+ &dev_attr_dcp1_dlm_avg.attr,
+ &dev_attr_dcp2_dlm_avg.attr,
+ &dev_attr_dcp3_dlm_avg.attr,
+ /* sfdr_in_use will be added if necessary */
+ NULL
+};
+
+static struct attribute *qman_dev_ecr_attributes[] = {
+ &dev_attr_sbec_0.attr,
+ &dev_attr_sbec_1.attr,
+ &dev_attr_sbec_2.attr,
+ &dev_attr_sbec_3.attr,
+ &dev_attr_sbec_4.attr,
+ &dev_attr_sbec_5.attr,
+ &dev_attr_sbec_6.attr,
+ &dev_attr_sbec_7.attr,
+ &dev_attr_sbec_8.attr,
+ &dev_attr_sbec_9.attr,
+ &dev_attr_sbec_10.attr,
+ &dev_attr_sbec_11.attr,
+ &dev_attr_sbec_12.attr,
+ &dev_attr_sbec_13.attr,
+ &dev_attr_sbec_14.attr,
+ NULL
+};
+
+/* root level */
+static const struct attribute_group qman_dev_attr_grp = {
+ .name = NULL,
+ .attrs = qman_dev_attributes
+};
+static const struct attribute_group qman_dev_ecr_grp = {
+ .name = "error_capture",
+ .attrs = qman_dev_ecr_attributes
+};
+
+static int of_fsl_qman_remove(struct platform_device *ofdev)
+{
+ sysfs_remove_group(&ofdev->dev.kobj, &qman_dev_attr_grp);
+ return 0;
+};
+
+static int of_fsl_qman_probe(struct platform_device *ofdev)
+{
+ int ret;
+
+ ret = sysfs_create_group(&ofdev->dev.kobj, &qman_dev_attr_grp);
+ if (ret)
+ goto done;
+ ret = sysfs_add_file_to_group(&ofdev->dev.kobj,
+ &dev_attr_sfdr_in_use.attr, qman_dev_attr_grp.name);
+ if (ret)
+ goto del_group_0;
+ ret = sysfs_create_group(&ofdev->dev.kobj, &qman_dev_ecr_grp);
+ if (ret)
+ goto del_group_0;
+
+ goto done;
+
+del_group_0:
+ sysfs_remove_group(&ofdev->dev.kobj, &qman_dev_attr_grp);
+done:
+ if (ret)
+ dev_err(&ofdev->dev,
+ "Cannot create dev attributes ret=%d\n", ret);
+ return ret;
+};
+
+static const struct of_device_id of_fsl_qman_ids[] = {
+ {
+ .compatible = "fsl,qman",
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_fsl_qman_ids);
+
+#ifdef CONFIG_SUSPEND
+
+static u32 saved_isdr;
+static int qman_pm_suspend_noirq(struct device *dev)
+{
+ u32 idle_state;
+
+ suspend_unused_qportal();
+ /* save isdr, disable all, clear isr */
+ saved_isdr = qm_err_isr_disable_read(qm);
+ qm_err_isr_disable_write(qm, 0xffffffff);
+ qm_err_isr_status_clear(qm, 0xffffffff);
+ idle_state = qm_in(IDLE_STAT);
+ if (!(idle_state & 0x1)) {
+ pr_err("Qman not idle 0x%x aborting\n", idle_state);
+ qm_err_isr_disable_write(qm, saved_isdr);
+ resume_unused_qportal();
+ return -EBUSY;
+ }
+#ifdef CONFIG_PM_DEBUG
+ pr_info("Qman suspend code, IDLE_STAT = 0x%x\n", idle_state);
+#endif
+ return 0;
+}
+
+static int qman_pm_resume_noirq(struct device *dev)
+{
+ /* restore isdr */
+ qm_err_isr_disable_write(qm, saved_isdr);
+ resume_unused_qportal();
+ return 0;
+}
+#else
+#define qman_pm_suspend_noirq NULL
+#define qman_pm_resume_noirq NULL
+#endif
+
+static const struct dev_pm_ops qman_pm_ops = {
+ .suspend_noirq = qman_pm_suspend_noirq,
+ .resume_noirq = qman_pm_resume_noirq,
+};
+
+static struct platform_driver of_fsl_qman_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DRV_NAME,
+ .of_match_table = of_fsl_qman_ids,
+ .pm = &qman_pm_ops,
+ },
+ .probe = of_fsl_qman_probe,
+ .remove = of_fsl_qman_remove,
+};
+
+static int qman_ctrl_init(void)
+{
+ return platform_driver_register(&of_fsl_qman_driver);
+}
+
+static void qman_ctrl_exit(void)
+{
+ platform_driver_unregister(&of_fsl_qman_driver);
+}
+
+module_init(qman_ctrl_init);
+module_exit(qman_ctrl_exit);
+
+#endif /* CONFIG_SYSFS */
diff --git a/drivers/staging/fsl_qbman/qman_debugfs.c b/drivers/staging/fsl_qbman/qman_debugfs.c
new file mode 100644
index 000000000000..afdc9393c4ac
--- /dev/null
+++ b/drivers/staging/fsl_qbman/qman_debugfs.c
@@ -0,0 +1,1597 @@
+/* Copyright 2010-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "qman_private.h"
+
+#define MAX_FQID (0x00ffffff)
+#define QM_FQD_BLOCK_SIZE 64
+#define QM_FQD_AR (0xC10)
+
+static u32 fqid_max;
+static u64 qman_ccsr_start;
+static u64 qman_ccsr_size;
+
+static const char * const state_txt[] = {
+ "Out of Service",
+ "Retired",
+ "Tentatively Scheduled",
+ "Truly Scheduled",
+ "Parked",
+ "Active, Active Held or Held Suspended",
+ "Unknown State 6",
+ "Unknown State 7",
+ NULL,
+};
+
+static const u8 fqd_states[] = {
+ QM_MCR_NP_STATE_OOS, QM_MCR_NP_STATE_RETIRED, QM_MCR_NP_STATE_TEN_SCHED,
+ QM_MCR_NP_STATE_TRU_SCHED, QM_MCR_NP_STATE_PARKED,
+ QM_MCR_NP_STATE_ACTIVE};
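+
+/*
+ * Note: fqd_states[] is assumed to mirror the order of state_txt[]
+ * above, since the summary code below indexes both tables with the
+ * same value (i.e. QM_MCR_NP_STATE_OOS..QM_MCR_NP_STATE_ACTIVE are
+ * taken to be 0..5).
+ */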
+
+struct mask_to_text {
+ u16 mask;
+ const char *txt;
+};
+
+struct mask_filter_s {
+ u16 mask;
+ u8 filter;
+};
+
+static const struct mask_filter_s mask_filter[] = {
+ {QM_FQCTRL_PREFERINCACHE, 0},
+ {QM_FQCTRL_PREFERINCACHE, 1},
+ {QM_FQCTRL_HOLDACTIVE, 0},
+ {QM_FQCTRL_HOLDACTIVE, 1},
+ {QM_FQCTRL_AVOIDBLOCK, 0},
+ {QM_FQCTRL_AVOIDBLOCK, 1},
+ {QM_FQCTRL_FORCESFDR, 0},
+ {QM_FQCTRL_FORCESFDR, 1},
+ {QM_FQCTRL_CPCSTASH, 0},
+ {QM_FQCTRL_CPCSTASH, 1},
+ {QM_FQCTRL_CTXASTASHING, 0},
+ {QM_FQCTRL_CTXASTASHING, 1},
+ {QM_FQCTRL_ORP, 0},
+ {QM_FQCTRL_ORP, 1},
+ {QM_FQCTRL_TDE, 0},
+ {QM_FQCTRL_TDE, 1},
+ {QM_FQCTRL_CGE, 0},
+ {QM_FQCTRL_CGE, 1}
+};
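+
+/*
+ * Each FQ control bit appears twice above: the even index pairs the
+ * mask with filter == 0 ("disabled") and the odd index with
+ * filter == 1 ("enabled"). The *_enable/*_disable debugfs entries
+ * created at module init rely on exactly this layout.
+ */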
+
+static const struct mask_to_text fq_ctrl_text_list[] = {
+ {
+ .mask = QM_FQCTRL_PREFERINCACHE,
+ .txt = "Prefer in cache",
+ },
+ {
+ .mask = QM_FQCTRL_HOLDACTIVE,
+ .txt = "Hold active in portal",
+ },
+ {
+ .mask = QM_FQCTRL_AVOIDBLOCK,
+ .txt = "Avoid Blocking",
+ },
+ {
+ .mask = QM_FQCTRL_FORCESFDR,
+ .txt = "High-priority SFDRs",
+ },
+ {
+ .mask = QM_FQCTRL_CPCSTASH,
+ .txt = "CPC Stash Enable",
+ },
+ {
+ .mask = QM_FQCTRL_CTXASTASHING,
+ .txt = "Context-A stashing",
+ },
+ {
+ .mask = QM_FQCTRL_ORP,
+ .txt = "ORP Enable",
+ },
+ {
+ .mask = QM_FQCTRL_TDE,
+ .txt = "Tail-Drop Enable",
+ },
+ {
+ .mask = QM_FQCTRL_CGE,
+ .txt = "Congestion Group Enable",
+ },
+ {
+ .mask = 0,
+ .txt = NULL,
+ }
+};
+
+static const char *get_fqd_ctrl_text(u16 mask)
+{
+ int i = 0;
+
+ while (fq_ctrl_text_list[i].txt != NULL) {
+ if (fq_ctrl_text_list[i].mask == mask)
+ return fq_ctrl_text_list[i].txt;
+ i++;
+ }
+ return NULL;
+}
+
+static const struct mask_to_text stashing_text_list[] = {
+ {
+ .mask = QM_STASHING_EXCL_CTX,
+ .txt = "FQ Ctx Stash"
+ },
+ {
+ .mask = QM_STASHING_EXCL_DATA,
+ .txt = "Frame Data Stash",
+ },
+ {
+ .mask = QM_STASHING_EXCL_ANNOTATION,
+ .txt = "Frame Annotation Stash",
+ },
+ {
+ .mask = 0,
+ .txt = NULL,
+ },
+};
+
+static int user_input_convert(const char __user *user_buf, size_t count,
+ unsigned long *val)
+{
+ char buf[12];
+
+ if (count > sizeof(buf) - 1)
+ return -EINVAL;
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+ buf[count] = '\0';
+ if (kstrtoul(buf, 0, val))
+ return -EINVAL;
+ return 0;
+}
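+
+/*
+ * Example: a write of "0x2a\n" (e.g. "echo 0x2a > query_cgr" from a
+ * shell) parses to *val == 42; input longer than 11 characters is
+ * rejected with -EINVAL before parsing.
+ */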
+
+struct line_buffer_fq {
+ u32 buf[8];
+ u32 buf_cnt;
+ int line_cnt;
+};
+
+static void add_to_line_buffer(struct line_buffer_fq *line_buf, u32 fqid,
+ struct seq_file *file)
+{
+ line_buf->buf[line_buf->buf_cnt] = fqid;
+ line_buf->buf_cnt++;
+ if (line_buf->buf_cnt == 8) {
+ /* Buffer is full, flush it */
+ if (line_buf->line_cnt != 0)
+ seq_puts(file, ",\n");
+ seq_printf(file, "0x%06x,0x%06x,0x%06x,0x%06x,0x%06x,"
+ "0x%06x,0x%06x,0x%06x",
+ line_buf->buf[0], line_buf->buf[1], line_buf->buf[2],
+ line_buf->buf[3], line_buf->buf[4], line_buf->buf[5],
+ line_buf->buf[6], line_buf->buf[7]);
+ line_buf->buf_cnt = 0;
+ line_buf->line_cnt++;
+ }
+}
+
+static void flush_line_buffer(struct line_buffer_fq *line_buf,
+ struct seq_file *file)
+{
+ if (line_buf->buf_cnt) {
+ int y = 0;
+ if (line_buf->line_cnt != 0)
+ seq_puts(file, ",\n");
+ while (y != line_buf->buf_cnt) {
+ if (y+1 == line_buf->buf_cnt)
+ seq_printf(file, "0x%06x", line_buf->buf[y]);
+ else
+ seq_printf(file, "0x%06x,", line_buf->buf[y]);
+ y++;
+ }
+ line_buf->line_cnt++;
+ }
+ if (line_buf->line_cnt)
+ seq_putc(file, '\n');
+}
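+
+/*
+ * Together, add_to_line_buffer() and flush_line_buffer() emit FQIDs
+ * eight per line, comma-separated, e.g.:
+ *
+ *   0x000001,0x000002,0x000003,0x000004,0x000005,0x000006,0x000007,0x000008,
+ *   0x000009,0x00000a
+ */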
+
+static struct dentry *dfs_root; /* debugfs root directory */
+
+/*******************************************************************************
+ * Query Frame Queue Non Programmable Fields
+ ******************************************************************************/
+struct query_fq_np_fields_data_s {
+ u32 fqid;
+};
+static struct query_fq_np_fields_data_s query_fq_np_fields_data = {
+ .fqid = 1,
+};
+
+static int query_fq_np_fields_show(struct seq_file *file, void *offset)
+{
+ int ret;
+ struct qm_mcr_queryfq_np np;
+ struct qman_fq fq;
+
+ fq.fqid = query_fq_np_fields_data.fqid;
+ ret = qman_query_fq_np(&fq, &np);
+ if (ret)
+ return ret;
+ /* Print state */
+ seq_printf(file, "Query FQ Non Programmable Fields Result fqid 0x%x\n",
+ fq.fqid);
+ seq_printf(file, " force eligible pending: %s\n",
+ (np.state & QM_MCR_NP_STATE_FE) ? "yes" : "no");
+ seq_printf(file, " retirement pending: %s\n",
+ (np.state & QM_MCR_NP_STATE_R) ? "yes" : "no");
+ seq_printf(file, " state: %s\n",
+ state_txt[np.state & QM_MCR_NP_STATE_MASK]);
+ seq_printf(file, " fq_link: 0x%x\n", np.fqd_link);
+ seq_printf(file, " odp_seq: %u\n", np.odp_seq);
+ seq_printf(file, " orp_nesn: %u\n", np.orp_nesn);
+ seq_printf(file, " orp_ea_hseq: %u\n", np.orp_ea_hseq);
+ seq_printf(file, " orp_ea_tseq: %u\n", np.orp_ea_tseq);
+ seq_printf(file, " orp_ea_hptr: 0x%x\n", np.orp_ea_hptr);
+ seq_printf(file, " orp_ea_tptr: 0x%x\n", np.orp_ea_tptr);
+ seq_printf(file, " pfdr_hptr: 0x%x\n", np.pfdr_hptr);
+ seq_printf(file, " pfdr_tptr: 0x%x\n", np.pfdr_tptr);
+ seq_printf(file, " is: ics_surp contains a %s\n",
+ (np.is) ? "deficit" : "surplus");
+ seq_printf(file, " ics_surp: %u\n", np.ics_surp);
+ seq_printf(file, " byte_cnt: %u\n", np.byte_cnt);
+ seq_printf(file, " frm_cnt: %u\n", np.frm_cnt);
+ seq_printf(file, " ra1_sfdr: 0x%x\n", np.ra1_sfdr);
+ seq_printf(file, " ra2_sfdr: 0x%x\n", np.ra2_sfdr);
+ seq_printf(file, " od1_sfdr: 0x%x\n", np.od1_sfdr);
+ seq_printf(file, " od2_sfdr: 0x%x\n", np.od2_sfdr);
+ seq_printf(file, " od3_sfdr: 0x%x\n", np.od3_sfdr);
+ return 0;
+}
+
+static int query_fq_np_fields_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, query_fq_np_fields_show, NULL);
+}
+
+static ssize_t query_fq_np_fields_write(struct file *f,
+ const char __user *buf, size_t count, loff_t *off)
+{
+ int ret;
+ unsigned long val;
+
+ ret = user_input_convert(buf, count, &val);
+ if (ret)
+ return ret;
+ if (val > MAX_FQID)
+ return -EINVAL;
+ query_fq_np_fields_data.fqid = (u32)val;
+ return count;
+}
+
+static const struct file_operations query_fq_np_fields_fops = {
+ .owner = THIS_MODULE,
+ .open = query_fq_np_fields_open,
+ .read = seq_read,
+ .write = query_fq_np_fields_write,
+ .release = single_release,
+};
+
+/*******************************************************************************
+ * Frame Queue Programmable Fields
+ ******************************************************************************/
+struct query_fq_fields_data_s {
+ u32 fqid;
+};
+
+static struct query_fq_fields_data_s query_fq_fields_data = {
+ .fqid = 1,
+};
+
+static int query_fq_fields_show(struct seq_file *file, void *offset)
+{
+ int ret;
+ struct qm_fqd fqd;
+ struct qman_fq fq;
+ int i = 0;
+
+ memset(&fqd, 0, sizeof(struct qm_fqd));
+ fq.fqid = query_fq_fields_data.fqid;
+ ret = qman_query_fq(&fq, &fqd);
+ if (ret)
+ return ret;
+ seq_printf(file, "Query FQ Programmable Fields Result fqid 0x%x\n",
+ fq.fqid);
+ seq_printf(file, " orprws: %u\n", fqd.orprws);
+ seq_printf(file, " oa: %u\n", fqd.oa);
+ seq_printf(file, " olws: %u\n", fqd.olws);
+
+ seq_printf(file, " cgid: %u\n", fqd.cgid);
+
+ if ((fqd.fq_ctrl & QM_FQCTRL_MASK) == 0)
+ seq_puts(file, " fq_ctrl: None\n");
+ else {
+ i = 0;
+ seq_puts(file, " fq_ctrl:\n");
+ while (fq_ctrl_text_list[i].txt != NULL) {
+ if ((fqd.fq_ctrl & QM_FQCTRL_MASK) &
+ fq_ctrl_text_list[i].mask)
+ seq_printf(file, " %s\n",
+ fq_ctrl_text_list[i].txt);
+ i++;
+ }
+ }
+ seq_printf(file, " dest_channel: %u\n", fqd.dest.channel);
+ seq_printf(file, " dest_wq: %u\n", fqd.dest.wq);
+ seq_printf(file, " ics_cred: %u\n", fqd.ics_cred);
+ seq_printf(file, " td_mant: %u\n", fqd.td.mant);
+ seq_printf(file, " td_exp: %u\n", fqd.td.exp);
+
+ seq_printf(file, " ctx_b: 0x%x\n", fqd.context_b);
+
+ seq_printf(file, " ctx_a: 0x%llx\n", qm_fqd_stashing_get64(&fqd));
+ /* Any stashing configured */
+ if ((fqd.context_a.stashing.exclusive & 0x7) == 0)
+ seq_puts(file, " ctx_a_stash_exclusive: None\n");
+ else {
+ seq_puts(file, " ctx_a_stash_exclusive:\n");
+ i = 0;
+ while (stashing_text_list[i].txt != NULL) {
+ if ((fqd.context_a.stashing.exclusive & 0x7) &
+ stashing_text_list[i].mask)
+ seq_printf(file, " %s\n",
+ stashing_text_list[i].txt);
+ i++;
+ }
+ }
+ seq_printf(file, " ctx_a_stash_annotation_cl: %u\n",
+ fqd.context_a.stashing.annotation_cl);
+ seq_printf(file, " ctx_a_stash_data_cl: %u\n",
+ fqd.context_a.stashing.data_cl);
+ seq_printf(file, " ctx_a_stash_context_cl: %u\n",
+ fqd.context_a.stashing.context_cl);
+ return 0;
+}
+
+static int query_fq_fields_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, query_fq_fields_show, NULL);
+}
+
+static ssize_t query_fq_fields_write(struct file *f,
+ const char __user *buf, size_t count, loff_t *off)
+{
+ int ret;
+ unsigned long val;
+
+ ret = user_input_convert(buf, count, &val);
+ if (ret)
+ return ret;
+ if (val > MAX_FQID)
+ return -EINVAL;
+ query_fq_fields_data.fqid = (u32)val;
+ return count;
+}
+
+static const struct file_operations query_fq_fields_fops = {
+ .owner = THIS_MODULE,
+ .open = query_fq_fields_open,
+ .read = seq_read,
+ .write = query_fq_fields_write,
+ .release = single_release,
+};
+
+/*******************************************************************************
+ * Query WQ lengths
+ ******************************************************************************/
+struct query_wq_lengths_data_s {
+ union {
+ u16 channel_wq; /* ignores wq (3 lsbits) */
+ struct {
+ u16 id:13; /* qm_channel */
+ u16 __reserved:3;
+ } __packed channel;
+ };
+};
+static struct query_wq_lengths_data_s query_wq_lengths_data;
+static int query_wq_lengths_show(struct seq_file *file, void *offset)
+{
+ int ret;
+ struct qm_mcr_querywq wq;
+ int i;
+
+ memset(&wq, 0, sizeof(struct qm_mcr_querywq));
+ wq.channel.id = query_wq_lengths_data.channel.id;
+ ret = qman_query_wq(0, &wq);
+ if (ret)
+ return ret;
+ seq_printf(file, "Query Result For Channel: 0x%x\n", wq.channel.id);
+ for (i = 0; i < 8; i++)
+ /* mask out upper 4 bits since they are not part of length */
+ seq_printf(file, " wq%d_len : %u\n", i, wq.wq_len[i] & 0x0fff);
+ return 0;
+}
+
+static int query_wq_lengths_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, query_wq_lengths_show, NULL);
+}
+
+static ssize_t query_wq_lengths_write(struct file *f,
+ const char __user *buf, size_t count, loff_t *off)
+{
+ int ret;
+ unsigned long val;
+
+ ret = user_input_convert(buf, count, &val);
+ if (ret)
+ return ret;
+ if (val > 0xfff8)
+ return -EINVAL;
+ query_wq_lengths_data.channel.id = (u16)val;
+ return count;
+}
+
+static const struct file_operations query_wq_lengths_fops = {
+ .owner = THIS_MODULE,
+ .open = query_wq_lengths_open,
+ .read = seq_read,
+ .write = query_wq_lengths_write,
+ .release = single_release,
+};
+
+/*******************************************************************************
+ * Query CGR
+ ******************************************************************************/
+struct query_cgr_s {
+ u8 cgid;
+};
+static struct query_cgr_s query_cgr_data;
+
+static int query_cgr_show(struct seq_file *file, void *offset)
+{
+ int ret;
+ struct qm_mcr_querycgr cgrd;
+ struct qman_cgr cgr;
+ int i, j;
+ u32 mask;
+
+ memset(&cgr, 0, sizeof(cgr));
+ memset(&cgrd, 0, sizeof(cgrd));
+ cgr.cgrid = query_cgr_data.cgid;
+ ret = qman_query_cgr(&cgr, &cgrd);
+ if (ret)
+ return ret;
+ seq_printf(file, "Query CGR id 0x%x\n", cgr.cgrid);
+ seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
+ cgrd.cgr.wr_parm_g.MA, cgrd.cgr.wr_parm_g.Mn,
+ cgrd.cgr.wr_parm_g.SA, cgrd.cgr.wr_parm_g.Sn,
+ cgrd.cgr.wr_parm_g.Pn);
+
+ seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
+ cgrd.cgr.wr_parm_y.MA, cgrd.cgr.wr_parm_y.Mn,
+ cgrd.cgr.wr_parm_y.SA, cgrd.cgr.wr_parm_y.Sn,
+ cgrd.cgr.wr_parm_y.Pn);
+
+ seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
+ cgrd.cgr.wr_parm_r.MA, cgrd.cgr.wr_parm_r.Mn,
+ cgrd.cgr.wr_parm_r.SA, cgrd.cgr.wr_parm_r.Sn,
+ cgrd.cgr.wr_parm_r.Pn);
+
+ seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n",
+ cgrd.cgr.wr_en_g, cgrd.cgr.wr_en_y, cgrd.cgr.wr_en_r);
+
+ seq_printf(file, " cscn_en: %u\n", cgrd.cgr.cscn_en);
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30) {
+ seq_puts(file, " cscn_targ_dcp:\n");
+ mask = 0x80000000;
+ for (i = 0; i < 32; i++) {
+ if (cgrd.cgr.cscn_targ & mask)
+ seq_printf(file, " send CSCN to dcp %u\n",
+ (31 - i));
+ mask >>= 1;
+ }
+
+ seq_puts(file, " cscn_targ_swp:\n");
+ for (i = 0; i < 4; i++) {
+ mask = 0x80000000;
+ for (j = 0; j < 32; j++) {
+ if (cgrd.cscn_targ_swp[i] & mask)
+ seq_printf(file, " send CSCN to swp"
+ " %u\n", (127 - (i * 32) - j));
+ mask >>= 1;
+ }
+ }
+ } else {
+ seq_printf(file, " cscn_targ: %u\n", cgrd.cgr.cscn_targ);
+ }
+ seq_printf(file, " cstd_en: %u\n", cgrd.cgr.cstd_en);
+ seq_printf(file, " cs: %u\n", cgrd.cgr.cs);
+
+ seq_printf(file, " cs_thresh_TA: %u, cs_thresh_Tn: %u\n",
+ cgrd.cgr.cs_thres.TA, cgrd.cgr.cs_thres.Tn);
+
+ seq_printf(file, " mode: %s\n",
+ (cgrd.cgr.mode & QMAN_CGR_MODE_FRAME) ?
+ "frame count" : "byte count");
+ seq_printf(file, " i_bcnt: %llu\n", qm_mcr_querycgr_i_get64(&cgrd));
+ seq_printf(file, " a_bcnt: %llu\n", qm_mcr_querycgr_a_get64(&cgrd));
+
+ return 0;
+}
+
+static int query_cgr_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, query_cgr_show, NULL);
+}
+
+static ssize_t query_cgr_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ int ret;
+ unsigned long val;
+
+ ret = user_input_convert(buf, count, &val);
+ if (ret)
+ return ret;
+ if (val > 0xff)
+ return -EINVAL;
+ query_cgr_data.cgid = (u8)val;
+ return count;
+}
+
+static const struct file_operations query_cgr_fops = {
+ .owner = THIS_MODULE,
+ .open = query_cgr_open,
+ .read = seq_read,
+ .write = query_cgr_write,
+ .release = single_release,
+};
+
+/*******************************************************************************
+ * Test Write CGR
+ ******************************************************************************/
+struct test_write_cgr_s {
+ u64 i_bcnt;
+ u8 cgid;
+};
+static struct test_write_cgr_s test_write_cgr_data;
+
+static int testwrite_cgr_show(struct seq_file *file, void *offset)
+{
+ int ret;
+ struct qm_mcr_cgrtestwrite result;
+ struct qman_cgr cgr;
+ u64 i_bcnt;
+
+ memset(&cgr, 0, sizeof(struct qman_cgr));
+ memset(&result, 0, sizeof(struct qm_mcr_cgrtestwrite));
+ cgr.cgrid = test_write_cgr_data.cgid;
+ i_bcnt = test_write_cgr_data.i_bcnt;
+ ret = qman_testwrite_cgr(&cgr, i_bcnt, &result);
+ if (ret)
+ return ret;
+ seq_printf(file, "CGR Test Write CGR id 0x%x\n", cgr.cgrid);
+ seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
+ result.cgr.wr_parm_g.MA, result.cgr.wr_parm_g.Mn,
+ result.cgr.wr_parm_g.SA, result.cgr.wr_parm_g.Sn,
+ result.cgr.wr_parm_g.Pn);
+ seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
+ result.cgr.wr_parm_y.MA, result.cgr.wr_parm_y.Mn,
+ result.cgr.wr_parm_y.SA, result.cgr.wr_parm_y.Sn,
+ result.cgr.wr_parm_y.Pn);
+ seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
+ result.cgr.wr_parm_r.MA, result.cgr.wr_parm_r.Mn,
+ result.cgr.wr_parm_r.SA, result.cgr.wr_parm_r.Sn,
+ result.cgr.wr_parm_r.Pn);
+ seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n",
+ result.cgr.wr_en_g, result.cgr.wr_en_y, result.cgr.wr_en_r);
+ seq_printf(file, " cscn_en: %u\n", result.cgr.cscn_en);
+ seq_printf(file, " cscn_targ: %u\n", result.cgr.cscn_targ);
+ seq_printf(file, " cstd_en: %u\n", result.cgr.cstd_en);
+ seq_printf(file, " cs: %u\n", result.cgr.cs);
+ seq_printf(file, " cs_thresh_TA: %u, cs_thresh_Tn: %u\n",
+ result.cgr.cs_thres.TA, result.cgr.cs_thres.Tn);
+
+ /* Add Mode for Si 2 */
+ seq_printf(file, " mode: %s\n",
+ (result.cgr.mode & QMAN_CGR_MODE_FRAME) ?
+ "frame count" : "byte count");
+
+ seq_printf(file, " i_bcnt: %llu\n",
+ qm_mcr_cgrtestwrite_i_get64(&result));
+ seq_printf(file, " a_bcnt: %llu\n",
+ qm_mcr_cgrtestwrite_a_get64(&result));
+ seq_printf(file, " wr_prob_g: %u\n", result.wr_prob_g);
+ seq_printf(file, " wr_prob_y: %u\n", result.wr_prob_y);
+ seq_printf(file, " wr_prob_r: %u\n", result.wr_prob_r);
+ return 0;
+}
+
+static int testwrite_cgr_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, testwrite_cgr_show, NULL);
+}
+
+static const struct file_operations testwrite_cgr_fops = {
+ .owner = THIS_MODULE,
+ .open = testwrite_cgr_open,
+ .read = seq_read,
+ .release = single_release,
+};
+
+static int testwrite_cgr_ibcnt_show(struct seq_file *file, void *offset)
+{
+ seq_printf(file, "i_bcnt: %llu\n", test_write_cgr_data.i_bcnt);
+ return 0;
+}
+static int testwrite_cgr_ibcnt_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, testwrite_cgr_ibcnt_show, NULL);
+}
+
+static ssize_t testwrite_cgr_ibcnt_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ int ret;
+ unsigned long val;
+
+ ret = user_input_convert(buf, count, &val);
+ if (ret)
+ return ret;
+ test_write_cgr_data.i_bcnt = val;
+ return count;
+}
+
+static const struct file_operations testwrite_cgr_ibcnt_fops = {
+ .owner = THIS_MODULE,
+ .open = testwrite_cgr_ibcnt_open,
+ .read = seq_read,
+ .write = testwrite_cgr_ibcnt_write,
+ .release = single_release,
+};
+
+static int testwrite_cgr_cgrid_show(struct seq_file *file, void *offset)
+{
+ seq_printf(file, "cgrid: %u\n", (u32)test_write_cgr_data.cgid);
+ return 0;
+}
+static int testwrite_cgr_cgrid_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, testwrite_cgr_cgrid_show, NULL);
+}
+
+static ssize_t testwrite_cgr_cgrid_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ int ret;
+ unsigned long val;
+
+ ret = user_input_convert(buf, count, &val);
+ if (ret)
+ return ret;
+ if (val > 0xff)
+ return -EINVAL;
+ test_write_cgr_data.cgid = (u8)val;
+ return count;
+}
+
+static const struct file_operations testwrite_cgr_cgrid_fops = {
+ .owner = THIS_MODULE,
+ .open = testwrite_cgr_cgrid_open,
+ .read = seq_read,
+ .write = testwrite_cgr_cgrid_write,
+ .release = single_release,
+};
+
+/*******************************************************************************
+ * Query Congestion State
+ ******************************************************************************/
+static int query_congestion_show(struct seq_file *file, void *offset)
+{
+ int ret;
+ struct qm_mcr_querycongestion cs;
+ int i, j, in_cong = 0;
+ u32 mask;
+
+ memset(&cs, 0, sizeof(struct qm_mcr_querycongestion));
+ ret = qman_query_congestion(&cs);
+ if (ret)
+ return ret;
+ seq_puts(file, "Query Congestion Result\n");
+ for (i = 0; i < 8; i++) {
+ mask = 0x80000000;
+ for (j = 0; j < 32; j++) {
+ if (cs.state.__state[i] & mask) {
+ in_cong = 1;
+ seq_printf(file, " cg %u: %s\n", (i*32)+j,
+ "in congestion");
+ }
+ mask >>= 1;
+ }
+ }
+ if (!in_cong)
+ seq_puts(file, " All congestion groups not congested.\n");
+ return 0;
+}
+
+static int query_congestion_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, query_congestion_show, NULL);
+}
+
+static const struct file_operations query_congestion_fops = {
+ .owner = THIS_MODULE,
+ .open = query_congestion_open,
+ .read = seq_read,
+ .release = single_release,
+};
+
+/*******************************************************************************
+ * Query CCGR
+ ******************************************************************************/
+struct query_ccgr_s {
+ u32 ccgid;
+};
+static struct query_ccgr_s query_ccgr_data;
+
+static int query_ccgr_show(struct seq_file *file, void *offset)
+{
+ int ret;
+ struct qm_mcr_ceetm_ccgr_query ccgr_query;
+ struct qm_mcc_ceetm_ccgr_query query_opts;
+ int i, j;
+ u32 mask;
+
+ memset(&ccgr_query, 0, sizeof(struct qm_mcr_ceetm_ccgr_query));
+ memset(&query_opts, 0, sizeof(struct qm_mcc_ceetm_ccgr_query));
+
+ if ((qman_ip_rev & 0xFF00) < QMAN_REV30)
+ return -EINVAL;
+
+ seq_printf(file, "Query CCGID %x\n", query_ccgr_data.ccgid);
+ query_opts.dcpid = ((query_ccgr_data.ccgid & 0xFF000000) >> 24);
+ query_opts.ccgrid = query_ccgr_data.ccgid & 0x000001FF;
+ ret = qman_ceetm_query_ccgr(&query_opts, &ccgr_query);
+ if (ret)
+ return ret;
+ seq_printf(file, "Query CCGR id %x in DCP %d\n", query_opts.ccgrid,
+ query_opts.dcpid);
+ seq_printf(file, " wr_parm_g MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
+ ccgr_query.cm_query.wr_parm_g.MA,
+ ccgr_query.cm_query.wr_parm_g.Mn,
+ ccgr_query.cm_query.wr_parm_g.SA,
+ ccgr_query.cm_query.wr_parm_g.Sn,
+ ccgr_query.cm_query.wr_parm_g.Pn);
+
+ seq_printf(file, " wr_parm_y MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
+ ccgr_query.cm_query.wr_parm_y.MA,
+ ccgr_query.cm_query.wr_parm_y.Mn,
+ ccgr_query.cm_query.wr_parm_y.SA,
+ ccgr_query.cm_query.wr_parm_y.Sn,
+ ccgr_query.cm_query.wr_parm_y.Pn);
+
+ seq_printf(file, " wr_parm_r MA: %u, Mn: %u, SA: %u, Sn: %u, Pn: %u\n",
+ ccgr_query.cm_query.wr_parm_r.MA,
+ ccgr_query.cm_query.wr_parm_r.Mn,
+ ccgr_query.cm_query.wr_parm_r.SA,
+ ccgr_query.cm_query.wr_parm_r.Sn,
+ ccgr_query.cm_query.wr_parm_r.Pn);
+
+ seq_printf(file, " wr_en_g: %u, wr_en_y: %u, we_en_r: %u\n",
+ ccgr_query.cm_query.ctl_wr_en_g,
+ ccgr_query.cm_query.ctl_wr_en_y,
+ ccgr_query.cm_query.ctl_wr_en_r);
+
+ seq_printf(file, " cscn_en: %u\n", ccgr_query.cm_query.ctl_cscn_en);
+ seq_puts(file, " cscn_targ_dcp:\n");
+ mask = 0x80000000;
+ for (i = 0; i < 32; i++) {
+ if (ccgr_query.cm_query.cscn_targ_dcp & mask)
+ seq_printf(file, " send CSCN to dcp %u\n", (31 - i));
+ mask >>= 1;
+ }
+
+ seq_puts(file, " cscn_targ_swp:\n");
+ for (i = 0; i < 4; i++) {
+ mask = 0x80000000;
+ for (j = 0; j < 32; j++) {
+ if (ccgr_query.cm_query.cscn_targ_swp[i] & mask)
+ seq_printf(file, " send CSCN to swp"
+ "%u\n", (127 - (i * 32) - j));
+ mask >>= 1;
+ }
+ }
+
+ seq_printf(file, " td_en: %u\n", ccgr_query.cm_query.ctl_td_en);
+
+ seq_printf(file, " cs_thresh_in_TA: %u, cs_thresh_in_Tn: %u\n",
+ ccgr_query.cm_query.cs_thres.TA,
+ ccgr_query.cm_query.cs_thres.Tn);
+
+ seq_printf(file, " cs_thresh_out_TA: %u, cs_thresh_out_Tn: %u\n",
+ ccgr_query.cm_query.cs_thres_x.TA,
+ ccgr_query.cm_query.cs_thres_x.Tn);
+
+ seq_printf(file, " td_thresh_TA: %u, td_thresh_Tn: %u\n",
+ ccgr_query.cm_query.td_thres.TA,
+ ccgr_query.cm_query.td_thres.Tn);
+
+ seq_printf(file, " mode: %s\n",
+ (ccgr_query.cm_query.ctl_mode &
+ QMAN_CGR_MODE_FRAME) ?
+ "frame count" : "byte count");
+ seq_printf(file, " i_cnt: %llu\n", (u64)ccgr_query.cm_query.i_cnt);
+ seq_printf(file, " a_cnt: %llu\n", (u64)ccgr_query.cm_query.a_cnt);
+
+ return 0;
+}
+
+static int query_ccgr_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, query_ccgr_show, NULL);
+}
+
+static ssize_t query_ccgr_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ int ret;
+ unsigned long val;
+
+ ret = user_input_convert(buf, count, &val);
+ if (ret)
+ return ret;
+ query_ccgr_data.ccgid = val;
+ return count;
+}
+
+static const struct file_operations query_ccgr_fops = {
+ .owner = THIS_MODULE,
+ .open = query_ccgr_open,
+ .read = seq_read,
+ .write = query_ccgr_write,
+ .release = single_release,
+};
+/*******************************************************************************
+ * QMan register
+ ******************************************************************************/
+struct qman_register_s {
+ u32 val;
+};
+static struct qman_register_s qman_register_data;
+
+static void init_ccsrmempeek(void)
+{
+ struct device_node *dn;
+ const u32 *regaddr_p;
+
+ dn = of_find_compatible_node(NULL, NULL, "fsl,qman");
+ if (!dn) {
+ pr_info("No fsl,qman node\n");
+ return;
+ }
+ regaddr_p = of_get_address(dn, 0, &qman_ccsr_size, NULL);
+ if (!regaddr_p) {
+ of_node_put(dn);
+ return;
+ }
+ qman_ccsr_start = of_translate_address(dn, regaddr_p);
+ of_node_put(dn);
+}
+/* This function provides read access to the QMan CCSR memory map */
+static int qman_ccsrmempeek(u32 *val, u32 offset)
+{
+ void __iomem *addr;
+ u64 phys_addr;
+
+ if (!qman_ccsr_start)
+ return -EINVAL;
+
+ if (offset > (qman_ccsr_size - sizeof(u32)))
+ return -EINVAL;
+
+ phys_addr = qman_ccsr_start + offset;
+ addr = ioremap(phys_addr, sizeof(u32));
+ if (!addr) {
+ pr_err("ccsrmempeek, ioremap failed\n");
+ return -EINVAL;
+ }
+ *val = in_be32(addr);
+ iounmap(addr);
+ return 0;
+}
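+
+/*
+ * Example: qman_ccsrmempeek(&reg, QM_FQD_AR) reads the FQD AR register
+ * at CCSR offset 0xC10 (per the QM_FQD_AR define above); module init
+ * below uses exactly this read to size the valid FQID range.
+ */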
+
+static int qman_ccsrmempeek_show(struct seq_file *file, void *offset)
+{
+ u32 b;
+ int ret;
+
+ ret = qman_ccsrmempeek(&b, qman_register_data.val);
+ if (ret)
+ return ret;
+ seq_printf(file, "QMan register offset = 0x%x\n",
+ qman_register_data.val);
+ seq_printf(file, "value = 0x%08x\n", b);
+
+ return 0;
+}
+
+static int qman_ccsrmempeek_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, qman_ccsrmempeek_show, NULL);
+}
+
+static ssize_t qman_ccsrmempeek_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ int ret;
+ unsigned long val;
+
+ ret = user_input_convert(buf, count, &val);
+ if (ret)
+ return ret;
+ /* the offset must lie within the CCSR map and be a multiple of 4 */
+ if (val > (qman_ccsr_size - sizeof(u32))) {
+ pr_info("Input 0x%lx > 0x%llx\n",
+ val, (qman_ccsr_size - sizeof(u32)));
+ return -EINVAL;
+ }
+ if (val & 0x3) {
+ pr_info("Input 0x%lx not multiple of 4\n", val);
+ return -EINVAL;
+ }
+ qman_register_data.val = val;
+ return count;
+}
+
+static const struct file_operations qman_ccsrmempeek_fops = {
+ .owner = THIS_MODULE,
+ .open = qman_ccsrmempeek_open,
+ .read = seq_read,
+ .write = qman_ccsrmempeek_write,
+ .release = single_release,
+};
+
+/*******************************************************************************
+ * QMan state
+ ******************************************************************************/
+static int qman_fqd_state_show(struct seq_file *file, void *offset)
+{
+ struct qm_mcr_queryfq_np np;
+ struct qman_fq fq;
+ struct line_buffer_fq line_buf;
+ int ret, i;
+ u8 *state = file->private;
+ u32 qm_fq_state_cnt[ARRAY_SIZE(fqd_states)];
+
+ memset(qm_fq_state_cnt, 0, sizeof(qm_fq_state_cnt));
+ memset(&line_buf, 0, sizeof(line_buf));
+
+ seq_printf(file, "List of fq ids in state: %s\n", state_txt[*state]);
+
+ for (i = 1; i < fqid_max; i++) {
+ fq.fqid = i;
+ ret = qman_query_fq_np(&fq, &np);
+ if (ret)
+ return ret;
+ if (*state == (np.state & QM_MCR_NP_STATE_MASK))
+ add_to_line_buffer(&line_buf, fq.fqid, file);
+ /* Keep a summary count of all states */
+ if ((np.state & QM_MCR_NP_STATE_MASK) < ARRAY_SIZE(fqd_states))
+ qm_fq_state_cnt[(np.state & QM_MCR_NP_STATE_MASK)]++;
+ }
+ flush_line_buffer(&line_buf, file);
+
+ for (i = 0; i < ARRAY_SIZE(fqd_states); i++) {
+ seq_printf(file, "%s count = %u\n", state_txt[i],
+ qm_fq_state_cnt[i]);
+ }
+ return 0;
+}
+
+static int qman_fqd_state_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, qman_fqd_state_show, inode->i_private);
+}
+
+static const struct file_operations qman_fqd_state_fops = {
+ .owner = THIS_MODULE,
+ .open = qman_fqd_state_open,
+ .read = seq_read,
+ .release = single_release,
+};
+
+static int qman_fqd_ctrl_show(struct seq_file *file, void *offset)
+{
+ struct qm_fqd fqd;
+ struct qman_fq fq;
+ u32 fq_en_cnt = 0, fq_di_cnt = 0;
+ int ret, i;
+ struct mask_filter_s *data = file->private;
+ const char *ctrl_txt = get_fqd_ctrl_text(data->mask);
+ struct line_buffer_fq line_buf;
+
+ memset(&line_buf, 0, sizeof(line_buf));
+ seq_printf(file, "List of fq ids with: %s :%s\n",
+ ctrl_txt, (data->filter) ? "enabled" : "disabled");
+ for (i = 1; i < fqid_max; i++) {
+ fq.fqid = i;
+ memset(&fqd, 0, sizeof(struct qm_fqd));
+ ret = qman_query_fq(&fq, &fqd);
+ if (ret)
+ return ret;
+ if (data->filter) {
+ if (fqd.fq_ctrl & data->mask)
+ add_to_line_buffer(&line_buf, fq.fqid, file);
+ } else {
+ if (!(fqd.fq_ctrl & data->mask))
+ add_to_line_buffer(&line_buf, fq.fqid, file);
+ }
+ if (fqd.fq_ctrl & data->mask)
+ fq_en_cnt++;
+ else
+ fq_di_cnt++;
+ }
+ flush_line_buffer(&line_buf, file);
+
+ seq_printf(file, "Total FQD with: %s : enabled = %u\n",
+ ctrl_txt, fq_en_cnt);
+ seq_printf(file, "Total FQD with: %s : disabled = %u\n",
+ ctrl_txt, fq_di_cnt);
+ return 0;
+}
+
+/*******************************************************************************
+ * QMan ctrl CGE, TDE, ORP, CTX, CPC, SFDR, BLOCK, HOLD, CACHE
+ ******************************************************************************/
+static int qman_fqd_ctrl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, qman_fqd_ctrl_show, inode->i_private);
+}
+
+static const struct file_operations qman_fqd_ctrl_fops = {
+ .owner = THIS_MODULE,
+ .open = qman_fqd_ctrl_open,
+ .read = seq_read,
+ .release = single_release,
+};
+
+/*******************************************************************************
+ * QMan ctrl summary
+ ******************************************************************************/
+/*******************************************************************************
+ * QMan summary state
+ ******************************************************************************/
+static int qman_fqd_non_prog_summary_show(struct seq_file *file, void *offset)
+{
+ struct qm_mcr_queryfq_np np;
+ struct qman_fq fq;
+ int ret, i;
+ u32 qm_fq_state_cnt[ARRAY_SIZE(fqd_states)];
+
+ memset(qm_fq_state_cnt, 0, sizeof(qm_fq_state_cnt));
+
+ for (i = 1; i < fqid_max; i++) {
+ fq.fqid = i;
+ ret = qman_query_fq_np(&fq, &np);
+ if (ret)
+ return ret;
+ /* Keep a summary count of all states */
+ if ((np.state & QM_MCR_NP_STATE_MASK) < ARRAY_SIZE(fqd_states))
+ qm_fq_state_cnt[(np.state & QM_MCR_NP_STATE_MASK)]++;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(fqd_states); i++) {
+ seq_printf(file, "%s count = %u\n", state_txt[i],
+ qm_fq_state_cnt[i]);
+ }
+ return 0;
+}
+
+static int qman_fqd_prog_summary_show(struct seq_file *file, void *offset)
+{
+ struct qm_fqd fqd;
+ struct qman_fq fq;
+ int ret, i, j;
+ u32 qm_prog_cnt[ARRAY_SIZE(mask_filter)/2];
+
+ memset(qm_prog_cnt, 0, sizeof(qm_prog_cnt));
+
+ for (i = 1; i < fqid_max; i++) {
+ memset(&fqd, 0, sizeof(struct qm_fqd));
+ fq.fqid = i;
+ ret = qman_query_fq(&fq, &fqd);
+ if (ret)
+ return ret;
+ /* Keep a summary count of all states */
+ for (j = 0; j < ARRAY_SIZE(mask_filter); j += 2)
+ if ((fqd.fq_ctrl & QM_FQCTRL_MASK) &
+ mask_filter[j].mask)
+ qm_prog_cnt[j/2]++;
+ }
+ for (i = 0; i < ARRAY_SIZE(mask_filter) / 2; i++) {
+ seq_printf(file, "%s count = %u\n",
+ get_fqd_ctrl_text(mask_filter[i*2].mask),
+ qm_prog_cnt[i]);
+ }
+ return 0;
+}
+
+static int qman_fqd_summary_show(struct seq_file *file, void *offset)
+{
+ int ret;
+
+ /* Display summary of non programmable fields */
+ ret = qman_fqd_non_prog_summary_show(file, offset);
+ if (ret)
+ return ret;
+ seq_puts(file, "-----------------------------------------\n");
+ /* Display programmable fields */
+ ret = qman_fqd_prog_summary_show(file, offset);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+static int qman_fqd_summary_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, qman_fqd_summary_show, NULL);
+}
+
+static const struct file_operations qman_fqd_summary_fops = {
+ .owner = THIS_MODULE,
+ .open = qman_fqd_summary_open,
+ .read = seq_read,
+ .release = single_release,
+};
+
+/*******************************************************************************
+ * QMan destination work queue
+ ******************************************************************************/
+struct qman_dest_wq_s {
+ u16 wq_id;
+};
+static struct qman_dest_wq_s qman_dest_wq_data = {
+ .wq_id = 0,
+};
+
+static int qman_fqd_dest_wq_show(struct seq_file *file, void *offset)
+{
+ struct qm_fqd fqd;
+ struct qman_fq fq;
+ int ret, i;
+ u16 *wq, wq_id = qman_dest_wq_data.wq_id;
+ struct line_buffer_fq line_buf;
+
+ memset(&line_buf, 0, sizeof(line_buf));
+ /* use vmalloc: this needs a large (128 KiB) allocation that does
+ * not have to be physically contiguous */
+ wq = vzalloc(sizeof(u16) * (0xFFFF+1));
+ if (!wq)
+ return -ENOMEM;
+
+ seq_printf(file, "List of fq ids with destination work queue id"
+ " = 0x%x\n", wq_id);
+
+ for (i = 1; i < fqid_max; i++) {
+ fq.fqid = i;
+ memset(&fqd, 0, sizeof(struct qm_fqd));
+ ret = qman_query_fq(&fq, &fqd);
+ if (ret) {
+ vfree(wq);
+ return ret;
+ }
+ if (wq_id == fqd.dest_wq)
+ add_to_line_buffer(&line_buf, fq.fqid, file);
+ wq[fqd.dest_wq]++;
+ }
+ flush_line_buffer(&line_buf, file);
+
+ seq_puts(file, "Summary of all FQD destination work queue values\n");
+ for (i = 0; i < 0xFFFF; i++) {
+ if (wq[i])
+ seq_printf(file, "Channel: 0x%x WQ: 0x%x WQ_ID: 0x%x, "
+ "count = %u\n", i >> 3, i & 0x3, i, wq[i]);
+ }
+ vfree(wq);
+ return 0;
+}
+
+static ssize_t qman_fqd_dest_wq_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ int ret;
+ unsigned long val;
+
+ ret = user_input_convert(buf, count, &val);
+ if (ret)
+ return ret;
+ if (val > 0xFFFF)
+ return -EINVAL;
+ qman_dest_wq_data.wq_id = val;
+ return count;
+}
+
+static int qman_fqd_dest_wq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, qman_fqd_dest_wq_show, NULL);
+}
+
+static const struct file_operations qman_fqd_dest_wq_fops = {
+ .owner = THIS_MODULE,
+ .open = qman_fqd_dest_wq_open,
+ .read = seq_read,
+ .write = qman_fqd_dest_wq_write,
+ .release = single_release,
+};
+
+/*******************************************************************************
+ * QMan Intra-Class Scheduling Credit
+ ******************************************************************************/
+static int qman_fqd_cred_show(struct seq_file *file, void *offset)
+{
+ struct qm_fqd fqd;
+ struct qman_fq fq;
+ int ret, i;
+ u32 fq_cnt = 0;
+ struct line_buffer_fq line_buf;
+
+ memset(&line_buf, 0, sizeof(line_buf));
+ seq_puts(file, "List of fq ids with Intra-Class Scheduling Credit > 0"
+ "\n");
+
+ for (i = 1; i < fqid_max; i++) {
+ fq.fqid = i;
+ memset(&fqd, 0, sizeof(struct qm_fqd));
+ ret = qman_query_fq(&fq, &fqd);
+ if (ret)
+ return ret;
+ if (fqd.ics_cred > 0) {
+ add_to_line_buffer(&line_buf, fq.fqid, file);
+ fq_cnt++;
+ }
+ }
+ flush_line_buffer(&line_buf, file);
+
+ seq_printf(file, "Total FQD with ics_cred > 0 = %d\n", fq_cnt);
+ return 0;
+}
+
+static int qman_fqd_cred_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, qman_fqd_cred_show, NULL);
+}
+
+static const struct file_operations qman_fqd_cred_fops = {
+ .owner = THIS_MODULE,
+ .open = qman_fqd_cred_open,
+ .read = seq_read,
+ .release = single_release,
+};
+
+/*******************************************************************************
+ * Class Queue Fields
+ ******************************************************************************/
+struct query_cq_fields_data_s {
+ u32 cqid;
+};
+
+static struct query_cq_fields_data_s query_cq_fields_data = {
+ .cqid = 1,
+};
+
+static int query_cq_fields_show(struct seq_file *file, void *offset)
+{
+ int ret;
+ struct qm_mcr_ceetm_cq_query query_result;
+ unsigned int cqid;
+ unsigned int portal;
+
+ if ((qman_ip_rev & 0xFF00) < QMAN_REV30)
+ return -EINVAL;
+
+ cqid = query_cq_fields_data.cqid & 0x00FFFFFF;
+ portal = query_cq_fields_data.cqid >> 24;
+ if (portal > qm_dc_portal_fman1)
+ return -EINVAL;
+
+ ret = qman_ceetm_query_cq(cqid, portal, &query_result);
+ if (ret)
+ return ret;
+ seq_printf(file, "Query CQ Fields Result cqid 0x%x on DCP %d\n",
+ cqid, portal);
+ seq_printf(file, " ccgid: %u\n", query_result.ccgid);
+ seq_printf(file, " state: %u\n", query_result.state);
+ seq_printf(file, " pfdr_hptr: %u\n", query_result.pfdr_hptr);
+ seq_printf(file, " pfdr_tptr: %u\n", query_result.pfdr_tptr);
+ seq_printf(file, " od1_xsfdr: %u\n", query_result.od1_xsfdr);
+ seq_printf(file, " od2_xsfdr: %u\n", query_result.od2_xsfdr);
+ seq_printf(file, " od3_xsfdr: %u\n", query_result.od3_xsfdr);
+ seq_printf(file, " od4_xsfdr: %u\n", query_result.od4_xsfdr);
+ seq_printf(file, " od5_xsfdr: %u\n", query_result.od5_xsfdr);
+ seq_printf(file, " od6_xsfdr: %u\n", query_result.od6_xsfdr);
+ seq_printf(file, " ra1_xsfdr: %u\n", query_result.ra1_xsfdr);
+ seq_printf(file, " ra2_xsfdr: %u\n", query_result.ra2_xsfdr);
+ seq_printf(file, " frame_count: %u\n", query_result.frm_cnt);
+
+ return 0;
+}
+
+static int query_cq_fields_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, query_cq_fields_show, NULL);
+}
+
+static ssize_t query_cq_fields_write(struct file *f,
+ const char __user *buf, size_t count, loff_t *off)
+{
+ int ret;
+ unsigned long val;
+
+ ret = user_input_convert(buf, count, &val);
+ if (ret)
+ return ret;
+ query_cq_fields_data.cqid = (u32)val;
+ return count;
+}
+
+static const struct file_operations query_cq_fields_fops = {
+ .owner = THIS_MODULE,
+ .open = query_cq_fields_open,
+ .read = seq_read,
+ .write = query_cq_fields_write,
+ .release = single_release,
+};
+
+/*******************************************************************************
+ * READ CEETM_XSFDR_IN_USE
+ ******************************************************************************/
+struct query_ceetm_xsfdr_data_s {
+ enum qm_dc_portal dcp_portal;
+};
+
+static struct query_ceetm_xsfdr_data_s query_ceetm_xsfdr_data;
+
+static int query_ceetm_xsfdr_show(struct seq_file *file, void *offset)
+{
+ int ret;
+ unsigned int xsfdr_in_use;
+ enum qm_dc_portal portal;
+
+ if (qman_ip_rev < QMAN_REV31)
+ return -EINVAL;
+
+ portal = query_ceetm_xsfdr_data.dcp_portal;
+ ret = qman_ceetm_get_xsfdr(portal, &xsfdr_in_use);
+ if (ret) {
+ seq_printf(file, "Read CEETM_XSFDR_IN_USE on DCP %d failed\n",
+ portal);
+ return ret;
+ }
+
+ seq_printf(file, "DCP%d: CEETM_XSFDR_IN_USE number is %u\n", portal,
+ (xsfdr_in_use & 0x1FFF));
+ return 0;
+}
+
+static int query_ceetm_xsfdr_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, query_ceetm_xsfdr_show, NULL);
+}
+
+static ssize_t query_ceetm_xsfdr_write(struct file *f,
+ const char __user *buf, size_t count, loff_t *off)
+{
+ int ret;
+ unsigned long val;
+
+ ret = user_input_convert(buf, count, &val);
+ if (ret)
+ return ret;
+ if (val > qm_dc_portal_fman1)
+ return -EINVAL;
+ query_ceetm_xsfdr_data.dcp_portal = (u32)val;
+ return count;
+}
+
+static const struct file_operations query_ceetm_xsfdr_fops = {
+ .owner = THIS_MODULE,
+ .open = query_ceetm_xsfdr_open,
+ .read = seq_read,
+ .write = query_ceetm_xsfdr_write,
+ .release = single_release,
+};
+
+/* helper macros used in qman_debugfs_module_init */
+#define QMAN_DBGFS_ENTRY(name, mode, parent, data, fops) \
+ do { \
+ d = debugfs_create_file(name, \
+ mode, parent, \
+ data, \
+ fops); \
+ if (d == NULL) { \
+ ret = -ENOMEM; \
+ goto _return; \
+ } \
+ } while (0)
+
+/* dfs_root as parent */
+#define QMAN_DBGFS_ENTRY_ROOT(name, mode, data, fops) \
+ QMAN_DBGFS_ENTRY(name, mode, dfs_root, data, fops)
+
+/* fqd_root as parent */
+#define QMAN_DBGFS_ENTRY_FQDROOT(name, mode, data, fops) \
+ QMAN_DBGFS_ENTRY(name, mode, fqd_root, data, fops)
+
+/* fqd state */
+#define QMAN_DBGFS_ENTRY_FQDSTATE(name, index) \
+ QMAN_DBGFS_ENTRY_FQDROOT(name, S_IRUGO, \
+ (void *)&mask_filter[index], &qman_fqd_ctrl_fops)
+
+static int __init qman_debugfs_module_init(void)
+{
+ int ret = 0;
+ struct dentry *d, *fqd_root;
+ u32 reg;
+
+ fqid_max = 0;
+ init_ccsrmempeek();
+ if (!qman_ccsr_start) {
+ /* No QMan node found in device tree */
+ return 0;
+ }
+
+ if (!qman_ccsrmempeek(&reg, QM_FQD_AR)) {
+ /* extract the size of the FQD window */
+ reg = reg & 0x3f;
+ /*
+ * calculate the valid frame queue descriptor range: the AR
+ * field encodes the FQD window size as a power of two, e.g.
+ * an AR value of 24 denotes a 2^25-byte window holding
+ * 524288 descriptors of QM_FQD_BLOCK_SIZE (64) bytes
+ */
+ fqid_max = (1 << (reg + 1)) / QM_FQD_BLOCK_SIZE;
+ }
+ dfs_root = debugfs_create_dir("qman", NULL);
+ fqd_root = debugfs_create_dir("fqd", dfs_root);
+ if (dfs_root == NULL || fqd_root == NULL) {
+ ret = -ENOMEM;
+ pr_err("Cannot create qman/fqd debugfs dir\n");
+ goto _return;
+ }
+ if (fqid_max) {
+ QMAN_DBGFS_ENTRY_ROOT("ccsrmempeek", S_IRUGO | S_IWUGO,
+ NULL, &qman_ccsrmempeek_fops);
+ }
+ QMAN_DBGFS_ENTRY_ROOT("query_fq_np_fields", S_IRUGO | S_IWUGO,
+ &query_fq_np_fields_data, &query_fq_np_fields_fops);
+
+ QMAN_DBGFS_ENTRY_ROOT("query_fq_fields", S_IRUGO | S_IWUGO,
+ &query_fq_fields_data, &query_fq_fields_fops);
+
+ QMAN_DBGFS_ENTRY_ROOT("query_wq_lengths", S_IRUGO | S_IWUGO,
+ &query_wq_lengths_data, &query_wq_lengths_fops);
+
+ QMAN_DBGFS_ENTRY_ROOT("query_cgr", S_IRUGO | S_IWUGO,
+ &query_cgr_data, &query_cgr_fops);
+
+ QMAN_DBGFS_ENTRY_ROOT("query_congestion", S_IRUGO,
+ NULL, &query_congestion_fops);
+
+ QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr", S_IRUGO,
+ NULL, &testwrite_cgr_fops);
+
+ QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr_cgrid", S_IRUGO | S_IWUGO,
+ NULL, &teswrite_cgr_cgrid_fops);
+
+ QMAN_DBGFS_ENTRY_ROOT("testwrite_cgr_ibcnt", S_IRUGO | S_IWUGO,
+ NULL, &teswrite_cgr_ibcnt_fops);
+
+ QMAN_DBGFS_ENTRY_ROOT("query_ceetm_ccgr", S_IRUGO | S_IWUGO,
+ &query_ccgr_data, &query_ccgr_fops);
+ /* Create files with fqd_root as parent */
+
+ QMAN_DBGFS_ENTRY_FQDROOT("stateoos", S_IRUGO,
+ (void *)&fqd_states[QM_MCR_NP_STATE_OOS], &qman_fqd_state_fops);
+
+ QMAN_DBGFS_ENTRY_FQDROOT("state_retired", S_IRUGO,
+ (void *)&fqd_states[QM_MCR_NP_STATE_RETIRED],
+ &qman_fqd_state_fops);
+
+ QMAN_DBGFS_ENTRY_FQDROOT("state_tentatively_sched", S_IRUGO,
+ (void *)&fqd_states[QM_MCR_NP_STATE_TEN_SCHED],
+ &qman_fqd_state_fops);
+
+ QMAN_DBGFS_ENTRY_FQDROOT("state_truly_sched", S_IRUGO,
+ (void *)&fqd_states[QM_MCR_NP_STATE_TRU_SCHED],
+ &qman_fqd_state_fops);
+
+ QMAN_DBGFS_ENTRY_FQDROOT("state_parked", S_IRUGO,
+ (void *)&fqd_states[QM_MCR_NP_STATE_PARKED],
+ &qman_fqd_state_fops);
+
+ QMAN_DBGFS_ENTRY_FQDROOT("state_active", S_IRUGO,
+ (void *)&fqd_states[QM_MCR_NP_STATE_ACTIVE],
+ &qman_fqd_state_fops);
+ QMAN_DBGFS_ENTRY_ROOT("query_cq_fields", S_IRUGO | S_IWUGO,
+ &query_cq_fields_data, &query_cq_fields_fops);
+ QMAN_DBGFS_ENTRY_ROOT("query_ceetm_xsfdr_in_use", S_IRUGO | S_IWUGO,
+ &query_ceetm_xsfdr_data, &query_ceetm_xsfdr_fops);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("cge_enable", 17);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("cge_disable", 16);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("tde_enable", 15);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("tde_disable", 14);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("orp_enable", 13);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("orp_disable", 12);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("ctx_a_stashing_enable", 11);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("ctx_a_stashing_disable", 10);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("cpc_enable", 9);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("cpc_disable", 8);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("sfdr_enable", 7);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("sfdr_disable", 6);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("avoid_blocking_enable", 5);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("avoid_blocking_disable", 4);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("hold_active_enable", 3);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("hold_active_disable", 2);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("prefer_in_cache_enable", 1);
+
+ QMAN_DBGFS_ENTRY_FQDSTATE("prefer_in_cache_disable", 0);
+
+ QMAN_DBGFS_ENTRY_FQDROOT("summary", S_IRUGO,
+ NULL, &qman_fqd_summary_fops);
+
+ QMAN_DBGFS_ENTRY_FQDROOT("wq", S_IRUGO | S_IWUGO,
+ NULL, &qman_fqd_dest_wq_fops);
+
+ QMAN_DBGFS_ENTRY_FQDROOT("cred", S_IRUGO,
+ NULL, &qman_fqd_cred_fops);
+
+ return 0;
+
+_return:
+ debugfs_remove_recursive(dfs_root);
+ return ret;
+}
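+
+/*
+ * Typical use from a shell (assuming debugfs is mounted at
+ * /sys/kernel/debug):
+ *
+ *   echo 0x2a > /sys/kernel/debug/qman/query_cgr    # select CGR 42
+ *   cat /sys/kernel/debug/qman/query_cgr            # dump its fields
+ */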
+
+static void __exit qman_debugfs_module_exit(void)
+{
+ debugfs_remove_recursive(dfs_root);
+}
+
+module_init(qman_debugfs_module_init);
+module_exit(qman_debugfs_module_exit);
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/staging/fsl_qbman/qman_driver.c b/drivers/staging/fsl_qbman/qman_driver.c
new file mode 100644
index 000000000000..b55ded540243
--- /dev/null
+++ b/drivers/staging/fsl_qbman/qman_driver.c
@@ -0,0 +1,962 @@
+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_private.h"
+
+#include <asm/smp.h> /* hard_smp_processor_id() if !CONFIG_SMP */
+#ifdef CONFIG_HOTPLUG_CPU
+#include <linux/cpu.h>
+#endif
+
+/* Global variable containing revision id (even on non-control plane systems
+ * where CCSR isn't available) */
+u16 qman_ip_rev;
+EXPORT_SYMBOL(qman_ip_rev);
+u8 qman_ip_cfg;
+EXPORT_SYMBOL(qman_ip_cfg);
+u16 qm_channel_pool1 = QMAN_CHANNEL_POOL1;
+EXPORT_SYMBOL(qm_channel_pool1);
+u16 qm_channel_caam = QMAN_CHANNEL_CAAM;
+EXPORT_SYMBOL(qm_channel_caam);
+u16 qm_channel_pme = QMAN_CHANNEL_PME;
+EXPORT_SYMBOL(qm_channel_pme);
+u16 qm_channel_dce = QMAN_CHANNEL_DCE;
+EXPORT_SYMBOL(qm_channel_dce);
+u16 qman_portal_max;
+EXPORT_SYMBOL(qman_portal_max);
+
+u32 qman_clk;
+struct qm_ceetm qman_ceetms[QMAN_CEETM_MAX];
+/* the qman ceetm instances on the given SoC */
+u8 num_ceetms;
+
+/* For these variables, and the portal-initialisation logic, the
+ * comments in bman_driver.c apply here so won't be repeated. */
+static struct qman_portal *shared_portals[NR_CPUS];
+static int num_shared_portals;
+static int shared_portals_idx;
+static LIST_HEAD(unused_pcfgs);
+static DEFINE_SPINLOCK(unused_pcfgs_lock);
+
+/* A SDQCR mask comprising all the available/visible pool channels */
+static u32 pools_sdqcr;
+
+#define STR_ERR_NOPROP "No '%s' property in node %s\n"
+#define STR_ERR_CELL "'%s' is not a %d-cell range in node %s\n"
+#define STR_FQID_RANGE "fsl,fqid-range"
+#define STR_POOL_CHAN_RANGE "fsl,pool-channel-range"
+#define STR_CGRID_RANGE "fsl,cgrid-range"
+
+/* A "fsl,fqid-range" node; release the given range to the allocator */
+static __init int fsl_fqid_range_init(struct device_node *node)
+{
+ int ret;
+ const u32 *range = of_get_property(node, STR_FQID_RANGE, &ret);
+ if (!range) {
+ pr_err(STR_ERR_NOPROP, STR_FQID_RANGE, node->full_name);
+ return -EINVAL;
+ }
+ if (ret != 8) {
+ pr_err(STR_ERR_CELL, STR_FQID_RANGE, 2, node->full_name);
+ return -EINVAL;
+ }
+ qman_seed_fqid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
+ pr_info("Qman: FQID allocator includes range %d:%d\n",
+ be32_to_cpu(range[0]), be32_to_cpu(range[1]));
+ return 0;
+}
+
+/* A "fsl,pool-channel-range" node; add to the SDQCR mask only */
+static __init int fsl_pool_channel_range_sdqcr(struct device_node *node)
+{
+ int ret;
+ const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
+ if (!chanid) {
+ pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
+ return -EINVAL;
+ }
+ if (ret != 8) {
+ pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 1, node->full_name);
+ return -EINVAL;
+ }
+ for (ret = 0; ret < be32_to_cpu(chanid[1]); ret++)
+ pools_sdqcr |= QM_SDQCR_CHANNELS_POOL_CONV(be32_to_cpu(chanid[0]) + ret);
+ return 0;
+}
+
+/* A "fsl,pool-channel-range" node; release the given range to the allocator */
+static __init int fsl_pool_channel_range_init(struct device_node *node)
+{
+ int ret;
+ const u32 *chanid = of_get_property(node, STR_POOL_CHAN_RANGE, &ret);
+ if (!chanid) {
+ pr_err(STR_ERR_NOPROP, STR_POOL_CHAN_RANGE, node->full_name);
+ return -EINVAL;
+ }
+ if (ret != 8) {
+ pr_err(STR_ERR_CELL, STR_POOL_CHAN_RANGE, 1, node->full_name);
+ return -EINVAL;
+ }
+ qman_seed_pool_range(be32_to_cpu(chanid[0]), be32_to_cpu(chanid[1]));
+ pr_info("Qman: pool channel allocator includes range %d:%d\n",
+ be32_to_cpu(chanid[0]), be32_to_cpu(chanid[1]));
+ return 0;
+}
+
+/* A "fsl,cgrid-range" node; release the given range to the allocator */
+static __init int fsl_cgrid_range_init(struct device_node *node)
+{
+ struct qman_cgr cgr;
+ int ret, errors = 0;
+ const u32 *range = of_get_property(node, STR_CGRID_RANGE, &ret);
+ if (!range) {
+ pr_err(STR_ERR_NOPROP, STR_CGRID_RANGE, node->full_name);
+ return -EINVAL;
+ }
+ if (ret != 8) {
+ pr_err(STR_ERR_CELL, STR_CGRID_RANGE, 2, node->full_name);
+ return -EINVAL;
+ }
+ qman_seed_cgrid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
+ pr_info("Qman: CGRID allocator includes range %d:%d\n",
+ be32_to_cpu(range[0]), be32_to_cpu(range[1]));
+ for (cgr.cgrid = 0; cgr.cgrid < __CGR_NUM; cgr.cgrid++) {
+ ret = qman_modify_cgr(&cgr, QMAN_CGR_FLAG_USE_INIT, NULL);
+ if (ret)
+ errors++;
+ }
+ if (errors)
+ pr_err("Warning: %d error%s while initialising CGRs %d:%d\n",
+ errors, (errors > 1) ? "s" : "", range[0], range[1]);
+ return 0;
+}
+
+static __init int fsl_ceetm_init(struct device_node *node)
+{
+ enum qm_dc_portal dcp_portal;
+ struct qm_ceetm_sp *sp;
+ struct qm_ceetm_lni *lni;
+ int ret, i;
+ const u32 *range;
+
+ /* Find LFQID range */
+ range = of_get_property(node, "fsl,ceetm-lfqid-range", &ret);
+ if (!range) {
+ pr_err("No fsl,ceetm-lfqid-range in node %s\n",
+ node->full_name);
+ return -EINVAL;
+ }
+ if (ret != 8) {
+ pr_err("fsl,ceetm-lfqid-range is not a 2-cell range in node"
+ " %s\n", node->full_name);
+ return -EINVAL;
+ }
+
+ dcp_portal = (be32_to_cpu(range[0]) & 0x0F0000) >> 16;
+ if (dcp_portal > qm_dc_portal_fman1) {
+ pr_err("The DCP portal %d doesn't support CEETM\n", dcp_portal);
+ return -EINVAL;
+ }
+
+ if (dcp_portal == qm_dc_portal_fman0)
+ qman_seed_ceetm0_lfqid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
+ if (dcp_portal == qm_dc_portal_fman1)
+ qman_seed_ceetm1_lfqid_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
+ pr_debug("Qman: The lfqid allocator of CEETM %d includes range"
+ " 0x%x:0x%x\n", dcp_portal, be32_to_cpu(range[0]), be32_to_cpu(range[1]));
+
+ qman_ceetms[dcp_portal].idx = dcp_portal;
+ INIT_LIST_HEAD(&qman_ceetms[dcp_portal].sub_portals);
+ INIT_LIST_HEAD(&qman_ceetms[dcp_portal].lnis);
+
+ /* Find Sub-portal range */
+ range = of_get_property(node, "fsl,ceetm-sp-range", &ret);
+ if (!range) {
+ pr_err("No fsl,ceetm-sp-range in node %s\n", node->full_name);
+ return -EINVAL;
+ }
+ if (ret != 8) {
+ pr_err("fsl,ceetm-sp-range is not a 2-cell range in node %s\n",
+ node->full_name);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < be32_to_cpu(range[1]); i++) {
+ sp = kzalloc(sizeof(*sp), GFP_KERNEL);
+ if (!sp) {
+ pr_err("Can't alloc memory for sub-portal %d\n",
+ be32_to_cpu(range[0]) + i);
+ return -ENOMEM;
+ }
+ sp->idx = be32_to_cpu(range[0]) + i;
+ sp->dcp_idx = dcp_portal;
+ sp->is_claimed = 0;
+ list_add_tail(&sp->node, &qman_ceetms[dcp_portal].sub_portals);
+ }
+ pr_debug("Qman: Reserve sub-portal %d:%d for CEETM %d\n",
+ be32_to_cpu(range[0]), be32_to_cpu(range[1]), dcp_portal);
+ qman_ceetms[dcp_portal].sp_range[0] = be32_to_cpu(range[0]);
+ qman_ceetms[dcp_portal].sp_range[1] = be32_to_cpu(range[1]);
+
+ /* Find LNI range */
+ range = of_get_property(node, "fsl,ceetm-lni-range", &ret);
+ if (!range) {
+ pr_err("No fsl,ceetm-lni-range in node %s\n", node->full_name);
+ return -EINVAL;
+ }
+ if (ret != 8) {
+ pr_err("fsl,ceetm-lni-range is not a 2-cell range in node %s\n",
+ node->full_name);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < be32_to_cpu(range[1]); i++) {
+ lni = kzalloc(sizeof(*lni), GFP_KERNEL);
+ if (!lni) {
+ pr_err("Can't alloc memory for LNI %d\n",
+ be32_to_cpu(range[0]) + i);
+ return -ENOMEM;
+ }
+ lni->idx = be32_to_cpu(range[0]) + i;
+ lni->dcp_idx = dcp_portal;
+ lni->is_claimed = 0;
+ INIT_LIST_HEAD(&lni->channels);
+ list_add_tail(&lni->node, &qman_ceetms[dcp_portal].lnis);
+ }
+ pr_debug("Qman: Reserve LNI %d:%d for CEETM %d\n",
+ be32_to_cpu(range[0]), be32_to_cpu(range[1]), dcp_portal);
+ qman_ceetms[dcp_portal].lni_range[0] = be32_to_cpu(range[0]);
+ qman_ceetms[dcp_portal].lni_range[1] = be32_to_cpu(range[1]);
+
+ /* Find CEETM channel range */
+ range = of_get_property(node, "fsl,ceetm-channel-range", &ret);
+ if (!range) {
+ pr_err("No fsl,ceetm-channel-range in node %s\n",
+ node->full_name);
+ return -EINVAL;
+ }
+ if (ret != 8) {
+ pr_err("fsl,ceetm-channel-range is not a 2-cell range in node"
+ "%s\n", node->full_name);
+ return -EINVAL;
+ }
+
+ if (dcp_portal == qm_dc_portal_fman0)
+ qman_seed_ceetm0_channel_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
+ if (dcp_portal == qm_dc_portal_fman1)
+ qman_seed_ceetm1_channel_range(be32_to_cpu(range[0]), be32_to_cpu(range[1]));
+ pr_debug("Qman: The channel allocator of CEETM %d includes"
+ " range %d:%d\n", dcp_portal, be32_to_cpu(range[0]), be32_to_cpu(range[1]));
+
+ /* Set CEETM PRES register */
+ ret = qman_ceetm_set_prescaler(dcp_portal);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+static void qman_get_ip_revision(struct device_node *dn)
+{
+ u16 ip_rev = 0;
+ u8 ip_cfg = QMAN_REV_CFG_0;
+ for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
+ if (!of_device_is_available(dn))
+ continue;
+ if (of_device_is_compatible(dn, "fsl,qman-portal-1.0") ||
+ of_device_is_compatible(dn, "fsl,qman-portal-1.0.0")) {
+ pr_err("QMAN rev1.0 on P4080 rev1 is not supported!\n");
+ BUG_ON(1);
+ } else if (of_device_is_compatible(dn, "fsl,qman-portal-1.1") ||
+ of_device_is_compatible(dn, "fsl,qman-portal-1.1.0")) {
+ ip_rev = QMAN_REV11;
+ qman_portal_max = 10;
+ } else if (of_device_is_compatible(dn, "fsl,qman-portal-1.2") ||
+ of_device_is_compatible(dn, "fsl,qman-portal-1.2.0")) {
+ ip_rev = QMAN_REV12;
+ qman_portal_max = 10;
+ } else if (of_device_is_compatible(dn, "fsl,qman-portal-2.0") ||
+ of_device_is_compatible(dn, "fsl,qman-portal-2.0.0")) {
+ ip_rev = QMAN_REV20;
+ qman_portal_max = 3;
+ } else if (of_device_is_compatible(dn,
+ "fsl,qman-portal-3.0.0")) {
+ ip_rev = QMAN_REV30;
+ qman_portal_max = 50;
+ } else if (of_device_is_compatible(dn,
+ "fsl,qman-portal-3.0.1")) {
+ ip_rev = QMAN_REV30;
+ qman_portal_max = 25;
+ ip_cfg = QMAN_REV_CFG_1;
+ } else if (of_device_is_compatible(dn,
+ "fsl,qman-portal-3.1.0")) {
+ ip_rev = QMAN_REV31;
+ qman_portal_max = 50;
+ } else if (of_device_is_compatible(dn,
+ "fsl,qman-portal-3.1.1")) {
+ ip_rev = QMAN_REV31;
+ qman_portal_max = 25;
+ ip_cfg = QMAN_REV_CFG_1;
+ } else if (of_device_is_compatible(dn,
+ "fsl,qman-portal-3.1.2")) {
+ ip_rev = QMAN_REV31;
+ qman_portal_max = 18;
+ ip_cfg = QMAN_REV_CFG_2;
+ } else if (of_device_is_compatible(dn,
+ "fsl,qman-portal-3.1.3")) {
+ ip_rev = QMAN_REV31;
+ qman_portal_max = 10;
+ ip_cfg = QMAN_REV_CFG_3;
+ } else if (of_device_is_compatible(dn,
+ "fsl,qman-portal-3.2.0")) {
+ ip_rev = QMAN_REV32;
+ qman_portal_max = 10;
+ ip_cfg = QMAN_REV_CFG_3; // TODO: Verify for ls1043
+ } else if (of_device_is_compatible(dn,
+ "fsl,qman-portal-3.2.1")) {
+ ip_rev = QMAN_REV32;
+ qman_portal_max = 10;
+ ip_cfg = QMAN_REV_CFG_3;
+ } else {
+ pr_warn("unknown QMan version in portal node,"
+ "default to rev1.1\n");
+ ip_rev = QMAN_REV11;
+ qman_portal_max = 10;
+ }
+
+ if (!qman_ip_rev) {
+ if (ip_rev) {
+ qman_ip_rev = ip_rev;
+ qman_ip_cfg = ip_cfg;
+ } else {
+ pr_warn("unknown Qman version,"
+ " default to rev1.1\n");
+ qman_ip_rev = QMAN_REV11;
+ qman_ip_cfg = QMAN_REV_CFG_0;
+ }
+ } else if (ip_rev && (qman_ip_rev != ip_rev))
+ pr_warn("Revision=0x%04x, but portal '%s' has"
+ " 0x%04x\n",
+ qman_ip_rev, dn->full_name, ip_rev);
+ if (qman_ip_rev == ip_rev)
+ break;
+ }
+}
+
+/* Parse a portal node, perform generic mapping duties and return the config. It
+ * is not known at this stage for what purpose (or even if) the portal will be
+ * used. */
+static struct qm_portal_config * __init parse_pcfg(struct device_node *node)
+{
+ struct qm_portal_config *pcfg;
+ const u32 *index_p;
+ u32 index, channel;
+ int irq, ret;
+ resource_size_t len;
+
+ pcfg = kmalloc(sizeof(*pcfg), GFP_KERNEL);
+ if (!pcfg) {
+ pr_err("can't allocate portal config");
+ return NULL;
+ }
+
+ /*
+ * This is a *horrible hack*, but the IOMMU/PAMU driver needs a
+ * 'struct device' in order to get the PAMU stashing setup and the QMan
+ * portal [driver] won't function at all without ring stashing
+ *
+ * Making the QMan portal driver nice and proper is part of the
+ * upstreaming effort
+ */
+ pcfg->dev.bus = &platform_bus_type;
+ pcfg->dev.of_node = node;
+#ifdef CONFIG_FSL_PAMU
+ pcfg->dev.archdata.iommu_domain = NULL;
+#endif
+
+ ret = of_address_to_resource(node, DPA_PORTAL_CE,
+ &pcfg->addr_phys[DPA_PORTAL_CE]);
+ if (ret) {
+ pr_err("Can't get %s property '%s'\n", node->full_name,
+ "reg::CE");
+ goto err;
+ }
+ ret = of_address_to_resource(node, DPA_PORTAL_CI,
+ &pcfg->addr_phys[DPA_PORTAL_CI]);
+ if (ret) {
+ pr_err("Can't get %s property '%s'\n", node->full_name,
+ "reg::CI");
+ goto err;
+ }
+ index_p = of_get_property(node, "cell-index", &ret);
+ if (!index_p || (ret != 4)) {
+ pr_err("Can't get %s property '%s'\n", node->full_name,
+ "cell-index");
+ goto err;
+ }
+ index = be32_to_cpu(*index_p);
+ if (index >= qman_portal_max) {
+ pr_err("QMan portal index %d is beyond max (%d)\n",
+ index, qman_portal_max);
+ goto err;
+ }
+
+ channel = index + QM_CHANNEL_SWPORTAL0;
+ pcfg->public_cfg.channel = channel;
+ pcfg->public_cfg.cpu = -1;
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq == 0) {
+ pr_err("Can't get %s property '%s'\n", node->full_name,
+ "interrupts");
+ goto err;
+ }
+ pcfg->public_cfg.irq = irq;
+ pcfg->public_cfg.index = index;
+#ifdef CONFIG_FSL_QMAN_CONFIG
+ /* We need the same LIODN offset for all portals */
+ qman_liodn_fixup(pcfg->public_cfg.channel);
+#endif
+
+ len = resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]);
+ if (len != (unsigned long)len)
+ goto err;
+
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ pcfg->addr_virt[DPA_PORTAL_CE] = ioremap_cache_ns(
+ pcfg->addr_phys[DPA_PORTAL_CE].start,
+ resource_size(&pcfg->addr_phys[DPA_PORTAL_CE]));
+
+ pcfg->addr_virt[DPA_PORTAL_CI] = ioremap(
+ pcfg->addr_phys[DPA_PORTAL_CI].start,
+ resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]));
+#else
+
+ pcfg->addr_virt[DPA_PORTAL_CE] =
+ memremap(pcfg->addr_phys[DPA_PORTAL_CE].start,
+ (unsigned long)len, MEMREMAP_WB);
+
+ pcfg->addr_virt[DPA_PORTAL_CI] =
+ ioremap(pcfg->addr_phys[DPA_PORTAL_CI].start,
+ resource_size(&pcfg->addr_phys[DPA_PORTAL_CI]));
+
+#endif
+ return pcfg;
+err:
+ kfree(pcfg);
+ return NULL;
+}
+
+static struct qm_portal_config *get_pcfg(struct list_head *list)
+{
+ struct qm_portal_config *pcfg;
+ if (list_empty(list))
+ return NULL;
+ pcfg = list_entry(list->prev, struct qm_portal_config, list);
+ list_del(&pcfg->list);
+ return pcfg;
+}
+
+static struct qm_portal_config *get_pcfg_idx(struct list_head *list, u32 idx)
+{
+ struct qm_portal_config *pcfg;
+ if (list_empty(list))
+ return NULL;
+ list_for_each_entry(pcfg, list, list) {
+ if (pcfg->public_cfg.index == idx) {
+ list_del(&pcfg->list);
+ return pcfg;
+ }
+ }
+ return NULL;
+}
+
+static void portal_set_cpu(struct qm_portal_config *pcfg, int cpu)
+{
+#ifdef CONFIG_FSL_PAMU
+ int ret;
+ int window_count = 1;
+ struct iommu_domain_geometry geom_attr;
+ struct pamu_stash_attribute stash_attr;
+
+ pcfg->iommu_domain = iommu_domain_alloc(&platform_bus_type);
+ if (!pcfg->iommu_domain) {
+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_alloc() failed",
+ __func__);
+ goto _no_iommu;
+ }
+ geom_attr.aperture_start = 0;
+ geom_attr.aperture_end =
+ ((dma_addr_t)1 << min(8 * sizeof(dma_addr_t), (size_t)36)) - 1;
+ geom_attr.force_aperture = true;
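+ /*
+ * Worked example (illustrative): with a 64-bit dma_addr_t, the min()
+ * above selects 36, so aperture_end = (1ULL << 36) - 1 = 0xfffffffff,
+ * i.e. the geometry covers the same 36-bit DMA address space that
+ * iommu_domain_window_enable() maps further down.
+ */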
+ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_GEOMETRY,
+ &geom_attr);
+ if (ret < 0) {
+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
+ __func__, ret);
+ goto _iommu_domain_free;
+ }
+ ret = iommu_domain_set_attr(pcfg->iommu_domain, DOMAIN_ATTR_WINDOWS,
+ &window_count);
+ if (ret < 0) {
+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
+ __func__, ret);
+ goto _iommu_domain_free;
+ }
+ stash_attr.cpu = cpu;
+ stash_attr.cache = PAMU_ATTR_CACHE_L1;
+ /* set stash information for the window */
+ stash_attr.window = 0;
+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
+ DOMAIN_ATTR_FSL_PAMU_STASH,
+ &stash_attr);
+ if (ret < 0) {
+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
+ __func__, ret);
+ goto _iommu_domain_free;
+ }
+ ret = iommu_domain_window_enable(pcfg->iommu_domain, 0, 0, 1ULL << 36,
+ IOMMU_READ | IOMMU_WRITE);
+ if (ret < 0) {
+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_window_enable() = %d",
+ __func__, ret);
+ goto _iommu_domain_free;
+ }
+ ret = iommu_attach_device(pcfg->iommu_domain, &pcfg->dev);
+ if (ret < 0) {
+ pr_err(KBUILD_MODNAME ":%s(): iommu_device_attach() = %d",
+ __func__, ret);
+ goto _iommu_domain_free;
+ }
+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
+ DOMAIN_ATTR_FSL_PAMU_ENABLE,
+ &window_count);
+ if (ret < 0) {
+ pr_err(KBUILD_MODNAME ":%s(): iommu_domain_set_attr() = %d",
+ __func__, ret);
+ goto _iommu_detach_device;
+ }
+
+_no_iommu:
+#endif
+#ifdef CONFIG_FSL_QMAN_CONFIG
+ if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
+#endif
+ pr_warn("Failed to set QMan portal's stash request queue\n");
+
+ return;
+
+#ifdef CONFIG_FSL_PAMU
+_iommu_detach_device:
+ iommu_detach_device(pcfg->iommu_domain, &pcfg->dev);
+_iommu_domain_free:
+ iommu_domain_free(pcfg->iommu_domain);
+#endif
+}
+
+struct qm_portal_config *qm_get_unused_portal_idx(u32 idx)
+{
+ struct qm_portal_config *ret;
+ spin_lock(&unused_pcfgs_lock);
+ if (idx == QBMAN_ANY_PORTAL_IDX)
+ ret = get_pcfg(&unused_pcfgs);
+ else
+ ret = get_pcfg_idx(&unused_pcfgs, idx);
+ spin_unlock(&unused_pcfgs_lock);
+ /* Bind stashing LIODNs to the CPU we are currently executing on, and
+ * set the portal to use the stashing request queue corresponding to the
+ * cpu as well. The user-space driver assumption is that the pthread has
+ * to already be affine to one cpu only before opening a portal. If that
+ * check is circumvented, the only risk is a performance degradation -
+ * stashing will go to whatever cpu they happened to be running on when
+ * opening the device file, and if that isn't the cpu they subsequently
+ * bind to and do their polling on, tough. */
+ if (ret)
+ portal_set_cpu(ret, hard_smp_processor_id());
+ return ret;
+}
+
+struct qm_portal_config *qm_get_unused_portal(void)
+{
+ return qm_get_unused_portal_idx(QBMAN_ANY_PORTAL_IDX);
+}
+
+void qm_put_unused_portal(struct qm_portal_config *pcfg)
+{
+ spin_lock(&unused_pcfgs_lock);
+ list_add(&pcfg->list, &unused_pcfgs);
+ spin_unlock(&unused_pcfgs_lock);
+}
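+
+/*
+ * Usage sketch (illustrative only - not a call site in this file): a
+ * consumer such as a user-space portal service claims a portal, maps it,
+ * and returns it on release:
+ *
+ *	struct qm_portal_config *pcfg = qm_get_unused_portal();
+ *	if (!pcfg)
+ *		return -ENODEV;
+ *	... map pcfg->addr_phys[DPA_PORTAL_CE/CI] for the caller ...
+ *	qm_put_unused_portal(pcfg);
+ */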
+
+static struct qman_portal *init_pcfg(struct qm_portal_config *pcfg)
+{
+ struct qman_portal *p;
+
+ pcfg->iommu_domain = NULL;
+ portal_set_cpu(pcfg, pcfg->public_cfg.cpu);
+ p = qman_create_affine_portal(pcfg, NULL);
+ if (p) {
+ u32 irq_sources = 0;
+ /* Determine what should be interrupt-vs-poll driven */
+#ifdef CONFIG_FSL_DPA_PIRQ_SLOW
+ irq_sources |= QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI |
+ QM_PIRQ_CSCI | QM_PIRQ_CCSCI;
+#endif
+#ifdef CONFIG_FSL_DPA_PIRQ_FAST
+ irq_sources |= QM_PIRQ_DQRI;
+#endif
+ qman_p_irqsource_add(p, irq_sources);
+ pr_info("Qman portal %sinitialised, cpu %d\n",
+ pcfg->public_cfg.is_shared ? "(shared) " : "",
+ pcfg->public_cfg.cpu);
+ } else
+ pr_crit("Qman portal failure on cpu %d\n",
+ pcfg->public_cfg.cpu);
+ return p;
+}
+
+static void init_slave(int cpu)
+{
+ struct qman_portal *p;
+ struct cpumask oldmask = current->cpus_mask;
+ set_cpus_allowed_ptr(current, get_cpu_mask(cpu));
+ p = qman_create_affine_slave(shared_portals[shared_portals_idx++], cpu);
+ if (!p)
+ pr_err("Qman slave portal failure on cpu %d\n", cpu);
+ else
+ pr_info("Qman portal %sinitialised, cpu %d\n", "(slave) ", cpu);
+ set_cpus_allowed_ptr(current, &oldmask);
+ if (shared_portals_idx >= num_shared_portals)
+ shared_portals_idx = 0;
+}
+
+static struct cpumask want_unshared __initdata;
+static struct cpumask want_shared __initdata;
+
+static int __init parse_qportals(char *str)
+{
+ return parse_portals_bootarg(str, &want_shared, &want_unshared,
+ "qportals");
+}
+__setup("qportals=", parse_qportals);
+
+static void qman_portal_update_sdest(const struct qm_portal_config *pcfg,
+ unsigned int cpu)
+{
+#ifdef CONFIG_FSL_PAMU
+ struct pamu_stash_attribute stash_attr;
+ int ret;
+
+ if (pcfg->iommu_domain) {
+ stash_attr.cpu = cpu;
+ stash_attr.cache = PAMU_ATTR_CACHE_L1;
+ /* set stash information for the window */
+ stash_attr.window = 0;
+ ret = iommu_domain_set_attr(pcfg->iommu_domain,
+ DOMAIN_ATTR_FSL_PAMU_STASH, &stash_attr);
+ if (ret < 0) {
+ pr_err("Failed to update pamu stash setting\n");
+ return;
+ }
+ }
+#endif
+#ifdef CONFIG_FSL_QMAN_CONFIG
+ if (qman_set_sdest(pcfg->public_cfg.channel, cpu))
+ pr_warn("Failed to update portal's stash request queue\n");
+#endif
+}
+
+static int qman_offline_cpu(unsigned int cpu)
+{
+ struct qman_portal *p;
+ const struct qm_portal_config *pcfg;
+ p = (struct qman_portal *)affine_portals[cpu];
+ if (p) {
+ pcfg = qman_get_qm_portal_config(p);
+ if (pcfg) {
+ irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(0));
+ qman_portal_update_sdest(pcfg, 0);
+ }
+ }
+ return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int qman_online_cpu(unsigned int cpu)
+{
+ struct qman_portal *p;
+ const struct qm_portal_config *pcfg;
+ p = (struct qman_portal *)affine_portals[cpu];
+ if (p) {
+ pcfg = qman_get_qm_portal_config(p);
+ if (pcfg) {
+ irq_set_affinity(pcfg->public_cfg.irq, cpumask_of(cpu));
+ qman_portal_update_sdest(pcfg, cpu);
+ }
+ }
+ return 0;
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+__init int qman_init(void)
+{
+ struct cpumask slave_cpus;
+ struct cpumask unshared_cpus = *cpu_none_mask;
+ struct cpumask shared_cpus = *cpu_none_mask;
+ LIST_HEAD(unshared_pcfgs);
+ LIST_HEAD(shared_pcfgs);
+ struct device_node *dn;
+ struct qm_portal_config *pcfg;
+ struct qman_portal *p;
+ int cpu, ret;
+ const u32 *clk;
+ struct cpumask offline_cpus;
+
+ /* Initialise the Qman (CCSR) device */
+ for_each_compatible_node(dn, NULL, "fsl,qman") {
+ if (!qman_init_ccsr(dn))
+ pr_info("Qman err interrupt handler present\n");
+ else
+ pr_err("Qman CCSR setup failed\n");
+
+ clk = of_get_property(dn, "clock-frequency", NULL);
+ if (!clk)
+ pr_warn("Can't find Qman clock frequency\n");
+ else
+ qman_clk = be32_to_cpu(*clk);
+ }
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ /* Setup lookup table for FQ demux */
+ ret = qman_setup_fq_lookup_table(get_qman_fqd_size()/64);
+ if (ret)
+ return ret;
+#endif
+
+ /* Get qman ip revision */
+ qman_get_ip_revision(dn);
+ if ((qman_ip_rev & 0xff00) >= QMAN_REV30) {
+ qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
+ qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
+ qm_channel_pme = QMAN_CHANNEL_PME_REV3;
+ }
+
+ if ((qman_ip_rev == QMAN_REV31) && (qman_ip_cfg == QMAN_REV_CFG_2))
+ qm_channel_dce = QMAN_CHANNEL_DCE_QMANREV312;
+
+ /*
+ * Parse the ceetm node to get how many ceetm instances are supported
+ * on the current silicon. num_ceetms must be confirmed before portals
+ * are initialised.
+ */
+ num_ceetms = 0;
+ for_each_compatible_node(dn, NULL, "fsl,qman-ceetm")
+ num_ceetms++;
+
+ /* Parse pool channels into the SDQCR mask. (Must happen before portals
+ * are initialised.) */
+ for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") {
+ ret = fsl_pool_channel_range_sdqcr(dn);
+ if (ret)
+ return ret;
+ }
+
+ memset(affine_portals, 0, sizeof(void *) * num_possible_cpus());
+ /* Initialise portals. See bman_driver.c for comments */
+ for_each_compatible_node(dn, NULL, "fsl,qman-portal") {
+ if (!of_device_is_available(dn))
+ continue;
+ pcfg = parse_pcfg(dn);
+ if (pcfg) {
+ pcfg->public_cfg.pools = pools_sdqcr;
+ list_add_tail(&pcfg->list, &unused_pcfgs);
+ }
+ }
+ for_each_possible_cpu(cpu) {
+ if (cpumask_test_cpu(cpu, &want_shared)) {
+ pcfg = get_pcfg(&unused_pcfgs);
+ if (!pcfg)
+ break;
+ pcfg->public_cfg.cpu = cpu;
+ list_add_tail(&pcfg->list, &shared_pcfgs);
+ cpumask_set_cpu(cpu, &shared_cpus);
+ }
+ if (cpumask_test_cpu(cpu, &want_unshared)) {
+ if (cpumask_test_cpu(cpu, &shared_cpus))
+ continue;
+ pcfg = get_pcfg(&unused_pcfgs);
+ if (!pcfg)
+ break;
+ pcfg->public_cfg.cpu = cpu;
+ list_add_tail(&pcfg->list, &unshared_pcfgs);
+ cpumask_set_cpu(cpu, &unshared_cpus);
+ }
+ }
+ if (list_empty(&shared_pcfgs) && list_empty(&unshared_pcfgs)) {
+ for_each_online_cpu(cpu) {
+ pcfg = get_pcfg(&unused_pcfgs);
+ if (!pcfg)
+ break;
+ pcfg->public_cfg.cpu = cpu;
+ list_add_tail(&pcfg->list, &unshared_pcfgs);
+ cpumask_set_cpu(cpu, &unshared_cpus);
+ }
+ }
+ cpumask_andnot(&slave_cpus, cpu_possible_mask, &shared_cpus);
+ cpumask_andnot(&slave_cpus, &slave_cpus, &unshared_cpus);
+ if (cpumask_empty(&slave_cpus)) {
+ if (!list_empty(&shared_pcfgs)) {
+ cpumask_or(&unshared_cpus, &unshared_cpus,
+ &shared_cpus);
+ cpumask_clear(&shared_cpus);
+ list_splice_tail(&shared_pcfgs, &unshared_pcfgs);
+ INIT_LIST_HEAD(&shared_pcfgs);
+ }
+ } else {
+ if (list_empty(&shared_pcfgs)) {
+ pcfg = get_pcfg(&unshared_pcfgs);
+ if (!pcfg) {
+ pr_crit("No QMan portals available!\n");
+ return 0;
+ }
+ cpumask_clear_cpu(pcfg->public_cfg.cpu, &unshared_cpus);
+ cpumask_set_cpu(pcfg->public_cfg.cpu, &shared_cpus);
+ list_add_tail(&pcfg->list, &shared_pcfgs);
+ }
+ }
+ list_for_each_entry(pcfg, &unshared_pcfgs, list) {
+ pcfg->public_cfg.is_shared = 0;
+ p = init_pcfg(pcfg);
+ if (!p) {
+ pr_crit("Unable to configure portals\n");
+ return 0;
+ }
+ }
+ list_for_each_entry(pcfg, &shared_pcfgs, list) {
+ pcfg->public_cfg.is_shared = 1;
+ p = init_pcfg(pcfg);
+ if (p)
+ shared_portals[num_shared_portals++] = p;
+ }
+ if (!cpumask_empty(&slave_cpus))
+ for_each_cpu(cpu, &slave_cpus)
+ init_slave(cpu);
+ pr_info("Qman portals initialised\n");
+ cpumask_andnot(&offline_cpus, cpu_possible_mask, cpu_online_mask);
+ for_each_cpu(cpu, &offline_cpus)
+ qman_offline_cpu(cpu);
+#ifdef CONFIG_HOTPLUG_CPU
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "soc/qman_portal:online",
+ qman_online_cpu, qman_offline_cpu);
+ if (ret < 0) {
+ pr_err("qman: failed to register hotplug callbacks.\n");
+ return ret;
+ }
+#endif
+ return 0;
+}
+
+__init int qman_resource_init(void)
+{
+ struct device_node *dn;
+ int ret;
+
+ /* Initialise FQID allocation ranges */
+ for_each_compatible_node(dn, NULL, "fsl,fqid-range") {
+ ret = fsl_fqid_range_init(dn);
+ if (ret)
+ return ret;
+ }
+ /* Initialise CGRID allocation ranges */
+ for_each_compatible_node(dn, NULL, "fsl,cgrid-range") {
+ ret = fsl_cgrid_range_init(dn);
+ if (ret)
+ return ret;
+ }
+ /* Parse pool channels into the allocator. (Must happen after portals
+ * are initialised.) */
+ for_each_compatible_node(dn, NULL, "fsl,pool-channel-range") {
+ ret = fsl_pool_channel_range_init(dn);
+ if (ret)
+ return ret;
+ }
+
+ /* Parse CEETM */
+ for_each_compatible_node(dn, NULL, "fsl,qman-ceetm") {
+ ret = fsl_ceetm_init(dn);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_SUSPEND
+void suspend_unused_qportal(void)
+{
+ struct qm_portal_config *pcfg;
+
+ if (list_empty(&unused_pcfgs))
+ return;
+
+ list_for_each_entry(pcfg, &unused_pcfgs, list) {
+#ifdef CONFIG_PM_DEBUG
+ pr_info("Need to save qportal %d\n", pcfg->public_cfg.index);
+#endif
+ /* save isdr, disable all via isdr, clear isr */
+ pcfg->saved_isdr =
+ __raw_readl(pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08);
+ __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] +
+ 0xe08);
+ __raw_writel(0xffffffff, pcfg->addr_virt[DPA_PORTAL_CI] +
+ 0xe00);
+ }
+ return;
+}
+
+void resume_unused_qportal(void)
+{
+ struct qm_portal_config *pcfg;
+
+ if (list_empty(&unused_pcfgs))
+ return;
+
+ list_for_each_entry(pcfg, &unused_pcfgs, list) {
+#ifdef CONFIG_PM_DEBUG
+ pr_info("Need to resume qportal %d\n", pcfg->public_cfg.index);
+#endif
+ /* restore isdr */
+ __raw_writel(pcfg->saved_isdr,
+ pcfg->addr_virt[DPA_PORTAL_CI] + 0xe08);
+ }
+ return;
+}
+#endif
diff --git a/drivers/staging/fsl_qbman/qman_high.c b/drivers/staging/fsl_qbman/qman_high.c
new file mode 100644
index 000000000000..85d1ea37d449
--- /dev/null
+++ b/drivers/staging/fsl_qbman/qman_high.c
@@ -0,0 +1,5660 @@
+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_low.h"
+
+/* Compilation constants */
+#define DQRR_MAXFILL 15
+#define EQCR_ITHRESH 4 /* if EQCR congests, interrupt threshold */
+#define IRQNAME "QMan portal %d"
+#define MAX_IRQNAME 16 /* big enough for "QMan portal %d" */
+
+/* Divide 'n' by 'd', rounding down if 'r' is negative, rounding up if it's
+ * positive, and rounding to the closest value if it's zero. NB, this macro
+ * implicitly upgrades parameters to unsigned 64-bit, so feed it with types
+ * that are compatible with this. NB, these arguments should not be expressions
+ * unless it is safe for them to be evaluated multiple times. Eg. do not pass
+ * in "some_value++" as a parameter to the macro! */
+#define ROUNDING(n, d, r) \
+ (((r) < 0) ? div64_u64((n), (d)) : \
+ (((r) > 0) ? div64_u64(((n) + (d) - 1), (d)) : \
+ div64_u64(((n) + ((d) / 2)), (d))))
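+/*
+ * Worked example (illustrative): for n = 100, d = 8,
+ * ROUNDING(100, 8, -1) == 12 (round down),
+ * ROUNDING(100, 8, 1) == 13 (round up, (100 + 7) / 8), and
+ * ROUNDING(100, 8, 0) == 13 (closest, (100 + 4) / 8 - the 12.5 tie
+ * rounds up).
+ */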
+
+/* Lock/unlock frame queues, subject to the "LOCKED" flag. This is about
+ * inter-processor locking only. Note, FQLOCK() is always called either under a
+ * local_irq_save() or from interrupt context - hence there's no need for irq
+ * protection (and indeed, attempting to nest irq-protection doesn't work, as
+ * the "irq en/disable" machinery isn't recursive...). */
+#define FQLOCK(fq) \
+ do { \
+ struct qman_fq *__fq478 = (fq); \
+ if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
+ spin_lock(&__fq478->fqlock); \
+ } while (0)
+#define FQUNLOCK(fq) \
+ do { \
+ struct qman_fq *__fq478 = (fq); \
+ if (fq_isset(__fq478, QMAN_FQ_FLAG_LOCKED)) \
+ spin_unlock(&__fq478->fqlock); \
+ } while (0)
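+
+/*
+ * Typical pairing (illustrative, not a real call site): FQ state updates
+ * are bracketed so that only FQs created with QMAN_FQ_FLAG_LOCKED pay the
+ * spinlock cost:
+ *
+ *	FQLOCK(fq);
+ *	fq->state = qman_fq_state_retired;
+ *	FQUNLOCK(fq);
+ */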
+
+static inline void fq_set(struct qman_fq *fq, u32 mask)
+{
+ set_bits(mask, &fq->flags);
+}
+static inline void fq_clear(struct qman_fq *fq, u32 mask)
+{
+ clear_bits(mask, &fq->flags);
+}
+static inline int fq_isset(struct qman_fq *fq, u32 mask)
+{
+ return fq->flags & mask;
+}
+static inline int fq_isclear(struct qman_fq *fq, u32 mask)
+{
+ return !(fq->flags & mask);
+}
+
+struct qman_portal {
+ struct qm_portal p;
+ unsigned long bits; /* PORTAL_BITS_*** - dynamic, strictly internal */
+ unsigned long irq_sources;
+ u32 use_eqcr_ci_stashing;
+ u32 slowpoll; /* only used when interrupts are off */
+ struct qman_fq *vdqcr_owned; /* only 1 volatile dequeue at a time */
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+ struct qman_fq *eqci_owned; /* only 1 enqueue WAIT_SYNC at a time */
+#endif
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+ raw_spinlock_t sharing_lock; /* only used if is_shared */
+ int is_shared;
+ struct qman_portal *sharing_redirect;
+#endif
+ u32 sdqcr;
+ int dqrr_disable_ref;
+ /* A portal-specific handler for DCP ERNs. If this is NULL, the global
+ * handler is called instead. */
+ qman_cb_dc_ern cb_dc_ern;
+ /* When the cpu-affine portal is activated, this is non-NULL */
+ const struct qm_portal_config *config;
+ /* This is needed for providing a non-NULL device to dma_map_***() */
+ struct platform_device *pdev;
+ struct dpa_rbtree retire_table;
+ char irqname[MAX_IRQNAME];
+ /* 2-element array. cgrs[0] is mask, cgrs[1] is snapshot. */
+ struct qman_cgrs *cgrs;
+ /* linked-list of CSCN handlers. */
+ struct list_head cgr_cbs;
+ /* list lock */
+ spinlock_t cgr_lock;
+ /* 2-element array. ccgrs[0] is mask, ccgrs[1] is snapshot. */
+ struct qman_ccgrs *ccgrs[QMAN_CEETM_MAX];
+ /* 256-element array, each is a linked-list of CCSCN handlers. */
+ struct list_head ccgr_cbs[QMAN_CEETM_MAX];
+ /* list lock */
+ spinlock_t ccgr_lock;
+ /* track if memory was allocated by the driver */
+ u8 alloced;
+ /* power management data */
+ u32 save_isdr;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ /* Keep a shadow copy of the DQRR on LE systems as the SW needs to
+ * do byte swaps of DQRR read-only memory. The first entry must be
+ * aligned to 2 ** 10 so that DQRR index calculations based on the
+ * shadow copy address work (6 bits for address shift + 4 bits for
+ * the DQRR size).
+ */
+ struct qm_dqrr_entry shadow_dqrr[QM_DQRR_SIZE] __aligned(1024);
+#endif
+};
+
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+#define PORTAL_IRQ_LOCK(p, irqflags) \
+ do { \
+ if ((p)->is_shared) \
+ raw_spin_lock_irqsave(&(p)->sharing_lock, irqflags); \
+ else \
+ local_irq_save(irqflags); \
+ } while (0)
+#define PORTAL_IRQ_UNLOCK(p, irqflags) \
+ do { \
+ if ((p)->is_shared) \
+ raw_spin_unlock_irqrestore(&(p)->sharing_lock, \
+ irqflags); \
+ else \
+ local_irq_restore(irqflags); \
+ } while (0)
+#else
+#define PORTAL_IRQ_LOCK(p, irqflags) local_irq_save(irqflags)
+#define PORTAL_IRQ_UNLOCK(p, irqflags) local_irq_restore(irqflags)
+#endif
+
+/* Global handler for DCP ERNs. Used when the portal receiving the message does
+ * not have a portal-specific handler. */
+static qman_cb_dc_ern cb_dc_ern;
+
+static cpumask_t affine_mask;
+static DEFINE_SPINLOCK(affine_mask_lock);
+static u16 affine_channels[NR_CPUS];
+static DEFINE_PER_CPU(struct qman_portal, qman_affine_portal);
+void *affine_portals[NR_CPUS];
+
+/* "raw" gets the cpu-local struct whether it's a redirect or not. */
+static inline struct qman_portal *get_raw_affine_portal(void)
+{
+ return &get_cpu_var(qman_affine_portal);
+}
+/* For ops that can redirect, this obtains the portal to use */
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+static inline struct qman_portal *get_affine_portal(void)
+{
+ struct qman_portal *p = get_raw_affine_portal();
+ if (p->sharing_redirect)
+ return p->sharing_redirect;
+ return p;
+}
+#else
+#define get_affine_portal() get_raw_affine_portal()
+#endif
+/* For every "get", there must be a "put" */
+static inline void put_affine_portal(void)
+{
+ put_cpu_var(qman_affine_portal);
+}
+/* Exception: poll functions assume the caller is cpu-affine and in no risk of
+ * re-entrance, which are the two reasons we usually use the get/put_cpu_var()
+ * semantic - ie. to disable pre-emption. Some use-cases expect the execution
+ * context to remain as non-atomic during poll-triggered callbacks as it was
+ * when the poll API was first called (eg. NAPI), so we go out of our way in
+ * this case to not disable pre-emption. */
+static inline struct qman_portal *get_poll_portal(void)
+{
+ return &get_cpu_var(qman_affine_portal);
+}
+#define put_poll_portal()
+
+/* This gives a FQID->FQ lookup to cover the fact that we can't directly demux
+ * retirement notifications (the fact they are sometimes h/w-consumed means that
+ * contextB isn't always a s/w demux - and as we can't know which case it is
+ * when looking at the notification, we have to use the slow lookup for all of
+ * them). NB, it's possible to have multiple FQ objects refer to the same FQID
+ * (though at most one of them should be the consumer), so this table isn't for
+ * all FQs - FQs are added when retirement commands are issued, and removed when
+ * they complete, which also massively reduces the size of this table. */
+IMPLEMENT_DPA_RBTREE(fqtree, struct qman_fq, node, fqid);
+
+/* This is what everything can wait on, even if it migrates to a different cpu
+ * to the one whose affine portal it is waiting on. */
+static DECLARE_WAIT_QUEUE_HEAD(affine_queue);
+
+static inline int table_push_fq(struct qman_portal *p, struct qman_fq *fq)
+{
+ int ret = fqtree_push(&p->retire_table, fq);
+ if (ret)
+ pr_err("ERROR: double FQ-retirement %d\n", fq->fqid);
+ return ret;
+}
+
+static inline void table_del_fq(struct qman_portal *p, struct qman_fq *fq)
+{
+ fqtree_del(&p->retire_table, fq);
+}
+
+static inline struct qman_fq *table_find_fq(struct qman_portal *p, u32 fqid)
+{
+ return fqtree_find(&p->retire_table, fqid);
+}
+
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+static void **qman_fq_lookup_table;
+static size_t qman_fq_lookup_table_size;
+
+int qman_setup_fq_lookup_table(size_t num_entries)
+{
+ /* Allocate 1 more entry since the first entry is not used */
+ num_entries++;
+ qman_fq_lookup_table = vzalloc((num_entries * sizeof(void *)));
+ if (!qman_fq_lookup_table) {
+ pr_err("QMan: Could not allocate fq lookup table\n");
+ return -ENOMEM;
+ }
+ qman_fq_lookup_table_size = num_entries;
+ pr_info("QMan: Allocated lookup table at %p, entry count %lu\n",
+ qman_fq_lookup_table,
+ (unsigned long)qman_fq_lookup_table_size);
+ return 0;
+}
+
+/* global structure that maintains fq object mapping */
+static DEFINE_SPINLOCK(fq_hash_table_lock);
+
+static int find_empty_fq_table_entry(u32 *entry, struct qman_fq *fq)
+{
+ u32 i;
+
+ spin_lock(&fq_hash_table_lock);
+ /* Can't use index zero because this has special meaning
+ * in context_b field. */
+ for (i = 1; i < qman_fq_lookup_table_size; i++) {
+ if (qman_fq_lookup_table[i] == NULL) {
+ *entry = i;
+ qman_fq_lookup_table[i] = fq;
+ spin_unlock(&fq_hash_table_lock);
+ return 0;
+ }
+ }
+ spin_unlock(&fq_hash_table_lock);
+ return -ENOMEM;
+}
+
+static void clear_fq_table_entry(u32 entry)
+{
+ spin_lock(&fq_hash_table_lock);
+ BUG_ON(entry >= qman_fq_lookup_table_size);
+ qman_fq_lookup_table[entry] = NULL;
+ spin_unlock(&fq_hash_table_lock);
+}
+
+static inline struct qman_fq *get_fq_table_entry(u32 entry)
+{
+ BUG_ON(entry >= qman_fq_lookup_table_size);
+ return qman_fq_lookup_table[entry];
+}
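+
+/*
+ * Illustrative flow (a sketch; the real call sites are in the FQ
+ * init/enqueue paths later in this file): the table index, not a kernel
+ * pointer, is what gets programmed into the FQD's context_b, so the
+ * 32-bit tag round-trips safely on 64-bit kernels:
+ *
+ *	u32 idx;
+ *	if (!find_empty_fq_table_entry(&idx, fq))
+ *		fqd.context_b = idx;	// later demuxed via
+ *					// get_fq_table_entry(idx)
+ */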
+#endif
+
+static inline void cpu_to_hw_fqd(struct qm_fqd *fqd)
+{
+ /* Byteswap the FQD to HW format */
+ fqd->fq_ctrl = cpu_to_be16(fqd->fq_ctrl);
+ fqd->dest_wq = cpu_to_be16(fqd->dest_wq);
+ fqd->ics_cred = cpu_to_be16(fqd->ics_cred);
+ fqd->context_b = cpu_to_be32(fqd->context_b);
+ fqd->context_a.opaque = cpu_to_be64(fqd->context_a.opaque);
+}
+
+static inline void hw_fqd_to_cpu(struct qm_fqd *fqd)
+{
+ /* Byteswap the FQD to CPU format */
+ fqd->fq_ctrl = be16_to_cpu(fqd->fq_ctrl);
+ fqd->dest_wq = be16_to_cpu(fqd->dest_wq);
+ fqd->ics_cred = be16_to_cpu(fqd->ics_cred);
+ fqd->context_b = be32_to_cpu(fqd->context_b);
+ fqd->context_a.opaque = be64_to_cpu(fqd->context_a.opaque);
+}
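+
+/*
+ * Intended pairing (a sketch, assuming the usual MC command flow used
+ * later in this file): swap to hardware format just before issuing an
+ * INITFQ management command, and back to CPU format after a QUERYFQ
+ * result is read:
+ *
+ *	cpu_to_hw_fqd(&mcc->initfq.fqd);	// before qm_mc_commit()
+ *	...
+ *	hw_fqd_to_cpu(&mcr->queryfq.fqd);	// after qm_mc_result()
+ */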
+
+/* Swap a 40 bit address */
+static inline u64 cpu_to_be40(u64 in)
+{
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return in;
+#else
+ u64 out = 0;
+ u8 *p = (u8 *) &out;
+ p[0] = in >> 32;
+ p[1] = in >> 24;
+ p[2] = in >> 16;
+ p[3] = in >> 8;
+ p[4] = in >> 0;
+ return out;
+#endif
+}
+static inline u64 be40_to_cpu(u64 in)
+{
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return in;
+#else
+ u64 out = 0;
+ u8 *pout = (u8 *) &out;
+ u8 *pin = (u8 *) &in;
+ pout[0] = pin[4];
+ pout[1] = pin[3];
+ pout[2] = pin[2];
+ pout[3] = pin[1];
+ pout[4] = pin[0];
+ return out;
+#endif
+}
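+
+/*
+ * Example (illustrative): on a little-endian CPU,
+ * cpu_to_be40(0x0000001234567890ULL) places bytes 12 34 56 78 90 at the
+ * five lowest addresses of the result, matching the layout of a 40-bit
+ * hardware address field; be40_to_cpu() is the exact inverse.
+ */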
+
+/* Swap a 24 bit value */
+static inline u32 cpu_to_be24(u32 in)
+{
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return in;
+#else
+ u32 out = 0;
+ u8 *p = (u8 *) &out;
+ p[0] = in >> 16;
+ p[1] = in >> 8;
+ p[2] = in >> 0;
+ return out;
+#endif
+}
+
+static inline u32 be24_to_cpu(u32 in)
+{
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return in;
+#else
+ u32 out = 0;
+ u8 *pout = (u8 *) &out;
+ u8 *pin = (u8 *) &in;
+ pout[0] = pin[2];
+ pout[1] = pin[1];
+ pout[2] = pin[0];
+ return out;
+#endif
+}
+
+static inline u64 be48_to_cpu(u64 in)
+{
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ return in;
+#else
+ u64 out = 0;
+ u8 *pout = (u8 *) &out;
+ u8 *pin = (u8 *) &in;
+
+ pout[0] = pin[5];
+ pout[1] = pin[4];
+ pout[2] = pin[3];
+ pout[3] = pin[2];
+ pout[4] = pin[1];
+ pout[5] = pin[0];
+ return out;
+#endif
+}
+static inline void cpu_to_hw_fd(struct qm_fd *fd)
+{
+ fd->opaque_addr = cpu_to_be64(fd->opaque_addr);
+ fd->status = cpu_to_be32(fd->status);
+ fd->opaque = cpu_to_be32(fd->opaque);
+}
+
+static inline void hw_fd_to_cpu(struct qm_fd *fd)
+{
+ fd->opaque_addr = be64_to_cpu(fd->opaque_addr);
+ fd->status = be32_to_cpu(fd->status);
+ fd->opaque = be32_to_cpu(fd->opaque);
+}
+
+static inline void hw_cq_query_to_cpu(struct qm_mcr_ceetm_cq_query *cq_query)
+{
+ cq_query->ccgid = be16_to_cpu(cq_query->ccgid);
+ cq_query->state = be16_to_cpu(cq_query->state);
+ cq_query->pfdr_hptr = be24_to_cpu(cq_query->pfdr_hptr);
+ cq_query->pfdr_tptr = be24_to_cpu(cq_query->pfdr_tptr);
+ cq_query->od1_xsfdr = be16_to_cpu(cq_query->od1_xsfdr);
+ cq_query->od2_xsfdr = be16_to_cpu(cq_query->od2_xsfdr);
+ cq_query->od3_xsfdr = be16_to_cpu(cq_query->od3_xsfdr);
+ cq_query->od4_xsfdr = be16_to_cpu(cq_query->od4_xsfdr);
+ cq_query->od5_xsfdr = be16_to_cpu(cq_query->od5_xsfdr);
+ cq_query->od6_xsfdr = be16_to_cpu(cq_query->od6_xsfdr);
+ cq_query->ra1_xsfdr = be16_to_cpu(cq_query->ra1_xsfdr);
+ cq_query->ra2_xsfdr = be16_to_cpu(cq_query->ra2_xsfdr);
+ cq_query->frm_cnt = be24_to_cpu(cq_query->frm_cnt);
+}
+
+static inline void hw_ccgr_query_to_cpu(struct qm_mcr_ceetm_ccgr_query *ccgr_q)
+{
+ int i;
+
+ ccgr_q->cm_query.cs_thres.hword =
+ be16_to_cpu(ccgr_q->cm_query.cs_thres.hword);
+ ccgr_q->cm_query.cs_thres_x.hword =
+ be16_to_cpu(ccgr_q->cm_query.cs_thres_x.hword);
+ ccgr_q->cm_query.td_thres.hword =
+ be16_to_cpu(ccgr_q->cm_query.td_thres.hword);
+ ccgr_q->cm_query.wr_parm_g.word =
+ be32_to_cpu(ccgr_q->cm_query.wr_parm_g.word);
+ ccgr_q->cm_query.wr_parm_y.word =
+ be32_to_cpu(ccgr_q->cm_query.wr_parm_y.word);
+ ccgr_q->cm_query.wr_parm_r.word =
+ be32_to_cpu(ccgr_q->cm_query.wr_parm_r.word);
+ ccgr_q->cm_query.cscn_targ_dcp =
+ be16_to_cpu(ccgr_q->cm_query.cscn_targ_dcp);
+ ccgr_q->cm_query.i_cnt = be40_to_cpu(ccgr_q->cm_query.i_cnt);
+ ccgr_q->cm_query.a_cnt = be40_to_cpu(ccgr_q->cm_query.a_cnt);
+ for (i = 0; i < ARRAY_SIZE(ccgr_q->cm_query.cscn_targ_swp); i++)
+ ccgr_q->cm_query.cscn_targ_swp[i] =
+ be32_to_cpu(ccgr_q->cm_query.cscn_targ_swp[i]);
+}
+
+/* In the case that slow- and fast-path handling are both done by qman_poll()
+ * (ie. because there is no interrupt handling), we ought to balance how often
+ * we do the fast-path poll versus the slow-path poll. We'll use two decrementer
+ * sources, so we call the fast poll 'n' times before calling the slow poll
+ * once. The idle decrementer constant is used when the last slow-poll detected
+ * no work to do, and the busy decrementer constant when the last slow-poll had
+ * work to do. */
+#define SLOW_POLL_IDLE 1000
+#define SLOW_POLL_BUSY 10
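+/*
+ * A minimal sketch (assumed shape - the real dispatch lives in the
+ * qman_poll() family further down) of how these constants balance the
+ * two paths:
+ *
+ *	if (!--p->slowpoll) {
+ *		u32 active = __poll_portal_slow(p, qm_isr_status_read(&p->p));
+ *		p->slowpoll = active ? SLOW_POLL_BUSY : SLOW_POLL_IDLE;
+ *	}
+ *	__poll_portal_fast(p, CONFIG_FSL_QMAN_POLL_LIMIT);
+ */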
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit);
+
+/* Portal interrupt handler */
+static irqreturn_t portal_isr(__always_unused int irq, void *ptr)
+{
+ struct qman_portal *p = ptr;
+ /*
+ * The CSCI/CCSCI source is cleared inside __poll_portal_slow(), because
+ * it could race against a Query Congestion State command also given
+ * as part of the handling of this interrupt source. We mustn't
+ * clear it a second time in this top-level function.
+ */
+ u32 clear = QM_DQAVAIL_MASK | (p->irq_sources &
+ ~(QM_PIRQ_CSCI | QM_PIRQ_CCSCI));
+ u32 is = qm_isr_status_read(&p->p) & p->irq_sources;
+ /* DQRR-handling if it's interrupt-driven */
+ if (is & QM_PIRQ_DQRI)
+ __poll_portal_fast(p, CONFIG_FSL_QMAN_POLL_LIMIT);
+ /* Handling of anything else that's interrupt-driven */
+ clear |= __poll_portal_slow(p, is);
+ qm_isr_status_clear(&p->p, clear);
+ return IRQ_HANDLED;
+}
+
+/* This inner version is used privately by qman_create_affine_portal(), as well
+ * as by the exported qman_stop_dequeues(). */
+static inline void qman_stop_dequeues_ex(struct qman_portal *p)
+{
+ unsigned long irqflags __maybe_unused;
+ PORTAL_IRQ_LOCK(p, irqflags);
+ if (!(p->dqrr_disable_ref++))
+ qm_dqrr_set_maxfill(&p->p, 0);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+}
+
+static int drain_mr_fqrni(struct qm_portal *p)
+{
+ const struct qm_mr_entry *msg;
+loop:
+ msg = qm_mr_current(p);
+ if (!msg) {
+ /* if MR was full and h/w had other FQRNI entries to produce, we
+ * need to allow it time to produce those entries once the
+ * existing entries are consumed. A worst-case situation
+ * (fully-loaded system) means h/w sequencers may have to do 3-4
+ * other things before servicing the portal's MR pump, each of
+ * which (if slow) may take ~50 qman cycles (which is ~200
+ * processor cycles). So rounding up and then multiplying this
+ * worst-case estimate by a factor of 10, just to be
+ * ultra-paranoid, goes as high as 10,000 cycles. NB, we consume
+ * one entry at a time, so h/w has an opportunity to produce new
+ * entries well before the ring has been fully consumed, so
+ * we're being *really* paranoid here. */
+ u64 now, then = mfatb();
+ do {
+ now = mfatb();
+ } while ((then + 10000) > now);
+ msg = qm_mr_current(p);
+ if (!msg)
+ return 0;
+ }
+ if ((msg->verb & QM_MR_VERB_TYPE_MASK) != QM_MR_VERB_FQRNI) {
+ /* We aren't draining anything but FQRNIs */
+ pr_err("QMan found verb 0x%x in MR\n", msg->verb);
+ return -1;
+ }
+ qm_mr_next(p);
+ qm_mr_cci_consume(p, 1);
+ goto loop;
+}
+
+#ifdef CONFIG_SUSPEND
+static int _qman_portal_suspend_noirq(struct device *dev)
+{
+ struct qman_portal *p = (struct qman_portal *)dev->platform_data;
+#ifdef CONFIG_PM_DEBUG
+ struct platform_device *pdev = to_platform_device(dev);
+#endif
+
+ p->save_isdr = qm_isr_disable_read(&p->p);
+ qm_isr_disable_write(&p->p, 0xffffffff);
+ qm_isr_status_clear(&p->p, 0xffffffff);
+#ifdef CONFIG_PM_DEBUG
+ pr_info("Suspend for %s\n", pdev->name);
+#endif
+ return 0;
+}
+
+static int _qman_portal_resume_noirq(struct device *dev)
+{
+ struct qman_portal *p = (struct qman_portal *)dev->platform_data;
+
+ /* restore isdr */
+ qm_isr_disable_write(&p->p, p->save_isdr);
+ return 0;
+}
+#else
+#define _qman_portal_suspend_noirq NULL
+#define _qman_portal_resume_noirq NULL
+#endif
+
+struct dev_pm_domain qman_portal_device_pm_domain = {
+ .ops = {
+ USE_PLATFORM_PM_SLEEP_OPS
+ .suspend_noirq = _qman_portal_suspend_noirq,
+ .resume_noirq = _qman_portal_resume_noirq,
+ }
+};
+
+struct qman_portal *qman_create_portal(
+ struct qman_portal *portal,
+ const struct qm_portal_config *config,
+ const struct qman_cgrs *cgrs)
+{
+ struct qm_portal *__p;
+ char buf[16];
+ int ret;
+ u32 isdr;
+ struct platform_device_info pdev_info;
+
+ if (!portal) {
+ portal = kmalloc(sizeof(*portal), GFP_KERNEL);
+ if (!portal)
+ return portal;
+ portal->alloced = 1;
+ } else
+ portal->alloced = 0;
+
+ __p = &portal->p;
+
+#if (defined CONFIG_PPC || defined CONFIG_PPC64) && defined CONFIG_FSL_PAMU
+ /* PAMU is required for stashing */
+ portal->use_eqcr_ci_stashing = ((qman_ip_rev >= QMAN_REV30) ?
+ 1 : 0);
+#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ portal->use_eqcr_ci_stashing = 1;
+#else
+ portal->use_eqcr_ci_stashing = 0;
+#endif
+
+ /* prep the low-level portal struct with the mapped addresses from the
+ * config, everything that follows depends on it and "config" is more
+ * for (de)reference... */
+ __p->addr.addr_ce = config->addr_virt[DPA_PORTAL_CE];
+ __p->addr.addr_ci = config->addr_virt[DPA_PORTAL_CI];
+ /*
+ * If CI-stashing is used, the current defaults use a threshold of 3,
+ * and stash with higher-than-DQRR priority.
+ */
+ if (qm_eqcr_init(__p, qm_eqcr_pvb,
+ portal->use_eqcr_ci_stashing ? 3 : 0, 1)) {
+ pr_err("Qman EQCR initialisation failed\n");
+ goto fail_eqcr;
+ }
+ if (qm_dqrr_init(__p, config, qm_dqrr_dpush, qm_dqrr_pvb,
+ qm_dqrr_cdc, DQRR_MAXFILL)) {
+ pr_err("Qman DQRR initialisation failed\n");
+ goto fail_dqrr;
+ }
+ if (qm_mr_init(__p, qm_mr_pvb, qm_mr_cci)) {
+ pr_err("Qman MR initialisation failed\n");
+ goto fail_mr;
+ }
+ if (qm_mc_init(__p)) {
+ pr_err("Qman MC initialisation failed\n");
+ goto fail_mc;
+ }
+ if (qm_isr_init(__p)) {
+ pr_err("Qman ISR initialisation failed\n");
+ goto fail_isr;
+ }
+ /* static interrupt-gating controls */
+ qm_dqrr_set_ithresh(__p, CONFIG_FSL_QMAN_PIRQ_DQRR_ITHRESH);
+ qm_mr_set_ithresh(__p, CONFIG_FSL_QMAN_PIRQ_MR_ITHRESH);
+ qm_isr_set_iperiod(__p, CONFIG_FSL_QMAN_PIRQ_IPERIOD);
+ portal->cgrs = kmalloc(2 * sizeof(*cgrs), GFP_KERNEL);
+ if (!portal->cgrs)
+ goto fail_cgrs;
+ /* initial snapshot is no-depletion */
+ qman_cgrs_init(&portal->cgrs[1]);
+ if (cgrs)
+ portal->cgrs[0] = *cgrs;
+ else
+ /* if the given mask is NULL, assume all CGRs can be seen */
+ qman_cgrs_fill(&portal->cgrs[0]);
+ INIT_LIST_HEAD(&portal->cgr_cbs);
+ spin_lock_init(&portal->cgr_lock);
+ if (num_ceetms) {
+ for (ret = 0; ret < num_ceetms; ret++) {
+ portal->ccgrs[ret] = kmalloc(2 *
+ sizeof(struct qman_ccgrs), GFP_KERNEL);
+ if (!portal->ccgrs[ret])
+ goto fail_ccgrs;
+ qman_ccgrs_init(&portal->ccgrs[ret][1]);
+ qman_ccgrs_fill(&portal->ccgrs[ret][0]);
+ INIT_LIST_HEAD(&portal->ccgr_cbs[ret]);
+ }
+ }
+ spin_lock_init(&portal->ccgr_lock);
+ portal->bits = 0;
+ portal->slowpoll = 0;
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+ portal->eqci_owned = NULL;
+#endif
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+ raw_spin_lock_init(&portal->sharing_lock);
+ portal->is_shared = config->public_cfg.is_shared;
+ portal->sharing_redirect = NULL;
+#endif
+ portal->sdqcr = QM_SDQCR_SOURCE_CHANNELS | QM_SDQCR_COUNT_UPTO3 |
+ QM_SDQCR_DEDICATED_PRECEDENCE | QM_SDQCR_TYPE_PRIO_QOS |
+ QM_SDQCR_TOKEN_SET(0xab) | QM_SDQCR_CHANNELS_DEDICATED;
+ portal->dqrr_disable_ref = 0;
+ portal->cb_dc_ern = NULL;
+ sprintf(buf, "qportal-%d", config->public_cfg.channel);
+
+ memset(&pdev_info, 0, sizeof(pdev_info));
+ pdev_info.name = buf;
+ pdev_info.id = PLATFORM_DEVID_NONE;
+ pdev_info.dma_mask = DMA_BIT_MASK(40);
+
+ portal->pdev = platform_device_register_full(&pdev_info);
+ if (IS_ERR(portal->pdev)) {
+ pr_err("qman_portal - platform_device_register_full() failed\n");
+ goto fail_devregister;
+ }
+
+ arch_setup_dma_ops(&portal->pdev->dev, 0, 0, NULL, true);
+
+ portal->pdev->dev.pm_domain = &qman_portal_device_pm_domain;
+ portal->pdev->dev.platform_data = portal;
+ dpa_rbtree_init(&portal->retire_table);
+ isdr = 0xffffffff;
+ qm_isr_disable_write(__p, isdr);
+ portal->irq_sources = 0;
+ qm_isr_enable_write(__p, portal->irq_sources);
+ qm_isr_status_clear(__p, 0xffffffff);
+ snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, config->public_cfg.cpu);
+ if (request_irq(config->public_cfg.irq, portal_isr, 0, portal->irqname,
+ portal)) {
+ pr_err("request_irq() failed\n");
+ goto fail_irq;
+ }
+ if ((config->public_cfg.cpu != -1) &&
+ irq_can_set_affinity(config->public_cfg.irq) &&
+ irq_set_affinity(config->public_cfg.irq,
+ cpumask_of(config->public_cfg.cpu))) {
+ pr_err("irq_set_affinity() failed\n");
+ goto fail_affinity;
+ }
+
+ /* Need EQCR to be empty before continuing */
+ isdr ^= QM_PIRQ_EQCI;
+ qm_isr_disable_write(__p, isdr);
+ ret = qm_eqcr_get_fill(__p);
+ if (ret) {
+ pr_err("Qman EQCR unclean\n");
+ goto fail_eqcr_empty;
+ }
+ isdr ^= (QM_PIRQ_DQRI | QM_PIRQ_MRI);
+ qm_isr_disable_write(__p, isdr);
+ if (qm_dqrr_current(__p) != NULL) {
+ pr_err("Qman DQRR unclean\n");
+ qm_dqrr_cdc_consume_n(__p, 0xffff);
+ }
+ if (qm_mr_current(__p) != NULL) {
+ /* special handling, drain just in case it's a few FQRNIs */
+ if (drain_mr_fqrni(__p)) {
+ const struct qm_mr_entry *e = qm_mr_current(__p);
+ /*
+ * The message ring cannot be empty here, so there is no
+ * need to check that qm_mr_current() returned non-NULL
+ */
+ pr_err("Qman MR unclean, MR VERB 0x%x, rc 0x%x, addr 0x%x\n",
+ e->verb, e->ern.rc, e->ern.fd.addr_lo);
+ goto fail_dqrr_mr_empty;
+ }
+ }
+ /* Success */
+ portal->config = config;
+ /*
+ * Re-enable ("undisable") all IRQ sources except the dequeue-
+ * available bits: those cause problems with sleep mode if left
+ * enabled, and since they are not used in push mode we can
+ * safely keep them off
+ */
+ qm_isr_disable_write(__p, QM_DQAVAIL_MASK);
+ qm_isr_uninhibit(__p);
+ /* Write a sane SDQCR */
+ qm_dqrr_sdqcr_set(__p, portal->sdqcr);
+ return portal;
+fail_dqrr_mr_empty:
+fail_eqcr_empty:
+fail_affinity:
+ free_irq(config->public_cfg.irq, portal);
+fail_irq:
+ platform_device_unregister(portal->pdev);
+fail_devregister:
+ if (num_ceetms)
+ for (ret = 0; ret < num_ceetms; ret++)
+ kfree(portal->ccgrs[ret]);
+fail_ccgrs:
+ kfree(portal->cgrs);
+fail_cgrs:
+ qm_isr_finish(__p);
+fail_isr:
+ qm_mc_finish(__p);
+fail_mc:
+ qm_mr_finish(__p);
+fail_mr:
+ qm_dqrr_finish(__p);
+fail_dqrr:
+ qm_eqcr_finish(__p);
+fail_eqcr:
+ if (portal->alloced)
+ kfree(portal);
+ return NULL;
+}
+
+struct qman_portal *qman_create_affine_portal(
+ const struct qm_portal_config *config,
+ const struct qman_cgrs *cgrs)
+{
+ struct qman_portal *res;
+ struct qman_portal *portal;
+
+ portal = &per_cpu(qman_affine_portal, config->public_cfg.cpu);
+ res = qman_create_portal(portal, config, cgrs);
+ if (res) {
+ spin_lock(&affine_mask_lock);
+ cpumask_set_cpu(config->public_cfg.cpu, &affine_mask);
+ affine_channels[config->public_cfg.cpu] =
+ config->public_cfg.channel;
+ affine_portals[config->public_cfg.cpu] = portal;
+ spin_unlock(&affine_mask_lock);
+ }
+ return res;
+}
+
+/* These checks are BUG_ON()s because the driver is already supposed to avoid
+ * these cases. */
+struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
+ int cpu)
+{
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+ struct qman_portal *p;
+ p = &per_cpu(qman_affine_portal, cpu);
+ /* Check that we don't already have our own portal */
+ BUG_ON(p->config);
+ /* Check that we aren't already slaving to another portal */
+ BUG_ON(p->is_shared);
+ /* Check that 'redirect' is prepared to have us */
+ BUG_ON(!redirect->config->public_cfg.is_shared);
+ /* These are the only elements to initialise when redirecting */
+ p->irq_sources = 0;
+ p->sharing_redirect = redirect;
+ affine_portals[cpu] = p;
+ return p;
+#else
+ BUG();
+ return NULL;
+#endif
+}
+
+void qman_destroy_portal(struct qman_portal *qm)
+{
+ const struct qm_portal_config *pcfg;
+ int i;
+
+ /* Stop dequeues on the portal */
+ qm_dqrr_sdqcr_set(&qm->p, 0);
+
+ /* NB we do this to "quiesce" EQCR. If we add enqueue-completions or
+ * something related to QM_PIRQ_EQCI, this may need fixing.
+ * Also, due to the prefetching model used for CI updates in the enqueue
+ * path, this update will only invalidate the CI cacheline *after*
+ * working on it, so we need to call this twice to ensure a full update
+ * irrespective of where the enqueue processing was at when the teardown
+ * began. */
+ qm_eqcr_cce_update(&qm->p);
+ qm_eqcr_cce_update(&qm->p);
+ pcfg = qm->config;
+
+ free_irq(pcfg->public_cfg.irq, qm);
+
+ kfree(qm->cgrs);
+ if (num_ceetms)
+ for (i = 0; i < num_ceetms; i++)
+ kfree(qm->ccgrs[i]);
+ qm_isr_finish(&qm->p);
+ qm_mc_finish(&qm->p);
+ qm_mr_finish(&qm->p);
+ qm_dqrr_finish(&qm->p);
+ qm_eqcr_finish(&qm->p);
+
+ platform_device_unregister(qm->pdev);
+
+ qm->config = NULL;
+ if (qm->alloced)
+ kfree(qm);
+}
+
+const struct qm_portal_config *qman_destroy_affine_portal(void)
+{
+ /* We don't want to redirect if we're a slave, use "raw" */
+ struct qman_portal *qm = get_raw_affine_portal();
+ const struct qm_portal_config *pcfg;
+ int cpu;
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+ if (qm->sharing_redirect) {
+ qm->sharing_redirect = NULL;
+ put_affine_portal();
+ return NULL;
+ }
+ qm->is_shared = 0;
+#endif
+ pcfg = qm->config;
+ cpu = pcfg->public_cfg.cpu;
+
+ qman_destroy_portal(qm);
+
+ spin_lock(&affine_mask_lock);
+ cpumask_clear_cpu(cpu, &affine_mask);
+ spin_unlock(&affine_mask_lock);
+ put_affine_portal();
+ return pcfg;
+}
+
+const struct qman_portal_config *qman_p_get_portal_config(struct qman_portal *p)
+{
+ return &p->config->public_cfg;
+}
+EXPORT_SYMBOL(qman_p_get_portal_config);
+
+const struct qman_portal_config *qman_get_portal_config(void)
+{
+ struct qman_portal *p = get_affine_portal();
+ const struct qman_portal_config *ret = qman_p_get_portal_config(p);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_get_portal_config);
+
+/* Inline helper to reduce nesting in __poll_portal_slow() */
+static inline void fq_state_change(struct qman_portal *p, struct qman_fq *fq,
+ const struct qm_mr_entry *msg, u8 verb)
+{
+ FQLOCK(fq);
+ switch (verb) {
+ case QM_MR_VERB_FQRL:
+ DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_ORL));
+ fq_clear(fq, QMAN_FQ_STATE_ORL);
+ table_del_fq(p, fq);
+ break;
+ case QM_MR_VERB_FQRN:
+ DPA_ASSERT((fq->state == qman_fq_state_parked) ||
+ (fq->state == qman_fq_state_sched));
+ DPA_ASSERT(fq_isset(fq, QMAN_FQ_STATE_CHANGING));
+ fq_clear(fq, QMAN_FQ_STATE_CHANGING);
+ if (msg->fq.fqs & QM_MR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (msg->fq.fqs & QM_MR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ else
+ table_del_fq(p, fq);
+ fq->state = qman_fq_state_retired;
+ break;
+ case QM_MR_VERB_FQPN:
+ DPA_ASSERT(fq->state == qman_fq_state_sched);
+ DPA_ASSERT(fq_isclear(fq, QMAN_FQ_STATE_CHANGING));
+ fq->state = qman_fq_state_parked;
+ }
+ FQUNLOCK(fq);
+}
+
+static u32 __poll_portal_slow(struct qman_portal *p, u32 is)
+{
+ const struct qm_mr_entry *msg;
+ struct qm_mr_entry swapped_msg;
+ int k;
+
+ if (is & QM_PIRQ_CSCI) {
+ struct qman_cgrs rr, c;
+ struct qm_mc_result *mcr;
+ struct qman_cgr *cgr;
+ unsigned long irqflags __maybe_unused;
+
+ spin_lock_irqsave(&p->cgr_lock, irqflags);
+ /*
+ * The CSCI bit must be cleared _before_ issuing the
+ * Query Congestion State command, to ensure that a long
+ * CGR State Change callback cannot miss an intervening
+ * state change.
+ */
+ qm_isr_status_clear(&p->p, QM_PIRQ_CSCI);
+ qm_mc_start(&p->p);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ for (k = 0; k < 8; k++)
+ mcr->querycongestion.state.__state[k] = be32_to_cpu(
+ mcr->querycongestion.state.__state[k]);
+ /* mask out the ones I'm not interested in */
+ qman_cgrs_and(&rr, (const struct qman_cgrs *)
+ &mcr->querycongestion.state, &p->cgrs[0]);
+ /* check previous snapshot for delta, enter/exit congestion */
+ qman_cgrs_xor(&c, &rr, &p->cgrs[1]);
+ /* update snapshot */
+ qman_cgrs_cp(&p->cgrs[1], &rr);
+ /* Invoke callback */
+ list_for_each_entry(cgr, &p->cgr_cbs, node)
+ if (cgr->cb && qman_cgrs_get(&c, cgr->cgrid))
+ cgr->cb(p, cgr, qman_cgrs_get(&rr, cgr->cgrid));
+ spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+ }
+ if (is & QM_PIRQ_CCSCI) {
+ struct qman_ccgrs rr, c, congestion_result;
+ struct qm_mc_result *mcr;
+ struct qm_mc_command *mcc;
+ struct qm_ceetm_ccg *ccg;
+ unsigned long irqflags __maybe_unused;
+ int i, j;
+
+ spin_lock_irqsave(&p->ccgr_lock, irqflags);
+ /*
+ * The CCSCI bit must be cleared _before_ issuing the
+ * Query Congestion State command, to ensure that a long
+ * CCGR State Change callback cannot miss an intervening
+ * state change.
+ */
+ qm_isr_status_clear(&p->p, QM_PIRQ_CCSCI);
+
+ for (i = 0; i < num_ceetms; i++) {
+ for (j = 0; j < 2; j++) {
+ mcc = qm_mc_start(&p->p);
+ mcc->ccgr_query.ccgrid = cpu_to_be16(
+ CEETM_QUERY_CONGESTION_STATE | j);
+ mcc->ccgr_query.dcpid = i;
+ qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_QUERY);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ for (k = 0; k < 8; k++)
+ mcr->ccgr_query.congestion_state.state.
+ __state[k] = be32_to_cpu(
+ mcr->ccgr_query.
+ congestion_state.state.
+ __state[k]);
+ congestion_result.q[j] =
+ mcr->ccgr_query.congestion_state.state;
+ }
+ /* mask out the ones I'm not interested in */
+ qman_ccgrs_and(&rr, &congestion_result,
+ &p->ccgrs[i][0]);
+ /*
+ * check previous snapshot for delta, enter/exit
+ * congestion.
+ */
+ qman_ccgrs_xor(&c, &rr, &p->ccgrs[i][1]);
+ /* update snapshot */
+ qman_ccgrs_cp(&p->ccgrs[i][1], &rr);
+ /* Invoke callback */
+ list_for_each_entry(ccg, &p->ccgr_cbs[i], cb_node)
+ if (ccg->cb && qman_ccgrs_get(&c,
+ (ccg->parent->idx << 4) | ccg->idx))
+ ccg->cb(ccg, ccg->cb_ctx,
+ qman_ccgrs_get(&rr,
+ (ccg->parent->idx << 4)
+ | ccg->idx));
+ }
+ spin_unlock_irqrestore(&p->ccgr_lock, irqflags);
+ }
+
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+ if (is & QM_PIRQ_EQCI) {
+ unsigned long irqflags;
+ PORTAL_IRQ_LOCK(p, irqflags);
+ p->eqci_owned = NULL;
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ wake_up(&affine_queue);
+ }
+#endif
+
+ if (is & QM_PIRQ_EQRI) {
+ unsigned long irqflags __maybe_unused;
+ PORTAL_IRQ_LOCK(p, irqflags);
+ qm_eqcr_cce_update(&p->p);
+ qm_eqcr_set_ithresh(&p->p, 0);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ wake_up(&affine_queue);
+ }
+
+ if (is & QM_PIRQ_MRI) {
+ struct qman_fq *fq;
+ u8 verb, num = 0;
+mr_loop:
+ qm_mr_pvb_update(&p->p);
+ msg = qm_mr_current(&p->p);
+ if (!msg)
+ goto mr_done;
+ swapped_msg = *msg;
+ hw_fd_to_cpu(&swapped_msg.ern.fd);
+ verb = msg->verb & QM_MR_VERB_TYPE_MASK;
+ /* The message is a software ERN iff the 0x20 bit is clear */
+ if (verb & 0x20) {
+ switch (verb) {
+ case QM_MR_VERB_FQRNI:
+ /* nada, we drop FQRNIs on the floor */
+ break;
+ case QM_MR_VERB_FQRN:
+ case QM_MR_VERB_FQRL:
+ /* Lookup in the retirement table */
+ fq = table_find_fq(p, be32_to_cpu(msg->fq.fqid));
+ BUG_ON(!fq);
+ fq_state_change(p, fq, &swapped_msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, &swapped_msg);
+ break;
+ case QM_MR_VERB_FQPN:
+ /* Parked */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = get_fq_table_entry(
+ be32_to_cpu(msg->fq.contextB));
+#else
+ fq = (void *)(uintptr_t)
+ be32_to_cpu(msg->fq.contextB);
+#endif
+ fq_state_change(p, fq, msg, verb);
+ if (fq->cb.fqs)
+ fq->cb.fqs(p, fq, &swapped_msg);
+ break;
+ case QM_MR_VERB_DC_ERN:
+ /* DCP ERN */
+ if (p->cb_dc_ern)
+ p->cb_dc_ern(p, msg);
+ else if (cb_dc_ern)
+ cb_dc_ern(p, msg);
+ else {
+ static int warn_once;
+ if (!warn_once) {
+ pr_crit("Leaking DCP ERNs!\n");
+ warn_once = 1;
+ }
+ }
+ break;
+ default:
+ pr_crit("Invalid MR verb 0x%02x\n", verb);
+ }
+ } else {
+ /* It's a software ERN */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = get_fq_table_entry(be32_to_cpu(msg->ern.tag));
+#else
+ fq = (void *)(uintptr_t)be32_to_cpu(msg->ern.tag);
+#endif
+ fq->cb.ern(p, fq, &swapped_msg);
+ }
+ num++;
+ qm_mr_next(&p->p);
+ goto mr_loop;
+mr_done:
+ qm_mr_cci_consume(&p->p, num);
+ }
+ /*
+ * QM_PIRQ_CSCI/CCSCI has already been cleared, as part of its specific
+ * processing. If that interrupt source has meanwhile been re-asserted,
+ * we mustn't clear it here (or in the top-level interrupt handler).
+ */
+ return is & (QM_PIRQ_EQCI | QM_PIRQ_EQRI | QM_PIRQ_MRI);
+}
+
+/* remove some slowish-path stuff from the "fast path" and make sure it isn't
+ * inlined. */
+static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
+{
+ p->vdqcr_owned = NULL;
+ FQLOCK(fq);
+ fq_clear(fq, QMAN_FQ_STATE_VDQCR);
+ FQUNLOCK(fq);
+ wake_up(&affine_queue);
+}
+
+/* Copy a DQRR entry ensuring reads reach QBMan in order */
+static inline void safe_copy_dqrr(struct qm_dqrr_entry *dst,
+ const struct qm_dqrr_entry *src)
+{
+ int i = 0;
+ const u64 *s64 = (const u64 *)src;
+ u64 *d64 = (u64 *)dst;
+
+ /* DQRR only has 32 bytes of valid data, so we only need to
+ * copy four 64-bit values */
+ *d64 = *s64;
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ {
+ u32 res, zero = 0;
+ /* Creating a data dependency after copying the first bytes
+ ensures no WRAP transaction is generated to QBMan */
+ /* Logical AND the value pointed to by s64 with 0x0 and
+ store the result in res */
+ asm volatile("and %[result], %[in1], %[in2]"
+ : [result] "=r" (res)
+ : [in1] "r" (zero), [in2] "r" (*s64)
+ : "memory");
+ /* Add res to s64 - this creates a dependency on the result of
+ reading the value of s64 before the next read. The side
+ effect is that the core must stall until the first
+ aligned read is complete, therefore preventing a WRAP
+ transaction from being seen by the QBMan */
+ asm volatile("add %[result], %[in1], %[in2]"
+ : [result] "=r" (s64)
+ : [in1] "r" (res), [in2] "r" (s64)
+ : "memory");
+ }
+#endif
+ /* Copy the last three 64-bit words */
+ d64++; s64++;
+ for (; i < 3; i++)
+ *d64++ = *s64++;
+}
+
+/* Look: no locks, no irq_save()s, no preempt_disable()s! :-) The only states
+ * that would conflict with other things if they ran at the same time on the
+ * same cpu are;
+ *
+ * (i) setting/clearing vdqcr_owned, and
+ * (ii) clearing the NE (Not Empty) flag.
+ *
+ * Both are safe. Because;
+ *
+ * (i) this clearing can only occur after qman_volatile_dequeue() has set the
+ * vdqcr_owned field (which it does before setting VDQCR), and
+ * qman_volatile_dequeue() blocks interrupts and preemption while this is
+ * done so that we can't interfere.
+ * (ii) the NE flag is only cleared after qman_retire_fq() has set it, and as
+ * with (i) that API prevents us from interfering until it's safe.
+ *
+ * The good thing is that qman_volatile_dequeue() and qman_retire_fq() run far
+ * less frequently (ie. per-FQ) than __poll_portal_fast() does, so the net
+ * advantage comes from this function not having to "lock" anything at all.
+ *
+ * Note also that the callbacks are invoked at points which are safe against the
+ * above potential conflicts, but that this function itself is not re-entrant
+ * (this is because the function tracks one end of each FIFO in the portal and
+ * we do *not* want to lock that). So the consequence is that it is safe for
+ * user callbacks to call into any Qman API *except* qman_poll() (as that's the
+ * sole API that could be invoking the callback through this function).
+ */
+static inline unsigned int __poll_portal_fast(struct qman_portal *p,
+ unsigned int poll_limit)
+{
+ const struct qm_dqrr_entry *dq;
+ struct qman_fq *fq;
+ enum qman_cb_dqrr_result res;
+ unsigned int limit = 0;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ struct qm_dqrr_entry *shadow;
+ const struct qm_dqrr_entry *orig_dq;
+#endif
+loop:
+ qm_dqrr_pvb_update(&p->p);
+ dq = qm_dqrr_current(&p->p);
+ if (!dq)
+ goto done;
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ /* If running on a little-endian system, the fields of the
+ dequeue entry must be byte-swapped. Because the QMan HW
+ ignores writes to the DQRR, the entry is copied into a
+ shadow slot at the same DQRR index */
+ shadow = &p->shadow_dqrr[DQRR_PTR2IDX(dq)];
+ /* Use safe copy here to avoid WRAP transaction */
+ safe_copy_dqrr(shadow, dq);
+ orig_dq = dq;
+ dq = shadow;
+ shadow->fqid = be32_to_cpu(shadow->fqid);
+ shadow->contextB = be32_to_cpu(shadow->contextB);
+ shadow->seqnum = be16_to_cpu(shadow->seqnum);
+ hw_fd_to_cpu(&shadow->fd);
+#endif
+ if (dq->stat & QM_DQRR_STAT_UNSCHEDULED) {
+ /* VDQCR: don't trust contextB as the FQ may have been
+ * configured for h/w consumption and we're draining it
+ * post-retirement. */
+ fq = p->vdqcr_owned;
+ /* We only set QMAN_FQ_STATE_NE when retiring, so we only need
+ * to check for clearing it when doing volatile dequeues. It's
+ * one less thing to check in the critical path (SDQCR). */
+ if (dq->stat & QM_DQRR_STAT_FQ_EMPTY)
+ fq_clear(fq, QMAN_FQ_STATE_NE);
+ /* this is duplicated from the SDQCR code, but we have stuff to
+ * do before *and* after this callback, and we don't want
+ * multiple if()s in the critical path (SDQCR). */
+ res = fq->cb.dqrr(p, fq, dq);
+ if (res == qman_cb_dqrr_stop)
+ goto done;
+ /* Check for VDQCR completion */
+ if (dq->stat & QM_DQRR_STAT_DQCR_EXPIRED)
+ clear_vdqcr(p, fq);
+ } else {
+ /* SDQCR: contextB points to the FQ */
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ fq = get_fq_table_entry(dq->contextB);
+#else
+ fq = (void *)(uintptr_t)dq->contextB;
+#endif
+ /* Now let the callback do its stuff */
+ res = fq->cb.dqrr(p, fq, dq);
+
+ /* The callback can request that we exit without consuming this
+ * entry or advancing */
+ if (res == qman_cb_dqrr_stop)
+ goto done;
+ }
+ /* Interpret 'dq' from a driver perspective. */
+ /* Parking isn't possible unless HELDACTIVE was set. NB,
+ * FORCEELIGIBLE implies HELDACTIVE, so we only need to
+ * check for HELDACTIVE to cover both. */
+ DPA_ASSERT((dq->stat & QM_DQRR_STAT_FQ_HELDACTIVE) ||
+ (res != qman_cb_dqrr_park));
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ if (res != qman_cb_dqrr_defer)
+ qm_dqrr_cdc_consume_1ptr(&p->p, orig_dq,
+ (res == qman_cb_dqrr_park));
+#else
+ /* Defer just means "skip it, I'll consume it myself later on" */
+ if (res != qman_cb_dqrr_defer)
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq, (res == qman_cb_dqrr_park));
+#endif
+ /* Move forward */
+ qm_dqrr_next(&p->p);
+ /* Entry processed and consumed, increment our counter. The callback can
+ * request that we exit after consuming the entry, and we also exit if
+ * we reach our processing limit, so loop back only if neither of these
+ * conditions is met. */
+ if ((++limit < poll_limit) && (res != qman_cb_dqrr_consume_stop))
+ goto loop;
+done:
+ return limit;
+}
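+
+/*
+ * Illustrative sketch (not built, and not part of this driver): the sort of
+ * DQRR callback the comment above describes. It may call into any Qman API
+ * except qman_poll(); my_process_fd() is a hypothetical consumer of the
+ * frame descriptor.
+ */
+#if 0
+static enum qman_cb_dqrr_result my_dqrr_cb(struct qman_portal *portal,
+ struct qman_fq *fq, const struct qm_dqrr_entry *dq)
+{
+ my_process_fd(&dq->fd);
+ /* Let __poll_portal_fast() consume the entry and keep going */
+ return qman_cb_dqrr_consume;
+}
+#endif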
+
+u32 qman_irqsource_get(void)
+{
+ /* "irqsource" and "poll" APIs mustn't redirect when sharing, they
+ * should shut the user out if they are not the primary CPU hosting the
+ * portal. That's why we use the "raw" interface. */
+ struct qman_portal *p = get_raw_affine_portal();
+ u32 ret = p->irq_sources & QM_PIRQ_VISIBLE;
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_irqsource_get);
+
+int qman_p_irqsource_add(struct qman_portal *p, u32 bits __maybe_unused)
+{
+ __maybe_unused unsigned long irqflags;
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+ if (p->sharing_redirect)
+ return -EINVAL;
+ else
+#endif
+ {
+ bits = bits & QM_PIRQ_VISIBLE;
+ PORTAL_IRQ_LOCK(p, irqflags);
+
+ /* Clear any previously remaining interrupt conditions in
+ * QCSP_ISR. This prevents raising a false interrupt when
+ * interrupt conditions are enabled in QCSP_IER.
+ */
+ qm_isr_status_clear(&p->p, bits);
+ set_bits(bits, &p->irq_sources);
+ qm_isr_enable_write(&p->p, p->irq_sources);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ }
+ return 0;
+}
+EXPORT_SYMBOL(qman_p_irqsource_add);
+
+int qman_irqsource_add(u32 bits __maybe_unused)
+{
+ struct qman_portal *p = get_raw_affine_portal();
+ int ret;
+ ret = qman_p_irqsource_add(p, bits);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_irqsource_add);
+
+int qman_p_irqsource_remove(struct qman_portal *p, u32 bits)
+{
+ __maybe_unused unsigned long irqflags;
+ u32 ier;
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+ if (p->sharing_redirect) {
+ put_affine_portal();
+ return -EINVAL;
+ }
+#endif
+ /* Our interrupt handler only processes+clears status register bits that
+ * are in p->irq_sources. As we're trimming that mask, if one of them
+ * were to assert in the status register just before we remove it from
+ * the enable register, there would be an interrupt-storm when we
+ * release the IRQ lock. So we wait for the enable register update to
+ * take effect in h/w (by reading it back) and then clear all other bits
+ * in the status register. Ie. we clear them from ISR once it's certain
+ * IER won't allow them to reassert. */
+ PORTAL_IRQ_LOCK(p, irqflags);
+ bits &= QM_PIRQ_VISIBLE;
+ clear_bits(bits, &p->irq_sources);
+ qm_isr_enable_write(&p->p, p->irq_sources);
+
+ ier = qm_isr_enable_read(&p->p);
+ /* Using "~ier" (rather than "bits" or "~p->irq_sources") creates a
+ * data-dependency, ie. to protect against re-ordering. */
+ qm_isr_status_clear(&p->p, ~ier);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ return 0;
+}
+EXPORT_SYMBOL(qman_p_irqsource_remove);
+
+int qman_irqsource_remove(u32 bits)
+{
+ struct qman_portal *p = get_raw_affine_portal();
+ int ret;
+ ret = qman_p_irqsource_remove(p, bits);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_irqsource_remove);
+
+const cpumask_t *qman_affine_cpus(void)
+{
+ return &affine_mask;
+}
+EXPORT_SYMBOL(qman_affine_cpus);
+
+u16 qman_affine_channel(int cpu)
+{
+ if (cpu < 0) {
+ struct qman_portal *portal = get_raw_affine_portal();
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+ BUG_ON(portal->sharing_redirect);
+#endif
+ cpu = portal->config->public_cfg.cpu;
+ put_affine_portal();
+ }
+ BUG_ON(!cpumask_test_cpu(cpu, &affine_mask));
+ return affine_channels[cpu];
+}
+EXPORT_SYMBOL(qman_affine_channel);
+
+void *qman_get_affine_portal(int cpu)
+{
+ return affine_portals[cpu];
+}
+EXPORT_SYMBOL(qman_get_affine_portal);
+
+int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
+{
+ int ret;
+
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+ if (unlikely(p->sharing_redirect))
+ ret = -EINVAL;
+ else
+#endif
+ {
+ BUG_ON(p->irq_sources & QM_PIRQ_DQRI);
+ ret = __poll_portal_fast(p, limit);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(qman_p_poll_dqrr);
+
+int qman_poll_dqrr(unsigned int limit)
+{
+ struct qman_portal *p = get_poll_portal();
+ int ret;
+ ret = qman_p_poll_dqrr(p, limit);
+ put_poll_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_poll_dqrr);
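+
+/*
+ * Illustrative sketch (not built): moving dequeue processing from IRQ to
+ * polled mode. qman_p_poll_dqrr() BUG_ON()s if DQRI is still IRQ-driven, so
+ * the interrupt source must be removed before polling.
+ */
+#if 0
+static void my_poll_once(void)
+{
+ qman_irqsource_remove(QM_PIRQ_DQRI);
+ /* Process up to 16 DQRR entries on this cpu's affine portal */
+ qman_poll_dqrr(16);
+ qman_irqsource_add(QM_PIRQ_DQRI);
+}
+#endif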
+
+u32 qman_p_poll_slow(struct qman_portal *p)
+{
+ u32 ret;
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+ if (unlikely(p->sharing_redirect))
+ ret = (u32)-1;
+ else
+#endif
+ {
+ u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
+ ret = __poll_portal_slow(p, is);
+ qm_isr_status_clear(&p->p, ret);
+ }
+ return ret;
+}
+EXPORT_SYMBOL(qman_p_poll_slow);
+
+u32 qman_poll_slow(void)
+{
+ struct qman_portal *p = get_poll_portal();
+ u32 ret;
+ ret = qman_p_poll_slow(p);
+ put_poll_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_poll_slow);
+
+/* Legacy wrapper */
+void qman_p_poll(struct qman_portal *p)
+{
+#ifdef CONFIG_FSL_DPA_PORTAL_SHARE
+ if (unlikely(p->sharing_redirect))
+ return;
+#endif
+ if ((~p->irq_sources) & QM_PIRQ_SLOW) {
+ if (!(p->slowpoll--)) {
+ u32 is = qm_isr_status_read(&p->p) & ~p->irq_sources;
+ u32 active = __poll_portal_slow(p, is);
+ if (active) {
+ qm_isr_status_clear(&p->p, active);
+ p->slowpoll = SLOW_POLL_BUSY;
+ } else
+ p->slowpoll = SLOW_POLL_IDLE;
+ }
+ }
+ if ((~p->irq_sources) & QM_PIRQ_DQRI)
+ __poll_portal_fast(p, CONFIG_FSL_QMAN_POLL_LIMIT);
+}
+EXPORT_SYMBOL(qman_p_poll);
+
+void qman_poll(void)
+{
+ struct qman_portal *p = get_poll_portal();
+ qman_p_poll(p);
+ put_poll_portal();
+}
+EXPORT_SYMBOL(qman_poll);
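+
+/*
+ * Illustrative sketch (not built): a run-to-completion loop over the legacy
+ * wrapper. qman_poll() rate-limits slow-path polling via p->slowpoll as
+ * above; my_should_stop() is a hypothetical termination test.
+ */
+#if 0
+static void my_poll_loop(void)
+{
+ while (!my_should_stop()) {
+ qman_poll();
+ cpu_relax();
+ }
+}
+#endif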
+
+void qman_p_stop_dequeues(struct qman_portal *p)
+{
+ qman_stop_dequeues_ex(p);
+}
+EXPORT_SYMBOL(qman_p_stop_dequeues);
+
+void qman_stop_dequeues(void)
+{
+ struct qman_portal *p = get_affine_portal();
+ qman_p_stop_dequeues(p);
+ put_affine_portal();
+}
+EXPORT_SYMBOL(qman_stop_dequeues);
+
+void qman_p_start_dequeues(struct qman_portal *p)
+{
+ unsigned long irqflags __maybe_unused;
+ PORTAL_IRQ_LOCK(p, irqflags);
+ DPA_ASSERT(p->dqrr_disable_ref > 0);
+ if (!(--p->dqrr_disable_ref))
+ qm_dqrr_set_maxfill(&p->p, DQRR_MAXFILL);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+}
+EXPORT_SYMBOL(qman_p_start_dequeues);
+
+void qman_start_dequeues(void)
+{
+ struct qman_portal *p = get_affine_portal();
+ qman_p_start_dequeues(p);
+ put_affine_portal();
+}
+EXPORT_SYMBOL(qman_start_dequeues);
+
+void qman_p_static_dequeue_add(struct qman_portal *p, u32 pools)
+{
+ unsigned long irqflags __maybe_unused;
+ PORTAL_IRQ_LOCK(p, irqflags);
+ pools &= p->config->public_cfg.pools;
+ p->sdqcr |= pools;
+ qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_add);
+
+void qman_static_dequeue_add(u32 pools)
+{
+ struct qman_portal *p = get_affine_portal();
+ qman_p_static_dequeue_add(p, pools);
+ put_affine_portal();
+}
+EXPORT_SYMBOL(qman_static_dequeue_add);
+
+void qman_p_static_dequeue_del(struct qman_portal *p, u32 pools)
+{
+ unsigned long irqflags __maybe_unused;
+ PORTAL_IRQ_LOCK(p, irqflags);
+ pools &= p->config->public_cfg.pools;
+ p->sdqcr &= ~pools;
+ qm_dqrr_sdqcr_set(&p->p, p->sdqcr);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_del);
+
+void qman_static_dequeue_del(u32 pools)
+{
+ struct qman_portal *p = get_affine_portal();
+ qman_p_static_dequeue_del(p, pools);
+ put_affine_portal();
+}
+EXPORT_SYMBOL(qman_static_dequeue_del);
+
+u32 qman_p_static_dequeue_get(struct qman_portal *p)
+{
+ return p->sdqcr;
+}
+EXPORT_SYMBOL(qman_p_static_dequeue_get);
+
+u32 qman_static_dequeue_get(void)
+{
+ struct qman_portal *p = get_affine_portal();
+ u32 ret = qman_p_static_dequeue_get(p);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_static_dequeue_get);
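+
+/*
+ * Illustrative sketch (not built): enabling dequeues from a pool channel on
+ * the calling cpu's portal. QM_SDQCR_CHANNELS_POOL() is assumed from the
+ * Qman API headers; portals silently mask out pools they aren't configured
+ * for, as the code above shows.
+ */
+#if 0
+static void my_enable_pool3(void)
+{
+ qman_static_dequeue_add(QM_SDQCR_CHANNELS_POOL(3));
+}
+#endif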
+
+void qman_p_dca(struct qman_portal *p, struct qm_dqrr_entry *dq,
+ int park_request)
+{
+ qm_dqrr_cdc_consume_1ptr(&p->p, dq, park_request);
+}
+EXPORT_SYMBOL(qman_p_dca);
+
+void qman_dca(struct qm_dqrr_entry *dq, int park_request)
+{
+ struct qman_portal *p = get_affine_portal();
+ qman_p_dca(p, dq, park_request);
+ put_affine_portal();
+}
+EXPORT_SYMBOL(qman_dca);
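+
+/*
+ * Illustrative sketch (not built): deferred consumption. A DQRR callback
+ * that returns qman_cb_dqrr_defer leaves the entry unconsumed, and the
+ * owner consumes it later, on the same portal, via qman_dca().
+ */
+#if 0
+static void my_consume_later(struct qm_dqrr_entry *dq)
+{
+ /* Consume the deferred entry without parking the FQ */
+ qman_dca(dq, 0);
+}
+#endif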
+
+/*******************/
+/* Frame queue API */
+/*******************/
+
+static const char *mcr_result_str(u8 result)
+{
+ switch (result) {
+ case QM_MCR_RESULT_NULL:
+ return "QM_MCR_RESULT_NULL";
+ case QM_MCR_RESULT_OK:
+ return "QM_MCR_RESULT_OK";
+ case QM_MCR_RESULT_ERR_FQID:
+ return "QM_MCR_RESULT_ERR_FQID";
+ case QM_MCR_RESULT_ERR_FQSTATE:
+ return "QM_MCR_RESULT_ERR_FQSTATE";
+ case QM_MCR_RESULT_ERR_NOTEMPTY:
+ return "QM_MCR_RESULT_ERR_NOTEMPTY";
+ case QM_MCR_RESULT_PENDING:
+ return "QM_MCR_RESULT_PENDING";
+ case QM_MCR_RESULT_ERR_BADCOMMAND:
+ return "QM_MCR_RESULT_ERR_BADCOMMAND";
+ }
+ return "<unknown MCR result>";
+}
+
+int qman_create_fq(u32 fqid, u32 flags, struct qman_fq *fq)
+{
+ struct qm_fqd fqd;
+ struct qm_mcr_queryfq_np np;
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+
+ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID) {
+ int ret = qman_alloc_fqid(&fqid);
+ if (ret)
+ return ret;
+ }
+ spin_lock_init(&fq->fqlock);
+ fq->fqid = fqid;
+ fq->flags = flags;
+ fq->state = qman_fq_state_oos;
+ fq->cgr_groupid = 0;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ if (unlikely(find_empty_fq_table_entry(&fq->key, fq)))
+ return -ENOMEM;
+#endif
+ if (!(flags & QMAN_FQ_FLAG_AS_IS) || (flags & QMAN_FQ_FLAG_NO_MODIFY))
+ return 0;
+ /* Everything else is AS_IS support */
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ pr_err("QUERYFQ failed: %s\n", mcr_result_str(mcr->result));
+ goto err;
+ }
+ fqd = mcr->queryfq.fqd;
+ hw_fqd_to_cpu(&fqd);
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq_np.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYFQ_NP);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ pr_err("QUERYFQ_NP failed: %s\n", mcr_result_str(mcr->result));
+ goto err;
+ }
+ np = mcr->queryfq_np;
+ /* Phew, have queryfq and queryfq_np results, stitch together
+ * the FQ object from those. */
+ fq->cgr_groupid = fqd.cgid;
+ switch (np.state & QM_MCR_NP_STATE_MASK) {
+ case QM_MCR_NP_STATE_OOS:
+ break;
+ case QM_MCR_NP_STATE_RETIRED:
+ fq->state = qman_fq_state_retired;
+ if (np.frm_cnt)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ break;
+ case QM_MCR_NP_STATE_TEN_SCHED:
+ case QM_MCR_NP_STATE_TRU_SCHED:
+ case QM_MCR_NP_STATE_ACTIVE:
+ fq->state = qman_fq_state_sched;
+ if (np.state & QM_MCR_NP_STATE_R)
+ fq_set(fq, QMAN_FQ_STATE_CHANGING);
+ break;
+ case QM_MCR_NP_STATE_PARKED:
+ fq->state = qman_fq_state_parked;
+ break;
+ default:
+ DPA_ASSERT(NULL == "invalid FQ state");
+ }
+ if (fqd.fq_ctrl & QM_FQCTRL_CGE)
+ fq->state |= QMAN_FQ_STATE_CGR_EN;
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return 0;
+err:
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ if (flags & QMAN_FQ_FLAG_DYNAMIC_FQID)
+ qman_release_fqid(fqid);
+ return -EIO;
+}
+EXPORT_SYMBOL(qman_create_fq);
+
+void qman_destroy_fq(struct qman_fq *fq, u32 flags __maybe_unused)
+{
+ /* We don't need to lock the FQ as it is a pre-condition that the FQ be
+ * quiesced. Instead, run some checks. */
+ switch (fq->state) {
+ case qman_fq_state_parked:
+ DPA_ASSERT(flags & QMAN_FQ_DESTROY_PARKED);
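+ /* Fall through */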
+ case qman_fq_state_oos:
+ if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
+ qman_release_fqid(fq->fqid);
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ clear_fq_table_entry(fq->key);
+#endif
+ return;
+ default:
+ break;
+ }
+ DPA_ASSERT(NULL == "qman_destroy_fq() on unquiesced FQ!");
+}
+EXPORT_SYMBOL(qman_destroy_fq);
+
+u32 qman_fq_fqid(struct qman_fq *fq)
+{
+ return fq->fqid;
+}
+EXPORT_SYMBOL(qman_fq_fqid);
+
+void qman_fq_state(struct qman_fq *fq, enum qman_fq_state *state, u32 *flags)
+{
+ if (state)
+ *state = fq->state;
+ if (flags)
+ *flags = fq->flags;
+}
+EXPORT_SYMBOL(qman_fq_state);
+
+int qman_init_fq(struct qman_fq *fq, u32 flags, struct qm_mcc_initfq *opts)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ u8 res, myverb = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+ QM_MCC_VERB_INITFQ_SCHED : QM_MCC_VERB_INITFQ_PARKED;
+
+ if ((fq->state != qman_fq_state_oos) &&
+ (fq->state != qman_fq_state_parked))
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ if (opts && (opts->we_mask & QM_INITFQ_WE_OAC)) {
+ /* And can't be set at the same time as TDTHRESH */
+ if (opts->we_mask & QM_INITFQ_WE_TDTHRESH)
+ return -EINVAL;
+ }
+ /* Issue an INITFQ_[PARKED|SCHED] management command */
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+ ((fq->state != qman_fq_state_oos) &&
+ (fq->state != qman_fq_state_parked)))) {
+ FQUNLOCK(fq);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return -EBUSY;
+ }
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initfq = *opts;
+ mcc->initfq.fqid = cpu_to_be32(fq->fqid);
+ mcc->initfq.count = 0;
+
+ /* If the FQ does *not* have the TO_DCPORTAL flag, contextB is set as a
+ * demux pointer. Otherwise, the caller-provided value is allowed to
+ * stand; don't overwrite it. */
+ if (fq_isclear(fq, QMAN_FQ_FLAG_TO_DCPORTAL)) {
+ dma_addr_t phys_fq;
+ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTB;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ mcc->initfq.fqd.context_b = fq->key;
+#else
+ mcc->initfq.fqd.context_b = (u32)(uintptr_t)fq;
+#endif
+ /* and the physical address - NB, if the user wasn't trying to
+ * set CONTEXTA, clear the stashing settings. */
+ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_CONTEXTA)) {
+ mcc->initfq.we_mask |= QM_INITFQ_WE_CONTEXTA;
+ memset(&mcc->initfq.fqd.context_a, 0,
+ sizeof(mcc->initfq.fqd.context_a));
+ } else {
+ phys_fq = dma_map_single(&p->pdev->dev, fq, sizeof(*fq),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&p->pdev->dev, phys_fq)) {
+ dev_err(&p->pdev->dev,
+ "dma_map_single failed for fqid: %u\n",
+ fq->fqid);
+ FQUNLOCK(fq);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return -EIO;
+ }
+
+ qm_fqd_stashing_set64(&mcc->initfq.fqd, phys_fq);
+ }
+ }
+ if (flags & QMAN_INITFQ_FLAG_LOCAL) {
+ mcc->initfq.fqd.dest.channel = p->config->public_cfg.channel;
+ if (!(mcc->initfq.we_mask & QM_INITFQ_WE_DESTWQ)) {
+ mcc->initfq.we_mask |= QM_INITFQ_WE_DESTWQ;
+ mcc->initfq.fqd.dest.wq = 4;
+ }
+ }
+ mcc->initfq.we_mask = cpu_to_be16(mcc->initfq.we_mask);
+ cpu_to_hw_fqd(&mcc->initfq.fqd);
+ qm_mc_commit(&p->p, myverb);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ FQUNLOCK(fq);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return -EIO;
+ }
+ if (opts) {
+ if (opts->we_mask & QM_INITFQ_WE_FQCTRL) {
+ if (opts->fqd.fq_ctrl & QM_FQCTRL_CGE)
+ fq_set(fq, QMAN_FQ_STATE_CGR_EN);
+ else
+ fq_clear(fq, QMAN_FQ_STATE_CGR_EN);
+ }
+ if (opts->we_mask & QM_INITFQ_WE_CGID)
+ fq->cgr_groupid = opts->fqd.cgid;
+ }
+ fq->state = (flags & QMAN_INITFQ_FLAG_SCHED) ?
+ qman_fq_state_sched : qman_fq_state_parked;
+ FQUNLOCK(fq);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return 0;
+}
+EXPORT_SYMBOL(qman_init_fq);
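+
+/*
+ * Illustrative sketch (not built): minimal FQ bring-up. A dynamically
+ * allocated FQID is created OOS, given a dequeue callback, then initialised
+ * and scheduled. QM_FQCTRL_HOLDACTIVE is assumed from the Qman headers and
+ * my_dqrr_cb is the hypothetical callback sketched earlier.
+ */
+#if 0
+static struct qman_fq my_fq;
+
+static int my_setup_fq(void)
+{
+ struct qm_mcc_initfq opts;
+ int ret;
+
+ ret = qman_create_fq(0, QMAN_FQ_FLAG_DYNAMIC_FQID, &my_fq);
+ if (ret)
+ return ret;
+ my_fq.cb.dqrr = my_dqrr_cb;
+ memset(&opts, 0, sizeof(opts));
+ opts.we_mask = QM_INITFQ_WE_FQCTRL;
+ opts.fqd.fq_ctrl = QM_FQCTRL_HOLDACTIVE;
+ return qman_init_fq(&my_fq, QMAN_INITFQ_FLAG_SCHED, &opts);
+}
+#endif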
+
+int qman_schedule_fq(struct qman_fq *fq)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ int ret = 0;
+ u8 res;
+
+ if (fq->state != qman_fq_state_parked)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ /* Issue a ALTERFQ_SCHED management command */
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+ (fq->state != qman_fq_state_parked))) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_SCHED);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_SCHED);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_sched;
+out:
+ FQUNLOCK(fq);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_schedule_fq);
+
+int qman_retire_fq(struct qman_fq *fq, u32 *flags)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ int rval;
+ u8 res;
+
+ if ((fq->state != qman_fq_state_parked) &&
+ (fq->state != qman_fq_state_sched))
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+ (fq->state == qman_fq_state_retired) ||
+ (fq->state == qman_fq_state_oos))) {
+ rval = -EBUSY;
+ goto out;
+ }
+ rval = table_push_fq(p, fq);
+ if (rval)
+ goto out;
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_RETIRE);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_RETIRE);
+ res = mcr->result;
+ /* "Elegant" would be to treat OK/PENDING the same way; set CHANGING,
+ * and defer the flags until FQRNI or FQRN (respectively) show up. But
+ * "Friendly" is to process OK immediately, and not set CHANGING. We do
+ * friendly, otherwise the caller doesn't necessarily have a fully
+ * "retired" FQ on return even if the retirement was immediate. However
+ * this does mean some code duplication between here and
+ * fq_state_change(). */
+ if (likely(res == QM_MCR_RESULT_OK)) {
+ rval = 0;
+ /* Process 'fq' right away, we'll ignore FQRNI */
+ if (mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY)
+ fq_set(fq, QMAN_FQ_STATE_NE);
+ if (mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)
+ fq_set(fq, QMAN_FQ_STATE_ORL);
+ else
+ table_del_fq(p, fq);
+ if (flags)
+ *flags = fq->flags;
+ fq->state = qman_fq_state_retired;
+ if (fq->cb.fqs) {
+ /* Another issue with supporting "immediate" retirement
+ * is that we're forced to drop FQRNIs, because by the
+ * time they're seen it may already be "too late" (the
+ * fq may have been OOS'd and free()'d already). But if
+ * the upper layer wants a callback whether it's
+ * immediate or not, we have to fake a "MR" entry to
+ * look like an FQRNI... */
+ struct qm_mr_entry msg;
+ msg.verb = QM_MR_VERB_FQRNI;
+ msg.fq.fqs = mcr->alterfq.fqs;
+ msg.fq.fqid = fq->fqid;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ msg.fq.contextB = fq->key;
+#else
+ msg.fq.contextB = (u32)(uintptr_t)fq;
+#endif
+ fq->cb.fqs(p, fq, &msg);
+ }
+ } else if (res == QM_MCR_RESULT_PENDING) {
+ rval = 1;
+ fq_set(fq, QMAN_FQ_STATE_CHANGING);
+ } else {
+ rval = -EIO;
+ table_del_fq(p, fq);
+ }
+out:
+ FQUNLOCK(fq);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return rval;
+}
+EXPORT_SYMBOL(qman_retire_fq);
+
+int qman_oos_fq(struct qman_fq *fq)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ int ret = 0;
+ u8 res;
+
+ if (fq->state != qman_fq_state_retired)
+ return -EINVAL;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_BLOCKOOS)) ||
+ (fq->state != qman_fq_state_retired))) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_ALTER_OOS);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_ALTER_OOS);
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+ fq->state = qman_fq_state_oos;
+out:
+ FQUNLOCK(fq);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_oos_fq);
+
+int qman_fq_flow_control(struct qman_fq *fq, int xon)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ int ret = 0;
+ u8 res;
+ u8 myverb;
+
+ if ((fq->state == qman_fq_state_oos) ||
+ (fq->state == qman_fq_state_retired) ||
+ (fq->state == qman_fq_state_parked))
+ return -EINVAL;
+
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (unlikely(fq_isset(fq, QMAN_FQ_FLAG_NO_MODIFY)))
+ return -EINVAL;
+#endif
+ /* Issue a ALTER_FQXON or ALTER_FQXOFF management command */
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+ FQLOCK(fq);
+ if (unlikely((fq_isset(fq, QMAN_FQ_STATE_CHANGING)) ||
+ (fq->state == qman_fq_state_parked) ||
+ (fq->state == qman_fq_state_oos) ||
+ (fq->state == qman_fq_state_retired))) {
+ ret = -EBUSY;
+ goto out;
+ }
+ mcc = qm_mc_start(&p->p);
+ mcc->alterfq.fqid = cpu_to_be32(fq->fqid);
+ mcc->alterfq.count = 0;
+ myverb = xon ? QM_MCC_VERB_ALTER_FQXON : QM_MCC_VERB_ALTER_FQXOFF;
+
+ qm_mc_commit(&p->p, myverb);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ ret = -EIO;
+ goto out;
+ }
+out:
+ FQUNLOCK(fq);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_fq_flow_control);
+
+int qman_query_fq(struct qman_fq *fq, struct qm_fqd *fqd)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK) {
+ *fqd = mcr->queryfq.fqd;
+ hw_fqd_to_cpu(fqd);
+ }
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ if (res != QM_MCR_RESULT_OK)
+ return -EIO;
+ return 0;
+}
+EXPORT_SYMBOL(qman_query_fq);
+
+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ mcc = qm_mc_start(&p->p);
+ mcc->queryfq.fqid = cpu_to_be32(fq->fqid);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK) {
+ *np = mcr->queryfq_np;
+ np->fqd_link = be24_to_cpu(np->fqd_link);
+ np->odp_seq = be16_to_cpu(np->odp_seq);
+ np->orp_nesn = be16_to_cpu(np->orp_nesn);
+ np->orp_ea_hseq = be16_to_cpu(np->orp_ea_hseq);
+ np->orp_ea_tseq = be16_to_cpu(np->orp_ea_tseq);
+ np->orp_ea_hptr = be24_to_cpu(np->orp_ea_hptr);
+ np->orp_ea_tptr = be24_to_cpu(np->orp_ea_tptr);
+ np->pfdr_hptr = be24_to_cpu(np->pfdr_hptr);
+ np->pfdr_tptr = be24_to_cpu(np->pfdr_tptr);
+ np->ics_surp = be16_to_cpu(np->ics_surp);
+ np->byte_cnt = be32_to_cpu(np->byte_cnt);
+ np->frm_cnt = be24_to_cpu(np->frm_cnt);
+ np->ra1_sfdr = be16_to_cpu(np->ra1_sfdr);
+ np->ra2_sfdr = be16_to_cpu(np->ra2_sfdr);
+ np->od1_sfdr = be16_to_cpu(np->od1_sfdr);
+ np->od2_sfdr = be16_to_cpu(np->od2_sfdr);
+ np->od3_sfdr = be16_to_cpu(np->od3_sfdr);
+ }
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ if (res == QM_MCR_RESULT_ERR_FQID)
+ return -ERANGE;
+ else if (res != QM_MCR_RESULT_OK)
+ return -EIO;
+ return 0;
+}
+EXPORT_SYMBOL(qman_query_fq_np);
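+
+/*
+ * Illustrative sketch (not built): using the NP query to test whether a
+ * frame queue has drained; frm_cnt has already been byte-swapped by the
+ * API above. Returns a negative errno or a boolean.
+ */
+#if 0
+static int my_fq_is_empty(struct qman_fq *fq)
+{
+ struct qm_mcr_queryfq_np np;
+ int ret = qman_query_fq_np(fq, &np);
+
+ return ret ? ret : !np.frm_cnt;
+}
+#endif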
+
+int qman_query_wq(u8 query_dedicated, struct qm_mcr_querywq *wq)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ unsigned long irqflags __maybe_unused;
+ u8 res, myverb;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ myverb = (query_dedicated) ? QM_MCR_VERB_QUERYWQ_DEDICATED :
+ QM_MCR_VERB_QUERYWQ;
+ mcc = qm_mc_start(&p->p);
+ mcc->querywq.channel.id = cpu_to_be16(wq->channel.id);
+ qm_mc_commit(&p->p, myverb);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == myverb);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK) {
+ int i, array_len;
+ wq->channel.id = be16_to_cpu(mcr->querywq.channel.id);
+ array_len = ARRAY_SIZE(mcr->querywq.wq_len);
+ for (i = 0; i < array_len; i++)
+ wq->wq_len[i] = be32_to_cpu(mcr->querywq.wq_len[i]);
+ }
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("QUERYWQ failed: %s\n", mcr_result_str(res));
+ return -EIO;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(qman_query_wq);
+
+int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
+ struct qm_mcr_cgrtestwrite *result)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ mcc = qm_mc_start(&p->p);
+ mcc->cgrtestwrite.cgid = cgr->cgrid;
+ mcc->cgrtestwrite.i_bcnt_hi = (u8)(i_bcnt >> 32);
+ mcc->cgrtestwrite.i_bcnt_lo = (u32)i_bcnt;
+ qm_mc_commit(&p->p, QM_MCC_VERB_CGRTESTWRITE);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_CGRTESTWRITE);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ *result = mcr->cgrtestwrite;
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("CGR TEST WRITE failed: %s\n", mcr_result_str(res));
+ return -EIO;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(qman_testwrite_cgr);
+
+int qman_query_cgr(struct qman_cgr *cgr, struct qm_mcr_querycgr *cgrd)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+ int i;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ mcc = qm_mc_start(&p->p);
+ mcc->querycgr.cgid = cgr->cgrid;
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCGR);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCC_VERB_QUERYCGR);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ *cgrd = mcr->querycgr;
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("QUERY_CGR failed: %s\n", mcr_result_str(res));
+ return -EIO;
+ }
+ cgrd->cgr.wr_parm_g.word =
+ be32_to_cpu(cgrd->cgr.wr_parm_g.word);
+ cgrd->cgr.wr_parm_y.word =
+ be32_to_cpu(cgrd->cgr.wr_parm_y.word);
+ cgrd->cgr.wr_parm_r.word =
+ be32_to_cpu(cgrd->cgr.wr_parm_r.word);
+ cgrd->cgr.cscn_targ = be32_to_cpu(cgrd->cgr.cscn_targ);
+ cgrd->cgr.__cs_thres = be16_to_cpu(cgrd->cgr.__cs_thres);
+ for (i = 0; i < ARRAY_SIZE(cgrd->cscn_targ_swp); i++)
+ be32_to_cpus(&cgrd->cscn_targ_swp[i]);
+ return 0;
+}
+EXPORT_SYMBOL(qman_query_cgr);
+
+int qman_query_congestion(struct qm_mcr_querycongestion *congestion)
+{
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+ int i;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ qm_mc_start(&p->p);
+ qm_mc_commit(&p->p, QM_MCC_VERB_QUERYCONGESTION);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCC_VERB_QUERYCONGESTION);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ memcpy_fromio(congestion, &mcr->querycongestion,
+ sizeof(*congestion));
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("QUERY_CONGESTION failed: %s\n", mcr_result_str(res));
+ return -EIO;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(congestion->state.__state); i++)
+ be32_to_cpus(&congestion->state.__state[i]);
+ return 0;
+}
+EXPORT_SYMBOL(qman_query_congestion);
+
+/* internal function used as a wait_event() expression */
+static int set_p_vdqcr(struct qman_portal *p, struct qman_fq *fq, u32 vdqcr)
+{
+ unsigned long irqflags __maybe_unused;
+ int ret = -EBUSY;
+ PORTAL_IRQ_LOCK(p, irqflags);
+ if (!p->vdqcr_owned) {
+ FQLOCK(fq);
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ goto escape;
+ fq_set(fq, QMAN_FQ_STATE_VDQCR);
+ FQUNLOCK(fq);
+ p->vdqcr_owned = fq;
+ ret = 0;
+ }
+escape:
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ if (!ret)
+ qm_dqrr_vdqcr_set(&p->p, vdqcr);
+ return ret;
+}
+
+static int set_vdqcr(struct qman_portal **p, struct qman_fq *fq, u32 vdqcr)
+{
+ int ret;
+ *p = get_affine_portal();
+ ret = set_p_vdqcr(*p, fq, vdqcr);
+ put_affine_portal();
+ return ret;
+}
+
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+static int wait_p_vdqcr_start(struct qman_portal *p, struct qman_fq *fq,
+ u32 vdqcr, u32 flags)
+{
+ int ret = 0;
+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+ ret = wait_event_interruptible(affine_queue,
+ !(ret = set_p_vdqcr(p, fq, vdqcr)));
+ else
+ wait_event(affine_queue, !(ret = set_p_vdqcr(p, fq, vdqcr)));
+ return ret;
+}
+
+static int wait_vdqcr_start(struct qman_portal **p, struct qman_fq *fq,
+ u32 vdqcr, u32 flags)
+{
+ int ret = 0;
+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+ ret = wait_event_interruptible(affine_queue,
+ !(ret = set_vdqcr(p, fq, vdqcr)));
+ else
+ wait_event(affine_queue, !(ret = set_vdqcr(p, fq, vdqcr)));
+ return ret;
+}
+#endif
+
+int qman_p_volatile_dequeue(struct qman_portal *p, struct qman_fq *fq,
+ u32 flags __maybe_unused, u32 vdqcr)
+{
+ int ret;
+
+ if ((fq->state != qman_fq_state_parked) &&
+ (fq->state != qman_fq_state_retired))
+ return -EINVAL;
+ if (vdqcr & QM_VDQCR_FQID_MASK)
+ return -EINVAL;
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ return -EBUSY;
+ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+ if (flags & QMAN_VOLATILE_FLAG_WAIT)
+ ret = wait_p_vdqcr_start(p, fq, vdqcr, flags);
+ else
+#endif
+ ret = set_p_vdqcr(p, fq, vdqcr);
+ if (ret)
+ return ret;
+ /* VDQCR is set */
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+ if (flags & QMAN_VOLATILE_FLAG_FINISH) {
+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+ /* NB: don't propagate any error - the caller wouldn't
+ * know whether the VDQCR was issued or not. A signal
+ * could arrive after returning anyway, so the caller
+ * can check signal_pending() if that's an issue. */
+ wait_event_interruptible(affine_queue,
+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+ else
+ wait_event(affine_queue,
+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(qman_p_volatile_dequeue);
+
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags __maybe_unused,
+ u32 vdqcr)
+{
+ struct qman_portal *p;
+ int ret;
+
+ if ((fq->state != qman_fq_state_parked) &&
+ (fq->state != qman_fq_state_retired))
+ return -EINVAL;
+ if (vdqcr & QM_VDQCR_FQID_MASK)
+ return -EINVAL;
+ if (fq_isset(fq, QMAN_FQ_STATE_VDQCR))
+ return -EBUSY;
+ vdqcr = (vdqcr & ~QM_VDQCR_FQID_MASK) | fq->fqid;
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+ if (flags & QMAN_VOLATILE_FLAG_WAIT)
+ ret = wait_vdqcr_start(&p, fq, vdqcr, flags);
+ else
+#endif
+ ret = set_vdqcr(&p, fq, vdqcr);
+ if (ret)
+ return ret;
+ /* VDQCR is set */
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+ if (flags & QMAN_VOLATILE_FLAG_FINISH) {
+ if (flags & QMAN_VOLATILE_FLAG_WAIT_INT)
+ /* NB: don't propagate any error - the caller wouldn't
+ * know whether the VDQCR was issued or not. A signal
+ * could arrive after returning anyway, so the caller
+ * can check signal_pending() if that's an issue. */
+ wait_event_interruptible(affine_queue,
+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+ else
+ wait_event(affine_queue,
+ !fq_isset(fq, QMAN_FQ_STATE_VDQCR));
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(qman_volatile_dequeue);
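+
+/*
+ * Illustrative sketch (not built): draining a retired FQ. The WAIT/FINISH
+ * flags block until the VDQCR is both issued and completed, as described
+ * above; QM_VDQCR_NUMFRAMES_TILLEMPTY is assumed from the Qman headers.
+ */
+#if 0
+static int my_drain_fq(struct qman_fq *fq)
+{
+ return qman_volatile_dequeue(fq,
+ QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH,
+ QM_VDQCR_NUMFRAMES_TILLEMPTY);
+}
+#endif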
+
+static noinline void update_eqcr_ci(struct qman_portal *p, u8 avail)
+{
+ if (avail)
+ qm_eqcr_cce_prefetch(&p->p);
+ else
+ qm_eqcr_cce_update(&p->p);
+}
+
+int qman_eqcr_is_empty(void)
+{
+ unsigned long irqflags __maybe_unused;
+ struct qman_portal *p = get_affine_portal();
+ u8 avail;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ update_eqcr_ci(p, 0);
+ avail = qm_eqcr_get_fill(&p->p);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return avail == 0;
+}
+EXPORT_SYMBOL(qman_eqcr_is_empty);
+
+void qman_set_dc_ern(qman_cb_dc_ern handler, int affine)
+{
+ if (affine) {
+ unsigned long irqflags __maybe_unused;
+ struct qman_portal *p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+ p->cb_dc_ern = handler;
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ } else
+ cb_dc_ern = handler;
+}
+EXPORT_SYMBOL(qman_set_dc_ern);
+
+static inline struct qm_eqcr_entry *try_p_eq_start(struct qman_portal *p,
+ unsigned long *irqflags __maybe_unused,
+ struct qman_fq *fq,
+ const struct qm_fd *fd,
+ u32 flags)
+{
+ struct qm_eqcr_entry *eq;
+ u8 avail;
+ PORTAL_IRQ_LOCK(p, (*irqflags));
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+ if (p->eqci_owned) {
+ PORTAL_IRQ_UNLOCK(p, (*irqflags));
+ return NULL;
+ }
+ p->eqci_owned = fq;
+ }
+#endif
+ if (p->use_eqcr_ci_stashing) {
+ /*
+ * The stashing case is easy, only update if we need to in
+ * order to try and liberate ring entries.
+ */
+ eq = qm_eqcr_start_stash(&p->p);
+ } else {
+ /*
+ * The non-stashing case is harder, need to prefetch ahead of
+ * time.
+ */
+ avail = qm_eqcr_get_avail(&p->p);
+ if (avail < 2)
+ update_eqcr_ci(p, avail);
+ eq = qm_eqcr_start_no_stash(&p->p);
+ }
+
+ if (unlikely(!eq)) {
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC)))
+ p->eqci_owned = NULL;
+#endif
+ PORTAL_IRQ_UNLOCK(p, (*irqflags));
+ return NULL;
+ }
+ if (flags & QMAN_ENQUEUE_FLAG_DCA)
+ eq->dca = QM_EQCR_DCA_ENABLE |
+ ((flags & QMAN_ENQUEUE_FLAG_DCA_PARK) ?
+ QM_EQCR_DCA_PARK : 0) |
+ ((flags >> 8) & QM_EQCR_DCA_IDXMASK);
+ eq->fqid = cpu_to_be32(fq->fqid);
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ eq->tag = cpu_to_be32(fq->key);
+#else
+ eq->tag = cpu_to_be32((u32)(uintptr_t)fq);
+#endif
+ eq->fd = *fd;
+ cpu_to_hw_fd(&eq->fd);
+ return eq;
+}
+
+static inline struct qm_eqcr_entry *try_eq_start(struct qman_portal **p,
+ unsigned long *irqflags __maybe_unused,
+ struct qman_fq *fq,
+ const struct qm_fd *fd,
+ u32 flags)
+{
+ struct qm_eqcr_entry *eq;
+ *p = get_affine_portal();
+ eq = try_p_eq_start(*p, irqflags, fq, fd, flags);
+ if (!eq)
+ put_affine_portal();
+ return eq;
+}
+
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+static noinline struct qm_eqcr_entry *__wait_eq_start(struct qman_portal **p,
+ unsigned long *irqflags __maybe_unused,
+ struct qman_fq *fq,
+ const struct qm_fd *fd,
+ u32 flags)
+{
+ struct qm_eqcr_entry *eq = try_eq_start(p, irqflags, fq, fd, flags);
+ if (!eq)
+ qm_eqcr_set_ithresh(&(*p)->p, EQCR_ITHRESH);
+ return eq;
+}
+static noinline struct qm_eqcr_entry *wait_eq_start(struct qman_portal **p,
+ unsigned long *irqflags __maybe_unused,
+ struct qman_fq *fq,
+ const struct qm_fd *fd,
+ u32 flags)
+{
+ struct qm_eqcr_entry *eq;
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+ /* NB: return NULL if signal occurs before completion. Signal
+ * can occur during return. Caller must check for signal */
+ wait_event_interruptible(affine_queue,
+ (eq = __wait_eq_start(p, irqflags, fq, fd, flags)));
+ else
+ wait_event(affine_queue,
+ (eq = __wait_eq_start(p, irqflags, fq, fd, flags)));
+ return eq;
+}
+static noinline struct qm_eqcr_entry *__wait_p_eq_start(struct qman_portal *p,
+ unsigned long *irqflags __maybe_unused,
+ struct qman_fq *fq,
+ const struct qm_fd *fd,
+ u32 flags)
+{
+ struct qm_eqcr_entry *eq = try_p_eq_start(p, irqflags, fq, fd, flags);
+ if (!eq)
+ qm_eqcr_set_ithresh(&p->p, EQCR_ITHRESH);
+ return eq;
+}
+static noinline struct qm_eqcr_entry *wait_p_eq_start(struct qman_portal *p,
+ unsigned long *irqflags __maybe_unused,
+ struct qman_fq *fq,
+ const struct qm_fd *fd,
+ u32 flags)
+{
+ struct qm_eqcr_entry *eq;
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+ /* NB: return NULL if signal occurs before completion. Signal
+ * can occur during return. Caller must check for signal */
+ wait_event_interruptible(affine_queue,
+ (eq = __wait_p_eq_start(p, irqflags, fq, fd, flags)));
+ else
+ wait_event(affine_queue,
+ (eq = __wait_p_eq_start(p, irqflags, fq, fd, flags)));
+ return eq;
+}
+#endif
+
+int qman_p_enqueue(struct qman_portal *p, struct qman_fq *fq,
+ const struct qm_fd *fd, u32 flags)
+{
+ struct qm_eqcr_entry *eq;
+ unsigned long irqflags __maybe_unused;
+
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+ eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
+ else
+#endif
+ eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
+ if (!eq)
+ return -EBUSY;
+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+ /* Factor the below out, it's used from qman_enqueue_orp() too */
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+ /* NB: return success even if signal occurs before
+ * condition is true. pvb_commit guarantees success */
+ wait_event_interruptible(affine_queue,
+ (p->eqci_owned != fq));
+ else
+ wait_event(affine_queue, (p->eqci_owned != fq));
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(qman_p_enqueue);
+
+int qman_enqueue(struct qman_fq *fq, const struct qm_fd *fd, u32 flags)
+{
+ struct qman_portal *p;
+ struct qm_eqcr_entry *eq;
+ unsigned long irqflags __maybe_unused;
+
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+ eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
+ else
+#endif
+ eq = try_eq_start(&p, &irqflags, fq, fd, flags);
+ if (!eq)
+ return -EBUSY;
+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+ /* Factor the below out, it's used from qman_enqueue_orp() too */
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+ /* NB: return success even if signal occurs before
+ * condition is true. pvb_commit guarantees success */
+ wait_event_interruptible(affine_queue,
+ (p->eqci_owned != fq));
+ else
+ wait_event(affine_queue, (p->eqci_owned != fq));
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(qman_enqueue);
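+
+/*
+ * Illustrative sketch (not built): a simple busy-retry around qman_enqueue()
+ * for callers not using the WAIT flags; -EBUSY means the EQCR was full.
+ */
+#if 0
+static int my_send(struct qman_fq *fq, const struct qm_fd *fd)
+{
+ int ret;
+
+ do {
+ ret = qman_enqueue(fq, fd, 0);
+ } while (ret == -EBUSY);
+ return ret;
+}
+#endif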
+
+int qman_p_enqueue_orp(struct qman_portal *p, struct qman_fq *fq,
+ const struct qm_fd *fd, u32 flags,
+ struct qman_fq *orp, u16 orp_seqnum)
+{
+ struct qm_eqcr_entry *eq;
+ unsigned long irqflags __maybe_unused;
+
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+ eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
+ else
+#endif
+ eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
+ if (!eq)
+ return -EBUSY;
+ /* Process ORP-specifics here */
+ if (flags & QMAN_ENQUEUE_FLAG_NLIS)
+ orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
+ else {
+ orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
+ if (flags & QMAN_ENQUEUE_FLAG_NESN)
+ orp_seqnum |= QM_EQCR_SEQNUM_NESN;
+ else
+ /* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
+ orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
+ }
+ eq->seqnum = cpu_to_be16(orp_seqnum);
+ eq->orp = cpu_to_be32(orp->fqid);
+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
+ ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
+ 0 : QM_EQCR_VERB_CMD_ENQUEUE) |
+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+ /* NB: return success even if signal occurs before
+ * condition is true. pvb_commit guarantees success */
+ wait_event_interruptible(affine_queue,
+ (p->eqci_owned != fq));
+ else
+ wait_event(affine_queue, (p->eqci_owned != fq));
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(qman_p_enqueue_orp);
+
+int qman_enqueue_orp(struct qman_fq *fq, const struct qm_fd *fd, u32 flags,
+ struct qman_fq *orp, u16 orp_seqnum)
+{
+ struct qman_portal *p;
+ struct qm_eqcr_entry *eq;
+ unsigned long irqflags __maybe_unused;
+
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+ eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
+ else
+#endif
+ eq = try_eq_start(&p, &irqflags, fq, fd, flags);
+ if (!eq)
+ return -EBUSY;
+ /* Process ORP-specifics here */
+ if (flags & QMAN_ENQUEUE_FLAG_NLIS)
+ orp_seqnum |= QM_EQCR_SEQNUM_NLIS;
+ else {
+ orp_seqnum &= ~QM_EQCR_SEQNUM_NLIS;
+ if (flags & QMAN_ENQUEUE_FLAG_NESN)
+ orp_seqnum |= QM_EQCR_SEQNUM_NESN;
+ else
+ /* No need to check for QMAN_ENQUEUE_FLAG_HOLE */
+ orp_seqnum &= ~QM_EQCR_SEQNUM_NESN;
+ }
+ eq->seqnum = cpu_to_be16(orp_seqnum);
+ eq->orp = cpu_to_be32(orp->fqid);
+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_ORP |
+ ((flags & (QMAN_ENQUEUE_FLAG_HOLE | QMAN_ENQUEUE_FLAG_NESN)) ?
+ 0 : QM_EQCR_VERB_CMD_ENQUEUE) |
+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+ /* NB: return success even if signal occurs before
+ * condition is true. pvb_commit guarantees success */
+ wait_event_interruptible(affine_queue,
+ (p->eqci_owned != fq));
+ else
+ wait_event(affine_queue, (p->eqci_owned != fq));
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(qman_enqueue_orp);
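+
+/*
+ * Illustrative sketch (not built): order-restoration enqueue. The sequence
+ * number is typically taken from the dequeue entry that delivered the frame
+ * (dq->seqnum, byte-swapped in __poll_portal_fast() above).
+ */
+#if 0
+static int my_send_in_order(struct qman_fq *tx_fq, const struct qm_fd *fd,
+ struct qman_fq *orp_fq, u16 seqnum)
+{
+ return qman_enqueue_orp(tx_fq, fd, 0, orp_fq, seqnum);
+}
+#endif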
+
+int qman_p_enqueue_precommit(struct qman_portal *p, struct qman_fq *fq,
+ const struct qm_fd *fd, u32 flags,
+ qman_cb_precommit cb, void *cb_arg)
+{
+ struct qm_eqcr_entry *eq;
+ unsigned long irqflags __maybe_unused;
+
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+ eq = wait_p_eq_start(p, &irqflags, fq, fd, flags);
+ else
+#endif
+ eq = try_p_eq_start(p, &irqflags, fq, fd, flags);
+ if (!eq)
+ return -EBUSY;
+ /* invoke user supplied callback function before writing commit verb */
+ if (cb(cb_arg)) {
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ return -EINVAL;
+ }
+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+ /* Factor the below out, it's used from qman_enqueue_orp() too */
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+ /* NB: return success even if signal occurs before
+ * condition is true. pvb_commit guarantees success */
+ wait_event_interruptible(affine_queue,
+ (p->eqci_owned != fq));
+ else
+ wait_event(affine_queue, (p->eqci_owned != fq));
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(qman_p_enqueue_precommit);
+
+int qman_enqueue_precommit(struct qman_fq *fq, const struct qm_fd *fd,
+ u32 flags, qman_cb_precommit cb, void *cb_arg)
+{
+ struct qman_portal *p;
+ struct qm_eqcr_entry *eq;
+ unsigned long irqflags __maybe_unused;
+
+#ifdef CONFIG_FSL_DPA_CAN_WAIT
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT)
+ eq = wait_eq_start(&p, &irqflags, fq, fd, flags);
+ else
+#endif
+ eq = try_eq_start(&p, &irqflags, fq, fd, flags);
+ if (!eq)
+ return -EBUSY;
+ /* invoke user supplied callback function before writing commit verb */
+ if (cb(cb_arg)) {
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return -EINVAL;
+ }
+ /* Note: QM_EQCR_VERB_INTERRUPT == QMAN_ENQUEUE_FLAG_WAIT_SYNC */
+ qm_eqcr_pvb_commit(&p->p, QM_EQCR_VERB_CMD_ENQUEUE |
+ (flags & (QM_EQCR_VERB_COLOUR_MASK | QM_EQCR_VERB_INTERRUPT)));
+ /* Factor the below out, it's used from qman_enqueue_orp() too */
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+#ifdef CONFIG_FSL_DPA_CAN_WAIT_SYNC
+ if (unlikely((flags & QMAN_ENQUEUE_FLAG_WAIT) &&
+ (flags & QMAN_ENQUEUE_FLAG_WAIT_SYNC))) {
+ if (flags & QMAN_ENQUEUE_FLAG_WAIT_INT)
+ /* NB: return success even if signal occurs before
+ * condition is true. pvb_commit guarantees success */
+ wait_event_interruptible(affine_queue,
+ (p->eqci_owned != fq));
+ else
+ wait_event(affine_queue, (p->eqci_owned != fq));
+ }
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(qman_enqueue_precommit);
+
+int qman_modify_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+ u8 verb = QM_MCC_VERB_MODIFYCGR;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+ mcc = qm_mc_start(&p->p);
+ if (opts)
+ mcc->initcgr = *opts;
+ mcc->initcgr.we_mask = cpu_to_be16(mcc->initcgr.we_mask);
+ mcc->initcgr.cgr.wr_parm_g.word =
+ cpu_to_be32(mcc->initcgr.cgr.wr_parm_g.word);
+ mcc->initcgr.cgr.wr_parm_y.word =
+ cpu_to_be32(mcc->initcgr.cgr.wr_parm_y.word);
+ mcc->initcgr.cgr.wr_parm_r.word =
+ cpu_to_be32(mcc->initcgr.cgr.wr_parm_r.word);
+ mcc->initcgr.cgr.cscn_targ = cpu_to_be32(mcc->initcgr.cgr.cscn_targ);
+ mcc->initcgr.cgr.__cs_thres = cpu_to_be16(mcc->initcgr.cgr.__cs_thres);
+
+ mcc->initcgr.cgid = cgr->cgrid;
+ if (flags & QMAN_CGR_FLAG_USE_INIT)
+ verb = QM_MCC_VERB_INITCGR;
+ qm_mc_commit(&p->p, verb);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == verb);
+ res = mcr->result;
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return (res == QM_MCR_RESULT_OK) ? 0 : -EIO;
+}
+EXPORT_SYMBOL(qman_modify_cgr);
+
+#define TARG_MASK(n) (0x80000000 >> (n->config->public_cfg.channel - \
+ QM_CHANNEL_SWPORTAL0))
+#define TARG_DCP_MASK(n) (0x80000000 >> (10 + n))
+#define PORTAL_IDX(n) (n->config->public_cfg.channel - QM_CHANNEL_SWPORTAL0)
+
+static u8 qman_cgr_cpus[__CGR_NUM];
+
+int qman_create_cgr(struct qman_cgr *cgr, u32 flags,
+ struct qm_mcc_initcgr *opts)
+{
+ unsigned long irqflags __maybe_unused;
+ struct qm_mcr_querycgr cgr_state;
+ struct qm_mcc_initcgr local_opts;
+ int ret;
+ struct qman_portal *p;
+
+ /* We have to check that the provided CGRID is within the limits of the
+ * data-structures, for obvious reasons. However we'll let h/w take
+ * care of determining whether it's within the limits of what exists on
+ * the SoC. */
+ if (cgr->cgrid >= __CGR_NUM)
+ return -EINVAL;
+
+ preempt_disable();
+ p = get_affine_portal();
+ qman_cgr_cpus[cgr->cgrid] = smp_processor_id();
+ preempt_enable();
+
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ cgr->chan = p->config->public_cfg.channel;
+ spin_lock_irqsave(&p->cgr_lock, irqflags);
+
+ /* if no opts specified, just add it to the list */
+ if (!opts)
+ goto add_list;
+
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret)
+ goto release_lock;
+ if (opts)
+ local_opts = *opts;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ local_opts.cgr.cscn_targ_upd_ctrl =
+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p);
+ else
+ /* Overwrite TARG */
+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
+ TARG_MASK(p);
+ local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+ /* send init if flags indicate so */
+ if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+ ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &local_opts);
+ else
+ ret = qman_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ goto release_lock;
+add_list:
+ list_add(&cgr->node, &p->cgr_cbs);
+
+ /* Determine if newly added object requires its callback to be called */
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+ /* we can't go back, so proceed and return success, but scream
+ * and wail to the log file */
+ pr_crit("CGR HW state partially modified\n");
+ ret = 0;
+ goto release_lock;
+ }
+ if (cgr->cb && cgr_state.cgr.cscn_en && qman_cgrs_get(&p->cgrs[1],
+ cgr->cgrid))
+ cgr->cb(p, cgr, 1);
+release_lock:
+ spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_create_cgr);
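+
+/*
+ * Illustrative sketch (not built): creating a congestion group whose state
+ * changes are delivered to this cpu's portal. QM_CGR_WE_CSCN_EN is assumed
+ * from the Qman headers; my_cscn_cb just logs congestion entry/exit.
+ */
+#if 0
+static void my_cscn_cb(struct qman_portal *portal, struct qman_cgr *cgr,
+ int congested)
+{
+ pr_info("CGR %d %s congestion\n", cgr->cgrid,
+ congested ? "entered" : "exited");
+}
+
+static int my_setup_cgr(struct qman_cgr *cgr, u32 cgrid)
+{
+ struct qm_mcc_initcgr opts;
+
+ memset(&opts, 0, sizeof(opts));
+ opts.we_mask = QM_CGR_WE_CSCN_EN;
+ opts.cgr.cscn_en = 1;
+ cgr->cgrid = cgrid;
+ cgr->cb = my_cscn_cb;
+ return qman_create_cgr(cgr, QMAN_CGR_FLAG_USE_INIT, &opts);
+}
+#endif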
+
+int qman_create_cgr_to_dcp(struct qman_cgr *cgr, u32 flags, u16 dcp_portal,
+ struct qm_mcc_initcgr *opts)
+{
+ unsigned long irqflags __maybe_unused;
+ struct qm_mcc_initcgr local_opts;
+ struct qm_mcr_querycgr cgr_state;
+ int ret;
+
+ if ((qman_ip_rev & 0xFF00) < QMAN_REV30) {
+ pr_warn("This QMan version doesn't support to send CSCN to DCP portal\n");
+ return -EINVAL;
+ }
+ /* We have to check that the provided CGRID is within the limits of the
+ * data-structures, for obvious reasons. However we'll let h/w take
+ * care of determining whether it's within the limits of what exists on
+ * the SoC.
+ */
+ if (cgr->cgrid >= __CGR_NUM)
+ return -EINVAL;
+
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret)
+ return ret;
+
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ if (opts)
+ local_opts = *opts;
+
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ local_opts.cgr.cscn_targ_upd_ctrl =
+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT |
+ QM_CGR_TARG_UDP_CTRL_DCP | dcp_portal;
+ else
+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ |
+ TARG_DCP_MASK(dcp_portal);
+ local_opts.we_mask |= QM_CGR_WE_CSCN_TARG;
+
+ /* send init if flags indicate so */
+ if (opts && (flags & QMAN_CGR_FLAG_USE_INIT))
+ ret = qman_modify_cgr(cgr, QMAN_CGR_FLAG_USE_INIT,
+ &local_opts);
+ else
+ ret = qman_modify_cgr(cgr, 0, &local_opts);
+
+ return ret;
+}
+EXPORT_SYMBOL(qman_create_cgr_to_dcp);
+
+int qman_delete_cgr(struct qman_cgr *cgr)
+{
+ unsigned long irqflags __maybe_unused;
+ struct qm_mcr_querycgr cgr_state;
+ struct qm_mcc_initcgr local_opts;
+ int ret = 0;
+ struct qman_cgr *i;
+ struct qman_portal *p = get_affine_portal();
+
+ if (cgr->chan != p->config->public_cfg.channel) {
+ pr_crit("Attempting to delete cgr from different portal "
+ "than it was create: create 0x%x, delete 0x%x\n",
+ cgr->chan, p->config->public_cfg.channel);
+ ret = -EINVAL;
+ goto put_portal;
+ }
+ memset(&local_opts, 0, sizeof(struct qm_mcc_initcgr));
+ spin_lock_irqsave(&p->cgr_lock, irqflags);
+ list_del(&cgr->node);
+ /*
+ * If there are no other CGR objects for this CGRID in the list, update
+ * CSCN_TARG accordingly
+ */
+ list_for_each_entry(i, &p->cgr_cbs, node)
+ if ((i->cgrid == cgr->cgrid) && i->cb)
+ goto release_lock;
+ ret = qman_query_cgr(cgr, &cgr_state);
+ if (ret) {
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+ goto release_lock;
+ }
+ /* Overwrite TARG */
+ local_opts.we_mask = QM_CGR_WE_CSCN_TARG;
+ if ((qman_ip_rev & 0xFF00) >= QMAN_REV30)
+ local_opts.cgr.cscn_targ_upd_ctrl = PORTAL_IDX(p);
+ else
+ local_opts.cgr.cscn_targ = cgr_state.cgr.cscn_targ &
+ ~(TARG_MASK(p));
+ ret = qman_modify_cgr(cgr, 0, &local_opts);
+ if (ret)
+ /* add back to the list */
+ list_add(&cgr->node, &p->cgr_cbs);
+release_lock:
+ spin_unlock_irqrestore(&p->cgr_lock, irqflags);
+put_portal:
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_delete_cgr);
+
+struct cgr_comp {
+ struct qman_cgr *cgr;
+ struct completion completion;
+};
+
+static void qman_delete_cgr_smp_call(void *p)
+{
+ qman_delete_cgr((struct qman_cgr *)p);
+}
+
+void qman_delete_cgr_safe(struct qman_cgr *cgr)
+{
+ preempt_disable();
+ if (qman_cgr_cpus[cgr->cgrid] != smp_processor_id()) {
+ smp_call_function_single(qman_cgr_cpus[cgr->cgrid],
+ qman_delete_cgr_smp_call, cgr, true);
+ preempt_enable();
+ return;
+ }
+ qman_delete_cgr(cgr);
+ preempt_enable();
+}
+EXPORT_SYMBOL(qman_delete_cgr_safe);
+
+int qm_get_clock(u64 *clock_hz)
+{
+ if (!qman_clk) {
+ pr_warn("Qman clock speed is unknown\n");
+ return -EINVAL;
+ }
+ *clock_hz = (u64)qman_clk;
+ return 0;
+}
+EXPORT_SYMBOL(qm_get_clock);
+
+int qm_set_clock(u64 clock_hz)
+{
+ if (qman_clk)
+ return -1;
+ qman_clk = (u32)clock_hz;
+ return 0;
+}
+EXPORT_SYMBOL(qm_set_clock);
+
+/* CEETM management command */
+static int qman_ceetm_configure_lfqmt(struct qm_mcc_ceetm_lfqmt_config *opts)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+
+ mcc = qm_mc_start(&p->p);
+ mcc->lfqmt_config = *opts;
+ qm_mc_commit(&p->p, QM_CEETM_VERB_LFQMT_CONFIG);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_CEETM_VERB_LFQMT_CONFIG);
+ /* Read the result before releasing the portal; mcr points into it */
+ res = mcr->result;
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("CEETM: CONFIGURE LFQMT failed\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+int qman_ceetm_query_lfqmt(int lfqid,
+ struct qm_mcr_ceetm_lfqmt_query *lfqmt_query)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+
+ mcc = qm_mc_start(&p->p);
+ mcc->lfqmt_query.lfqid = lfqid;
+ qm_mc_commit(&p->p, QM_CEETM_VERB_LFQMT_QUERY);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_LFQMT_QUERY);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK)
+ *lfqmt_query = mcr->lfqmt_query;
+
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("CEETM: QUERY LFQMT failed\n");
+ return -EIO;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_query_lfqmt);
+
+static int qman_ceetm_configure_cq(struct qm_mcc_ceetm_cq_config *opts)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+
+ mcc = qm_mc_start(&p->p);
+ mcc->cq_config = *opts;
+ qm_mc_commit(&p->p, QM_CEETM_VERB_CQ_CONFIG);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ res = mcr->result;
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CQ_CONFIG);
+
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("CEETM: CONFIGURE CQ failed\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+int qman_ceetm_query_cq(unsigned int cqid, unsigned int dcpid,
+ struct qm_mcr_ceetm_cq_query *cq_query)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+
+ mcc = qm_mc_start(&p->p);
+ mcc->cq_query.cqid = cpu_to_be16(cqid);
+ mcc->cq_query.dcpid = dcpid;
+ qm_mc_commit(&p->p, QM_CEETM_VERB_CQ_QUERY);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CQ_QUERY);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK) {
+ *cq_query = mcr->cq_query;
+ hw_cq_query_to_cpu(cq_query);
+ }
+
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("CEETM: QUERY CQ failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_query_cq);
+
+static int qman_ceetm_configure_dct(struct qm_mcc_ceetm_dct_config *opts)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+
+ mcc = qm_mc_start(&p->p);
+ mcc->dct_config = *opts;
+ qm_mc_commit(&p->p, QM_CEETM_VERB_DCT_CONFIG);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_DCT_CONFIG);
+ res = mcr->result;
+
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("CEETM: CONFIGURE DCT failed\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int qman_ceetm_query_dct(struct qm_mcc_ceetm_dct_query *opts,
+ struct qm_mcr_ceetm_dct_query *dct_query)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p = get_affine_portal();
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+
+ PORTAL_IRQ_LOCK(p, irqflags);
+
+ mcc = qm_mc_start(&p->p);
+ mcc->dct_query = *opts;
+ qm_mc_commit(&p->p, QM_CEETM_VERB_DCT_QUERY);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_DCT_QUERY);
+ res = mcr->result;
+
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("CEETM: QUERY DCT failed\n");
+ return -EIO;
+ }
+
+ *dct_query = mcr->dct_query;
+ return 0;
+}
+
+static int qman_ceetm_configure_class_scheduler(
+ struct qm_mcc_ceetm_class_scheduler_config *opts)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+
+ mcc = qm_mc_start(&p->p);
+ mcc->csch_config = *opts;
+ qm_mc_commit(&p->p, QM_CEETM_VERB_CLASS_SCHEDULER_CONFIG);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_CEETM_VERB_CLASS_SCHEDULER_CONFIG);
+ res = mcr->result;
+
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("CEETM: CONFIGURE CLASS SCHEDULER failed\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int qman_ceetm_query_class_scheduler(struct qm_ceetm_channel *channel,
+ struct qm_mcr_ceetm_class_scheduler_query *query)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+
+ mcc = qm_mc_start(&p->p);
+ mcc->csch_query.cqcid = cpu_to_be16(channel->idx);
+ mcc->csch_query.dcpid = channel->dcp_idx;
+ qm_mc_commit(&p->p, QM_CEETM_VERB_CLASS_SCHEDULER_QUERY);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_CEETM_VERB_CLASS_SCHEDULER_QUERY);
+ res = mcr->result;
+
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("CEETM: QUERY CLASS SCHEDULER failed\n");
+ return -EIO;
+ }
+ *query = mcr->csch_query;
+ return 0;
+}
+
+static int qman_ceetm_configure_mapping_shaper_tcfc(
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config *opts)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+
+ mcc = qm_mc_start(&p->p);
+ mcc->mst_config = *opts;
+ qm_mc_commit(&p->p, QM_CEETM_VERB_MAPPING_SHAPER_TCFC_CONFIG);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_CEETM_VERB_MAPPING_SHAPER_TCFC_CONFIG);
+ res = mcr->result;
+
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("CEETM: CONFIGURE CHANNEL MAPPING failed\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+static int qman_ceetm_query_mapping_shaper_tcfc(
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query *opts,
+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query *response)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+
+ mcc = qm_mc_start(&p->p);
+ mcc->mst_query = *opts;
+ qm_mc_commit(&p->p, QM_CEETM_VERB_MAPPING_SHAPER_TCFC_QUERY);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_CEETM_VERB_MAPPING_SHAPER_TCFC_QUERY);
+ res = mcr->result;
+
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("CEETM: QUERY CHANNEL MAPPING failed\n");
+ return -EIO;
+ }
+
+ *response = mcr->mst_query;
+ return 0;
+}
+
+static int qman_ceetm_configure_ccgr(struct qm_mcc_ceetm_ccgr_config *opts)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+
+ mcc = qm_mc_start(&p->p);
+ mcc->ccgr_config = *opts;
+
+ qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_CONFIG);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CCGR_CONFIG);
+
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("CEETM: CONFIGURE CCGR failed\n");
+ return -EIO;
+ }
+ return 0;
+}
+
+int qman_ceetm_query_ccgr(struct qm_mcc_ceetm_ccgr_query *ccgr_query,
+ struct qm_mcr_ceetm_ccgr_query *response)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+
+ mcc = qm_mc_start(&p->p);
+ mcc->ccgr_query.ccgrid = cpu_to_be16(ccgr_query->ccgrid);
+ mcc->ccgr_query.dcpid = ccgr_query->dcpid;
+ qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_QUERY);
+
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_CEETM_VERB_CCGR_QUERY);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK) {
+ *response = mcr->ccgr_query;
+ hw_ccgr_query_to_cpu(response);
+ }
+
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("CEETM: QUERY CCGR failed\n");
+ return -EIO;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_query_ccgr);
+
+static int qman_ceetm_cq_peek_pop_xsfdrread(struct qm_ceetm_cq *cq,
+ u8 command_type, u16 xsfdr,
+ struct qm_mcr_ceetm_cq_peek_pop_xsfdrread *cq_ppxr)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+
+ mcc = qm_mc_start(&p->p);
+ switch (command_type) {
+ case 0:
+ case 1:
+ mcc->cq_ppxr.cqid = (cq->parent->idx << 4) | cq->idx;
+ break;
+ case 2:
+ mcc->cq_ppxr.xsfdr = xsfdr;
+ break;
+ default:
+ break;
+ }
+ mcc->cq_ppxr.ct = command_type;
+ mcc->cq_ppxr.dcpid = cq->parent->dcp_idx;
+ qm_mc_commit(&p->p, QM_CEETM_VERB_CQ_PEEK_POP_XFDRREAD);
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_CEETM_VERB_CQ_PEEK_POP_XFDRREAD);
+
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("CEETM: CQ PEEK/POP/XSFDR READ failed\n");
+ return -EIO;
+ }
+ *cq_ppxr = mcr->cq_ppxr;
+ return 0;
+}
+
+static int qman_ceetm_query_statistics(u16 cid,
+ enum qm_dc_portal dcp_idx,
+ u16 command_type,
+ struct qm_mcr_ceetm_statistics_query *query_result)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+
+ mcc = qm_mc_start(&p->p);
+ mcc->stats_query_write.cid = cid;
+ mcc->stats_query_write.dcpid = dcp_idx;
+ mcc->stats_query_write.ct = command_type;
+ qm_mc_commit(&p->p, QM_CEETM_VERB_STATISTICS_QUERY_WRITE);
+
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_CEETM_VERB_STATISTICS_QUERY_WRITE);
+
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("CEETM: STATISTICS QUERY failed\n");
+ return -EIO;
+ }
+ *query_result = mcr->stats_query;
+ return 0;
+}
+
+int qman_ceetm_query_write_statistics(u16 cid, enum qm_dc_portal dcp_idx,
+ u16 command_type, u64 frame_count,
+ u64 byte_count)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+
+ mcc = qm_mc_start(&p->p);
+ mcc->stats_query_write.cid = cid;
+ mcc->stats_query_write.dcpid = dcp_idx;
+ mcc->stats_query_write.ct = command_type;
+ mcc->stats_query_write.frm_cnt = frame_count;
+ mcc->stats_query_write.byte_cnt = byte_count;
+ qm_mc_commit(&p->p, QM_CEETM_VERB_STATISTICS_QUERY_WRITE);
+
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_CEETM_VERB_STATISTICS_QUERY_WRITE);
+
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+
+ res = mcr->result;
+ if (res != QM_MCR_RESULT_OK) {
+ pr_err("CEETM: STATISTICS WRITE failed\n");
+ return -EIO;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_query_write_statistics);
+
+int qman_ceetm_bps2tokenrate(u64 bps, struct qm_ceetm_rate *token_rate,
+ int rounding)
+{
+ u16 pres;
+ u64 temp;
+ u64 qman_freq;
+ int ret;
+
+ /* Read PRES from CEET_CFG_PRES register */
+ ret = qman_ceetm_get_prescaler(&pres);
+ if (ret)
+ return -EINVAL;
+
+ ret = qm_get_clock(&qman_freq);
+ if (ret)
+ return -EINVAL;
+
+ /* token-rate = bytes-per-second * update-reference-period
+ *
+ * Where token-rate is N/8192 for an integer N, and
+ * update-reference-period is (2^22)/(PRES*QHz), where PRES
+ * is the prescaler value and QHz is the QMan clock frequency.
+ * So:
+ *
+ * token-rate = (bytes-per-second*2^22)/(PRES*QHz)
+ *
+ * Converting to bits-per-second gives:
+ *
+ * token-rate = (bps*2^19) / (PRES*QHz)
+ * N = (bps*2^32) / (PRES*QHz)
+ *
+ * And to avoid 64-bit overflow if 'bps' is larger than 4Gbps
+ * (yet minimise rounding error if 'bps' is small), we reorganise
+ * the formula to use two 16-bit shifts rather than one 32-bit shift:
+ * N = (((bps*2^16)/PRES)*2^16)/QHz
+ */
+ temp = ROUNDING((bps << 16), pres, rounding);
+ temp = ROUNDING((temp << 16), qman_freq, rounding);
+ token_rate->whole = temp >> 13;
+ token_rate->fraction = temp & (((u64)1 << 13) - 1);
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_bps2tokenrate);
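+
+/* Worked example with illustrative (not hardware-mandated) numbers: for
+ * bps = 10^9 (1Gbps), PRES = 1000 and a 600MHz QMan clock,
+ * N = (10^9 * 2^32) / (1000 * 6*10^8) ~= 7158278, so token_rate->whole
+ * = N >> 13 = 873 and token_rate->fraction = N & 0x1FFF.
+ */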
+
+int qman_ceetm_tokenrate2bps(const struct qm_ceetm_rate *token_rate, u64 *bps,
+ int rounding)
+{
+ u16 pres;
+ u64 temp;
+ u64 qman_freq;
+ int ret;
+
+ /* Read PRES from CEET_CFG_PRES register */
+ ret = qman_ceetm_get_prescaler(&pres);
+ if (ret)
+ return -EINVAL;
+
+ ret = qm_get_clock(&qman_freq);
+ if (ret)
+ return -EINVAL;
+
+ /* bytes-per-second = token-rate / update-reference-period
+ *
+ * where "token-rate" is N/8192 for an integer N, and
+ * "update-reference-period" is (2^22)/(PRES*QHz), where PRES is
+ * the prescaler value and QHz is the QMan clock frequency. So:
+ *
+ * bytes-per-second = (N/8192) / (4194304/(PRES*QHz))
+ * = N*PRES*QHz / (4194304*8192)
+ * = N*PRES*QHz / (2^35)
+ *
+ * Converting to bits-per-second gives:
+ *
+ * bps = N*PRES*QHz / (2^32)
+ *
+ * Note, the numerator has a maximum width of 72 bits! So to
+ * avoid 64-bit overflow errors, we calculate PRES*QHz (maximum
+ * width 48 bits) divided by 2^16 (reducing to maximum 32 bits)
+ * before multiplying by N, then divide by the remaining 2^16:
+ *
+ * temp = PRES*QHz / (2^16)
+ * bps = temp*N / (2^16)
+ */
+ temp = ROUNDING(qman_freq * pres, (u64)1 << 16, rounding);
+ temp *= ((token_rate->whole << 13) + token_rate->fraction);
+ *bps = ROUNDING(temp, (u64)(1) << 16, rounding);
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_tokenrate2bps);
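+
+/* This is the inverse of qman_ceetm_bps2tokenrate(): with the same
+ * illustrative numbers (PRES = 1000, 600MHz clock), feeding the
+ * {whole = 873} code back through this routine recovers roughly 1Gbps,
+ * subject to the caller's rounding mode.
+ */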
+
+int qman_ceetm_sp_claim(struct qm_ceetm_sp **sp, enum qm_dc_portal dcp_idx,
+ unsigned int sp_idx)
+{
+ struct qm_ceetm_sp *p;
+
+ DPA_ASSERT((dcp_idx == qm_dc_portal_fman0) ||
+ (dcp_idx == qm_dc_portal_fman1));
+
+ if ((sp_idx < qman_ceetms[dcp_idx].sp_range[0]) ||
+ (sp_idx >= (qman_ceetms[dcp_idx].sp_range[0] +
+ qman_ceetms[dcp_idx].sp_range[1]))) {
+ pr_err("Sub-portal index doesn't exist\n");
+ return -EINVAL;
+ }
+
+ list_for_each_entry(p, &qman_ceetms[dcp_idx].sub_portals, node) {
+ if ((p->idx == sp_idx) && (p->is_claimed == 0)) {
+ p->is_claimed = 1;
+ *sp = p;
+ return 0;
+ }
+ }
+ pr_err("The sub-portal#%d is not available!\n", sp_idx);
+ return -ENODEV;
+}
+EXPORT_SYMBOL(qman_ceetm_sp_claim);
+
+int qman_ceetm_sp_release(struct qm_ceetm_sp *sp)
+{
+ struct qm_ceetm_sp *p;
+
+ if (sp->lni && sp->lni->is_claimed == 1) {
+ pr_err("The dependency of sub-portal has not been released!\n");
+ return -EBUSY;
+ }
+
+ list_for_each_entry(p, &qman_ceetms[sp->dcp_idx].sub_portals, node) {
+ if (p->idx == sp->idx) {
+ p->is_claimed = 0;
+ p->lni = NULL;
+ }
+ }
+ /* Disable CEETM mode of this sub-portal */
+ qman_sp_disable_ceetm_mode(sp->dcp_idx, sp->idx);
+
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_sp_release);
+
+int qman_ceetm_lni_claim(struct qm_ceetm_lni **lni, enum qm_dc_portal dcp_idx,
+ unsigned int lni_idx)
+{
+ struct qm_ceetm_lni *p;
+
+ if ((lni_idx < qman_ceetms[dcp_idx].lni_range[0]) ||
+ (lni_idx >= (qman_ceetms[dcp_idx].lni_range[0] +
+ qman_ceetms[dcp_idx].lni_range[1]))) {
+ pr_err("The lni index is out of range\n");
+ return -EINVAL;
+ }
+
+ list_for_each_entry(p, &qman_ceetms[dcp_idx].lnis, node) {
+ if ((p->idx == lni_idx) && (p->is_claimed == 0)) {
+ *lni = p;
+ p->is_claimed = 1;
+ return 0;
+ }
+ }
+
+ pr_err("The LNI#%d is not available!\n", lni_idx);
+ return -EINVAL;
+}
+EXPORT_SYMBOL(qman_ceetm_lni_claim);
+
+int qman_ceetm_lni_release(struct qm_ceetm_lni *lni)
+{
+ struct qm_ceetm_lni *p;
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
+
+ if (!list_empty(&lni->channels)) {
+ pr_err("The LNI dependencies are not released!\n");
+ return -EBUSY;
+ }
+
+ list_for_each_entry(p, &qman_ceetms[lni->dcp_idx].lnis, node) {
+ if (p->idx == lni->idx) {
+ p->shaper_enable = 0;
+ p->shaper_couple = 0;
+ p->cr_token_rate.whole = 0;
+ p->cr_token_rate.fraction = 0;
+ p->er_token_rate.whole = 0;
+ p->er_token_rate.fraction = 0;
+ p->cr_token_bucket_limit = 0;
+ p->er_token_bucket_limit = 0;
+ p->is_claimed = 0;
+ }
+ }
+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
+ config_opts.dcpid = lni->dcp_idx;
+ memset(&config_opts.shaper_config, 0,
+ sizeof(config_opts.shaper_config));
+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
+}
+EXPORT_SYMBOL(qman_ceetm_lni_release);
+
+int qman_ceetm_sp_set_lni(struct qm_ceetm_sp *sp, struct qm_ceetm_lni *lni)
+{
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
+
+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_SP_MAPPING | sp->idx);
+ config_opts.dcpid = sp->dcp_idx;
+ config_opts.sp_mapping.map_lni_id = lni->idx;
+ sp->lni = lni;
+
+ if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts))
+ return -EINVAL;
+
+ /* Enable CEETM mode for this sub-portal */
+ return qman_sp_enable_ceetm_mode(sp->dcp_idx, sp->idx);
+}
+EXPORT_SYMBOL(qman_ceetm_sp_set_lni);
+
+int qman_ceetm_sp_get_lni(struct qm_ceetm_sp *sp, unsigned int *lni_idx)
+{
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
+
+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_SP_MAPPING | sp->idx);
+ query_opts.dcpid = sp->dcp_idx;
+ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
+ pr_err("Can't get SP <-> LNI mapping\n");
+ return -EINVAL;
+ }
+ *lni_idx = query_result.sp_mapping_query.map_lni_id;
+ if (sp->lni)
+ sp->lni->idx = query_result.sp_mapping_query.map_lni_id;
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_sp_get_lni);
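+
+/* A minimal claim-and-map sketch (indices illustrative, error handling
+ * elided):
+ *
+ *   struct qm_ceetm_sp *sp;
+ *   struct qm_ceetm_lni *lni;
+ *
+ *   qman_ceetm_sp_claim(&sp, qm_dc_portal_fman0, 0);
+ *   qman_ceetm_lni_claim(&lni, qm_dc_portal_fman0, 0);
+ *   qman_ceetm_sp_set_lni(sp, lni); // also enables CEETM mode on the SP
+ */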
+
+int qman_ceetm_lni_enable_shaper(struct qm_ceetm_lni *lni, int coupled,
+ int oal)
+{
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
+
+ if (lni->shaper_enable) {
+ pr_err("The shaper has already been enabled\n");
+ return -EINVAL;
+ }
+ lni->shaper_enable = 1;
+ lni->shaper_couple = coupled;
+ lni->oal = oal;
+
+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
+ config_opts.dcpid = lni->dcp_idx;
+ config_opts.shaper_config.cpl = coupled;
+ config_opts.shaper_config.oal = oal;
+ config_opts.shaper_config.crtcr = cpu_to_be24((lni->cr_token_rate.whole
+ << 13) | lni->cr_token_rate.fraction);
+ config_opts.shaper_config.ertcr = cpu_to_be24((lni->er_token_rate.whole
+ << 13) | lni->er_token_rate.fraction);
+ config_opts.shaper_config.crtbl =
+ cpu_to_be16(lni->cr_token_bucket_limit);
+ config_opts.shaper_config.ertbl =
+ cpu_to_be16(lni->er_token_bucket_limit);
+ config_opts.shaper_config.mps = 60;
+
+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
+}
+EXPORT_SYMBOL(qman_ceetm_lni_enable_shaper);
+
+int qman_ceetm_lni_disable_shaper(struct qm_ceetm_lni *lni)
+{
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
+
+ if (!lni->shaper_enable) {
+ pr_err("The shaper has been disabled\n");
+ return -EINVAL;
+ }
+
+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
+ config_opts.dcpid = lni->dcp_idx;
+ config_opts.shaper_config.cpl = lni->shaper_couple;
+ config_opts.shaper_config.oal = lni->oal;
+ config_opts.shaper_config.crtbl =
+ cpu_to_be16(lni->cr_token_bucket_limit);
+ config_opts.shaper_config.ertbl =
+ cpu_to_be16(lni->er_token_bucket_limit);
+ /* Set CR/ER rate with all 1's to configure an infinite rate, thus
+ * disable the shaping.
+ */
+ config_opts.shaper_config.crtcr = 0xFFFFFF;
+ config_opts.shaper_config.ertcr = 0xFFFFFF;
+ config_opts.shaper_config.mps = 60;
+ lni->shaper_enable = 0;
+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
+}
+EXPORT_SYMBOL(qman_ceetm_lni_disable_shaper);
+
+int qman_ceetm_lni_is_shaper_enabled(struct qm_ceetm_lni *lni)
+{
+ return lni->shaper_enable;
+}
+EXPORT_SYMBOL(qman_ceetm_lni_is_shaper_enabled);
+
+int qman_ceetm_lni_set_commit_rate(struct qm_ceetm_lni *lni,
+ const struct qm_ceetm_rate *token_rate,
+ u16 token_limit)
+{
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
+ int ret;
+
+ lni->cr_token_rate.whole = token_rate->whole;
+ lni->cr_token_rate.fraction = token_rate->fraction;
+ lni->cr_token_bucket_limit = token_limit;
+ if (!lni->shaper_enable)
+ return 0;
+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
+ query_opts.dcpid = lni->dcp_idx;
+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts,
+ &query_result);
+ if (ret) {
+ pr_err("Fail to get current LNI shaper setting\n");
+ return -EINVAL;
+ }
+
+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
+ config_opts.dcpid = lni->dcp_idx;
+ config_opts.shaper_config.crtcr = cpu_to_be24((token_rate->whole << 13)
+ | (token_rate->fraction));
+ config_opts.shaper_config.crtbl = cpu_to_be16(token_limit);
+ config_opts.shaper_config.cpl = query_result.shaper_query.cpl;
+ config_opts.shaper_config.oal = query_result.shaper_query.oal;
+ config_opts.shaper_config.ertcr = query_result.shaper_query.ertcr;
+ config_opts.shaper_config.ertbl = query_result.shaper_query.ertbl;
+ config_opts.shaper_config.mps = query_result.shaper_query.mps;
+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
+}
+EXPORT_SYMBOL(qman_ceetm_lni_set_commit_rate);
+
+int qman_ceetm_lni_set_commit_rate_bps(struct qm_ceetm_lni *lni,
+ u64 bps,
+ u16 token_limit)
+{
+ struct qm_ceetm_rate token_rate;
+ int ret;
+
+ ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0);
+ if (ret) {
+ pr_err("Can not convert bps to token rate\n");
+ return -EINVAL;
+ }
+
+ return qman_ceetm_lni_set_commit_rate(lni, &token_rate, token_limit);
+}
+EXPORT_SYMBOL(qman_ceetm_lni_set_commit_rate_bps);
+
+int qman_ceetm_lni_get_commit_rate(struct qm_ceetm_lni *lni,
+ struct qm_ceetm_rate *token_rate,
+ u16 *token_limit)
+{
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
+ int ret;
+
+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
+ query_opts.dcpid = lni->dcp_idx;
+
+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
+ if (ret) {
+ pr_err("The LNI CR rate or limit is not set\n");
+ return -EINVAL;
+ }
+ token_rate->whole = be24_to_cpu(query_result.shaper_query.crtcr) >> 13;
+ token_rate->fraction = be24_to_cpu(query_result.shaper_query.crtcr) &
+ 0x1FFF;
+ *token_limit = be16_to_cpu(query_result.shaper_query.crtbl);
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_lni_get_commit_rate);
+
+int qman_ceetm_lni_get_commit_rate_bps(struct qm_ceetm_lni *lni,
+ u64 *bps, u16 *token_limit)
+{
+ struct qm_ceetm_rate token_rate;
+ int ret;
+
+ ret = qman_ceetm_lni_get_commit_rate(lni, &token_rate, token_limit);
+ if (ret) {
+ pr_err("The LNI CR rate or limit is not available\n");
+ return -EINVAL;
+ }
+
+ return qman_ceetm_tokenrate2bps(&token_rate, bps, 0);
+}
+EXPORT_SYMBOL(qman_ceetm_lni_get_commit_rate_bps);
+
+int qman_ceetm_lni_set_excess_rate(struct qm_ceetm_lni *lni,
+ const struct qm_ceetm_rate *token_rate,
+ u16 token_limit)
+{
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
+ int ret;
+
+ lni->er_token_rate.whole = token_rate->whole;
+ lni->er_token_rate.fraction = token_rate->fraction;
+ lni->er_token_bucket_limit = token_limit;
+ if (!lni->shaper_enable)
+ return 0;
+
+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
+ query_opts.dcpid = lni->dcp_idx;
+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts,
+ &query_result);
+ if (ret) {
+ pr_err("Fail to get current LNI shaper setting\n");
+ return -EINVAL;
+ }
+
+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
+ config_opts.dcpid = lni->dcp_idx;
+ config_opts.shaper_config.ertcr = cpu_to_be24(
+ (token_rate->whole << 13) | (token_rate->fraction));
+ config_opts.shaper_config.ertbl = cpu_to_be16(token_limit);
+ config_opts.shaper_config.cpl = query_result.shaper_query.cpl;
+ config_opts.shaper_config.oal = query_result.shaper_query.oal;
+ config_opts.shaper_config.crtcr = query_result.shaper_query.crtcr;
+ config_opts.shaper_config.crtbl = query_result.shaper_query.crtbl;
+ config_opts.shaper_config.mps = query_result.shaper_query.mps;
+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
+}
+EXPORT_SYMBOL(qman_ceetm_lni_set_excess_rate);
+
+int qman_ceetm_lni_set_excess_rate_bps(struct qm_ceetm_lni *lni,
+ u64 bps,
+ u16 token_limit)
+{
+ struct qm_ceetm_rate token_rate;
+ int ret;
+
+ ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0);
+ if (ret) {
+ pr_err("Can not convert bps to token rate\n");
+ return -EINVAL;
+ }
+ return qman_ceetm_lni_set_excess_rate(lni, &token_rate, token_limit);
+}
+EXPORT_SYMBOL(qman_ceetm_lni_set_excess_rate_bps);
+
+int qman_ceetm_lni_get_excess_rate(struct qm_ceetm_lni *lni,
+ struct qm_ceetm_rate *token_rate,
+ u16 *token_limit)
+{
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
+ int ret;
+
+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_LNI_SHAPER | lni->idx);
+ query_opts.dcpid = lni->dcp_idx;
+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
+ if (ret) {
+ pr_err("The LNI ER rate or limit is not set\n");
+ return -EINVAL;
+ }
+ token_rate->whole = be24_to_cpu(query_result.shaper_query.ertcr) >> 13;
+ token_rate->fraction = be24_to_cpu(query_result.shaper_query.ertcr) &
+ 0x1FFF;
+ *token_limit = be16_to_cpu(query_result.shaper_query.ertbl);
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_lni_get_excess_rate);
+
+int qman_ceetm_lni_get_excess_rate_bps(struct qm_ceetm_lni *lni,
+ u64 *bps, u16 *token_limit)
+{
+ struct qm_ceetm_rate token_rate;
+ int ret;
+
+ ret = qman_ceetm_lni_get_excess_rate(lni, &token_rate, token_limit);
+ if (ret) {
+ pr_err("The LNI ER rate or limit is not available\n");
+ return -EINVAL;
+ }
+
+ return qman_ceetm_tokenrate2bps(&token_rate, bps, 0);
+}
+EXPORT_SYMBOL(qman_ceetm_lni_get_excess_rate_bps);
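+
+/* Typical LNI shaper bring-up, with illustrative rates. The set_*_rate
+ * calls only cache values while the shaper is disabled;
+ * qman_ceetm_lni_enable_shaper() then programs the cached rates into
+ * hardware:
+ *
+ *   qman_ceetm_lni_set_commit_rate_bps(lni, 1000000000ULL, limit);
+ *   qman_ceetm_lni_set_excess_rate_bps(lni, 500000000ULL, limit);
+ *   qman_ceetm_lni_enable_shaper(lni, 1, oal);
+ */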
+
+#define QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(n) ((15 - (n)) * 4)
+#define QMAN_CEETM_LNITCFCC_ENABLE 0x8
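+/*
+ * Per the macros above, lnitcfcc packs one 4-bit field per CQ level
+ * into a 64-bit word, with CQ level 0 in the most-significant nibble;
+ * within each nibble, bit 3 enables TCFC for that level and bits 2:0
+ * hold the traffic class.
+ */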
+int qman_ceetm_lni_set_tcfcc(struct qm_ceetm_lni *lni,
+ unsigned int cq_level,
+ int traffic_class)
+{
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
+ u64 lnitcfcc;
+
+ if ((cq_level > 15) || (traffic_class > 7)) {
+ pr_err("The CQ or traffic class id is out of range\n");
+ return -EINVAL;
+ }
+
+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_TCFC | lni->idx);
+ query_opts.dcpid = lni->dcp_idx;
+ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
+ pr_err("Fail to query tcfcc\n");
+ return -EINVAL;
+ }
+
+ lnitcfcc = be64_to_cpu(query_result.tcfc_query.lnitcfcc);
+ if (traffic_class == -1) {
+ /* disable tcfc for this CQ */
+ lnitcfcc &= ~((u64)QMAN_CEETM_LNITCFCC_ENABLE <<
+ QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level));
+ } else {
+ lnitcfcc &= ~((u64)0xF <<
+ QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level));
+ lnitcfcc |= ((u64)(QMAN_CEETM_LNITCFCC_ENABLE |
+ traffic_class)) <<
+ QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level);
+ }
+ config_opts.tcfc_config.lnitcfcc = cpu_to_be64(lnitcfcc);
+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_TCFC | lni->idx);
+ config_opts.dcpid = lni->dcp_idx;
+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
+}
+EXPORT_SYMBOL(qman_ceetm_lni_set_tcfcc);
+
+#define QMAN_CEETM_LNITCFCC_TC_MASK 0x7
+int qman_ceetm_lni_get_tcfcc(struct qm_ceetm_lni *lni, unsigned int cq_level,
+ int *traffic_class)
+{
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
+ int ret;
+ u8 lnitcfcc;
+
+ if (cq_level > 15) {
+ pr_err("the CQ level is out of range\n");
+ return -EINVAL;
+ }
+
+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_TCFC | lni->idx);
+ query_opts.dcpid = lni->dcp_idx;
+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
+ if (ret)
+ return ret;
+ lnitcfcc = (u8)(be64_to_cpu(query_result.tcfc_query.lnitcfcc) >>
+ QMAN_CEETM_LNITCFCC_CQ_LEVEL_SHIFT(cq_level));
+ if (lnitcfcc & QMAN_CEETM_LNITCFCC_ENABLE)
+ *traffic_class = lnitcfcc & QMAN_CEETM_LNITCFCC_TC_MASK;
+ else
+ *traffic_class = -1;
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_lni_get_tcfcc);
+
+int qman_ceetm_channel_claim(struct qm_ceetm_channel **channel,
+ struct qm_ceetm_lni *lni)
+{
+ struct qm_ceetm_channel *p;
+ u32 channel_idx;
+ int ret = 0;
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
+
+ if (lni->dcp_idx == qm_dc_portal_fman0) {
+ ret = qman_alloc_ceetm0_channel(&channel_idx);
+ } else if (lni->dcp_idx == qm_dc_portal_fman1) {
+ ret = qman_alloc_ceetm1_channel(&channel_idx);
+ } else {
+ pr_err("dcp_idx %u does not correspond to a known fman in this driver\n",
+ lni->dcp_idx);
+ return -EINVAL;
+ }
+
+ if (ret) {
+ pr_err("The is no channel available for LNI#%d\n", lni->idx);
+ return -ENODEV;
+ }
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+ p->idx = channel_idx;
+ p->dcp_idx = lni->dcp_idx;
+ p->lni_idx = lni->idx;
+ list_add_tail(&p->node, &lni->channels);
+ INIT_LIST_HEAD(&p->class_queues);
+ INIT_LIST_HEAD(&p->ccgs);
+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
+ channel_idx);
+ config_opts.dcpid = lni->dcp_idx;
+ config_opts.channel_mapping.map_lni_id = lni->idx;
+ config_opts.channel_mapping.map_shaped = 0;
+ if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) {
+ pr_err("Can't map channel#%d for LNI#%d\n",
+ channel_idx, lni->idx);
+ list_del(&p->node);
+ kfree(p);
+ return -EINVAL;
+ }
+ *channel = p;
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_channel_claim);
+
+int qman_ceetm_channel_release(struct qm_ceetm_channel *channel)
+{
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
+ if (!list_empty(&channel->class_queues)) {
+ pr_err("CEETM channel#%d has class queue unreleased!\n",
+ channel->idx);
+ return -EBUSY;
+ }
+ if (!list_empty(&channel->ccgs)) {
+ pr_err("CEETM channel#%d has ccg unreleased!\n",
+ channel->idx);
+ return -EBUSY;
+ }
+
+ /* Validate that channel->dcp_idx maps to an FMan known to this driver */
+ if ((channel->dcp_idx != qm_dc_portal_fman0) &&
+ (channel->dcp_idx != qm_dc_portal_fman1)) {
+ pr_err("dcp_idx %u does not correspond to a known fman in this driver\n",
+ channel->dcp_idx);
+ return -EINVAL;
+ }
+
+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
+ channel->idx);
+ config_opts.dcpid = channel->dcp_idx;
+ memset(&config_opts.shaper_config, 0,
+ sizeof(config_opts.shaper_config));
+ if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) {
+ pr_err("Can't reset channel shapping parameters\n");
+ return -EINVAL;
+ }
+
+ if (channel->dcp_idx == qm_dc_portal_fman0)
+ qman_release_ceetm0_channelid(channel->idx);
+ else
+ qman_release_ceetm1_channelid(channel->idx);
+ list_del(&channel->node);
+ kfree(channel);
+
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_channel_release);
+
+int qman_ceetm_channel_enable_shaper(struct qm_ceetm_channel *channel,
+ int coupled)
+{
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
+
+ if (channel->shaper_enable == 1) {
+ pr_err("This channel shaper has been enabled!\n");
+ return -EINVAL;
+ }
+
+ channel->shaper_enable = 1;
+ channel->shaper_couple = coupled;
+
+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
+ channel->idx);
+ query_opts.dcpid = channel->dcp_idx;
+
+ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
+ pr_err("Can't query channel mapping\n");
+ return -EINVAL;
+ }
+
+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
+ channel->idx);
+ config_opts.dcpid = channel->dcp_idx;
+ config_opts.channel_mapping.map_lni_id =
+ query_result.channel_mapping_query.map_lni_id;
+ config_opts.channel_mapping.map_shaped = 1;
+ if (qman_ceetm_configure_mapping_shaper_tcfc(&config_opts)) {
+ pr_err("Can't enable shaper for channel #%d\n", channel->idx);
+ return -EINVAL;
+ }
+
+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
+ channel->idx);
+ config_opts.shaper_config.cpl = coupled;
+ config_opts.shaper_config.crtcr =
+ cpu_to_be24((channel->cr_token_rate.whole
+ << 13) |
+ channel->cr_token_rate.fraction);
+ config_opts.shaper_config.ertcr =
+ cpu_to_be24(channel->er_token_rate.whole
+ << 13 |
+ channel->er_token_rate.fraction);
+ config_opts.shaper_config.crtbl =
+ cpu_to_be16(channel->cr_token_bucket_limit);
+ config_opts.shaper_config.ertbl =
+ cpu_to_be16(channel->er_token_bucket_limit);
+
+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
+}
+EXPORT_SYMBOL(qman_ceetm_channel_enable_shaper);
+
+int qman_ceetm_channel_disable_shaper(struct qm_ceetm_channel *channel)
+{
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
+
+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
+ channel->idx);
+ query_opts.dcpid = channel->dcp_idx;
+
+ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
+ pr_err("Can't query channel mapping\n");
+ return -EINVAL;
+ }
+
+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
+ channel->idx);
+ config_opts.dcpid = channel->dcp_idx;
+ config_opts.channel_mapping.map_shaped = 0;
+ config_opts.channel_mapping.map_lni_id =
+ query_result.channel_mapping_query.map_lni_id;
+
+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
+}
+EXPORT_SYMBOL(qman_ceetm_channel_disable_shaper);
+
+int qman_ceetm_channel_is_shaper_enabled(struct qm_ceetm_channel *channel)
+{
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
+
+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_MAPPING |
+ channel->idx);
+ query_opts.dcpid = channel->dcp_idx;
+
+ if (qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result)) {
+ pr_err("Can't query channel mapping\n");
+ return -EINVAL;
+ }
+
+ return query_result.channel_mapping_query.map_shaped;
+}
+EXPORT_SYMBOL(qman_ceetm_channel_is_shaper_enabled);
+
+int qman_ceetm_channel_set_commit_rate(struct qm_ceetm_channel *channel,
+ const struct qm_ceetm_rate *token_rate,
+ u16 token_limit)
+{
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
+ int ret;
+
+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
+ channel->idx);
+ query_opts.dcpid = channel->dcp_idx;
+
+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
+ if (ret) {
+ pr_err("Fail to get the current channel shaper setting\n");
+ return -EINVAL;
+ }
+
+ channel->cr_token_rate.whole = token_rate->whole;
+ channel->cr_token_rate.fraction = token_rate->fraction;
+ channel->cr_token_bucket_limit = token_limit;
+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
+ channel->idx);
+ config_opts.dcpid = channel->dcp_idx;
+ config_opts.shaper_config.crtcr = cpu_to_be24((token_rate->whole
+ << 13) | (token_rate->fraction));
+ config_opts.shaper_config.crtbl = cpu_to_be16(token_limit);
+ config_opts.shaper_config.cpl = query_result.shaper_query.cpl;
+ config_opts.shaper_config.ertcr = query_result.shaper_query.ertcr;
+ config_opts.shaper_config.ertbl = query_result.shaper_query.ertbl;
+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
+}
+EXPORT_SYMBOL(qman_ceetm_channel_set_commit_rate);
+
+int qman_ceetm_channel_set_commit_rate_bps(struct qm_ceetm_channel *channel,
+ u64 bps, u16 token_limit)
+{
+ struct qm_ceetm_rate token_rate;
+ int ret;
+
+ ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0);
+ if (ret) {
+ pr_err("Can not convert bps to token rate\n");
+ return -EINVAL;
+ }
+ return qman_ceetm_channel_set_commit_rate(channel, &token_rate,
+ token_limit);
+}
+EXPORT_SYMBOL(qman_ceetm_channel_set_commit_rate_bps);
+
+int qman_ceetm_channel_get_commit_rate(struct qm_ceetm_channel *channel,
+ struct qm_ceetm_rate *token_rate,
+ u16 *token_limit)
+{
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
+ int ret;
+
+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
+ channel->idx);
+ query_opts.dcpid = channel->dcp_idx;
+
+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
+ if (ret || !query_result.shaper_query.crtcr ||
+ !query_result.shaper_query.crtbl) {
+ pr_err("The channel commit rate or limit is not set\n");
+ return -EINVAL;
+ }
+ token_rate->whole = be24_to_cpu(query_result.shaper_query.crtcr) >> 13;
+ token_rate->fraction = be24_to_cpu(query_result.shaper_query.crtcr) &
+ 0x1FFF;
+ *token_limit = be16_to_cpu(query_result.shaper_query.crtbl);
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_channel_get_commit_rate);
+
+int qman_ceetm_channel_get_commit_rate_bps(struct qm_ceetm_channel *channel,
+ u64 *bps, u16 *token_limit)
+{
+ struct qm_ceetm_rate token_rate;
+ int ret;
+
+ ret = qman_ceetm_channel_get_commit_rate(channel, &token_rate,
+ token_limit);
+ if (ret) {
+ pr_err("The channel CR rate or limit is not available\n");
+ return -EINVAL;
+ }
+
+ return qman_ceetm_tokenrate2bps(&token_rate, bps, 0);
+}
+EXPORT_SYMBOL(qman_ceetm_channel_get_commit_rate_bps);
+
+int qman_ceetm_channel_set_excess_rate(struct qm_ceetm_channel *channel,
+ const struct qm_ceetm_rate *token_rate,
+ u16 token_limit)
+{
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
+ int ret;
+
+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
+ channel->idx);
+ query_opts.dcpid = channel->dcp_idx;
+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
+ if (ret) {
+ pr_err("Fail to get the current channel shaper setting\n");
+ return -EINVAL;
+ }
+
+ channel->er_token_rate.whole = token_rate->whole;
+ channel->er_token_rate.fraction = token_rate->fraction;
+ channel->er_token_bucket_limit = token_limit;
+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
+ channel->idx);
+ config_opts.dcpid = channel->dcp_idx;
+ config_opts.shaper_config.ertcr = cpu_to_be24(
+ (token_rate->whole << 13) | (token_rate->fraction));
+ config_opts.shaper_config.ertbl = cpu_to_be16(token_limit);
+ config_opts.shaper_config.cpl = query_result.shaper_query.cpl;
+ config_opts.shaper_config.crtcr = query_result.shaper_query.crtcr;
+ config_opts.shaper_config.crtbl = query_result.shaper_query.crtbl;
+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
+}
+EXPORT_SYMBOL(qman_ceetm_channel_set_excess_rate);
+
+int qman_ceetm_channel_set_excess_rate_bps(struct qm_ceetm_channel *channel,
+ u64 bps, u16 token_limit)
+{
+ struct qm_ceetm_rate token_rate;
+ int ret;
+
+ ret = qman_ceetm_bps2tokenrate(bps, &token_rate, 0);
+ if (ret) {
+ pr_err("Can not convert bps to token rate\n");
+ return -EINVAL;
+ }
+ return qman_ceetm_channel_set_excess_rate(channel, &token_rate,
+ token_limit);
+}
+EXPORT_SYMBOL(qman_ceetm_channel_set_excess_rate_bps);
+
+int qman_ceetm_channel_get_excess_rate(struct qm_ceetm_channel *channel,
+ struct qm_ceetm_rate *token_rate,
+ u16 *token_limit)
+{
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
+ int ret;
+
+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
+ channel->idx);
+ query_opts.dcpid = channel->dcp_idx;
+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
+ if (ret || !query_result.shaper_query.ertcr ||
+ !query_result.shaper_query.ertbl) {
+ pr_err("The channel excess rate or limit is not set\n");
+ return -EINVAL;
+ }
+ token_rate->whole = be24_to_cpu(query_result.shaper_query.ertcr) >> 13;
+ token_rate->fraction = be24_to_cpu(query_result.shaper_query.ertcr) &
+ 0x1FFF;
+ *token_limit = be16_to_cpu(query_result.shaper_query.ertbl);
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_channel_get_excess_rate);
+
+int qman_ceetm_channel_get_excess_rate_bps(struct qm_ceetm_channel *channel,
+ u64 *bps, u16 *token_limit)
+{
+ struct qm_ceetm_rate token_rate;
+ int ret;
+
+ ret = qman_ceetm_channel_get_excess_rate(channel, &token_rate,
+ token_limit);
+ if (ret) {
+ pr_err("The channel ER rate or limit is not available\n");
+ return -EINVAL;
+ }
+
+ return qman_ceetm_tokenrate2bps(&token_rate, bps, 0);
+}
+EXPORT_SYMBOL(qman_ceetm_channel_get_excess_rate_bps);
+
+int qman_ceetm_channel_set_weight(struct qm_ceetm_channel *channel,
+ u16 token_limit)
+{
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_config config_opts;
+
+ if (channel->shaper_enable) {
+ pr_err("This channel is a shaped one\n");
+ return -EINVAL;
+ }
+
+ channel->cr_token_bucket_limit = token_limit;
+ config_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
+ channel->idx);
+ config_opts.dcpid = channel->dcp_idx;
+ config_opts.shaper_config.crtbl = cpu_to_be16(token_limit);
+ return qman_ceetm_configure_mapping_shaper_tcfc(&config_opts);
+}
+EXPORT_SYMBOL(qman_ceetm_channel_set_weight);
+
+int qman_ceetm_channel_get_weight(struct qm_ceetm_channel *channel,
+ u16 *token_limit)
+{
+ struct qm_mcc_ceetm_mapping_shaper_tcfc_query query_opts;
+ struct qm_mcr_ceetm_mapping_shaper_tcfc_query query_result;
+ int ret;
+
+ query_opts.cid = cpu_to_be16(CEETM_COMMAND_CHANNEL_SHAPER |
+ channel->idx);
+ query_opts.dcpid = channel->dcp_idx;
+ ret = qman_ceetm_query_mapping_shaper_tcfc(&query_opts, &query_result);
+ if (ret || !query_result.shaper_query.crtbl) {
+ pr_err("This unshaped channel's uFQ weight is unavailable\n");
+ return -EINVAL;
+ }
+ *token_limit = be16_to_cpu(query_result.shaper_query.crtbl);
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_channel_get_weight);
+
+int qman_ceetm_channel_set_group(struct qm_ceetm_channel *channel, int group_b,
+ unsigned int prio_a, unsigned int prio_b)
+{
+ struct qm_mcc_ceetm_class_scheduler_config config_opts;
+ struct qm_mcr_ceetm_class_scheduler_query query_result;
+ int i;
+
+ if (prio_a > 7) {
+ pr_err("The priority of group A is out of range\n");
+ return -EINVAL;
+ }
+ if (group_b && (prio_b > 7)) {
+ pr_err("The priority of group B is out of range\n");
+ return -EINVAL;
+ }
+
+ if (qman_ceetm_query_class_scheduler(channel, &query_result)) {
+ pr_err("Can't query channel#%d's scheduler!\n", channel->idx);
+ return -EINVAL;
+ }
+
+ config_opts.cqcid = cpu_to_be16(channel->idx);
+ config_opts.dcpid = channel->dcp_idx;
+ config_opts.gpc_combine_flag = !group_b;
+ config_opts.gpc_prio_a = prio_a;
+ config_opts.gpc_prio_b = prio_b;
+
+ for (i = 0; i < 8; i++)
+ config_opts.w[i] = query_result.w[i];
+ config_opts.crem = query_result.crem;
+ config_opts.erem = query_result.erem;
+
+ return qman_ceetm_configure_class_scheduler(&config_opts);
+}
+EXPORT_SYMBOL(qman_ceetm_channel_set_group);
+
+int qman_ceetm_channel_get_group(struct qm_ceetm_channel *channel, int *group_b,
+ unsigned int *prio_a, unsigned int *prio_b)
+{
+ struct qm_mcr_ceetm_class_scheduler_query query_result;
+
+ if (qman_ceetm_query_class_scheduler(channel, &query_result)) {
+ pr_err("Can't query channel#%d's scheduler!\n", channel->idx);
+ return -EINVAL;
+ }
+ *group_b = !query_result.gpc_combine_flag;
+ *prio_a = query_result.gpc_prio_a;
+ *prio_b = query_result.gpc_prio_b;
+
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_channel_get_group);
+
+#define GROUP_A_ELIGIBILITY_SET (1 << 8)
+#define GROUP_B_ELIGIBILITY_SET (1 << 9)
+#define CQ_ELIGIBILITY_SET(n) (1 << (7 - (n)))
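+/*
+ * Per the masks above, the CREM/EREM fields carry group eligibility in
+ * bit 8 (group A) and bit 9 (group B), and per-CQ eligibility for the
+ * independent CQs in bits 7-0, with CQ#n mapped to bit (7 - n).
+ */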
+int qman_ceetm_channel_set_group_cr_eligibility(struct qm_ceetm_channel
+ *channel, int group_b, int cre)
+{
+ struct qm_mcc_ceetm_class_scheduler_config csch_config;
+ struct qm_mcr_ceetm_class_scheduler_query csch_query;
+ int i;
+
+ if (qman_ceetm_query_class_scheduler(channel, &csch_query)) {
+ pr_err("Cannot get the channel %d scheduler setting.\n",
+ channel->idx);
+ return -EINVAL;
+ }
+ csch_config.cqcid = cpu_to_be16(channel->idx);
+ csch_config.dcpid = channel->dcp_idx;
+ csch_config.gpc_combine_flag = csch_query.gpc_combine_flag;
+ csch_config.gpc_prio_a = csch_query.gpc_prio_a;
+ csch_config.gpc_prio_b = csch_query.gpc_prio_b;
+
+ for (i = 0; i < 8; i++)
+ csch_config.w[i] = csch_query.w[i];
+ csch_config.erem = csch_query.erem;
+ if (group_b)
+ csch_config.crem = (be16_to_cpu(csch_query.crem)
+ & ~GROUP_B_ELIGIBILITY_SET)
+ | (cre ? GROUP_B_ELIGIBILITY_SET : 0);
+ else
+ csch_config.crem = (be16_to_cpu(csch_query.crem)
+ & ~GROUP_A_ELIGIBILITY_SET)
+ | (cre ? GROUP_A_ELIGIBILITY_SET : 0);
+
+ csch_config.crem = cpu_to_be16(csch_config.crem);
+
+ if (qman_ceetm_configure_class_scheduler(&csch_config)) {
+ pr_err("Cannot config channel %d's scheduler with "
+ "group_%c's cr eligibility\n", channel->idx,
+ group_b ? 'b' : 'a');
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_channel_set_group_cr_eligibility);
+
+int qman_ceetm_channel_set_group_er_eligibility(struct qm_ceetm_channel
+ *channel, int group_b, int ere)
+{
+ struct qm_mcc_ceetm_class_scheduler_config csch_config;
+ struct qm_mcr_ceetm_class_scheduler_query csch_query;
+ int i;
+
+ if (qman_ceetm_query_class_scheduler(channel, &csch_query)) {
+ pr_err("Cannot get the channel %d scheduler setting.\n",
+ channel->idx);
+ return -EINVAL;
+ }
+ csch_config.cqcid = cpu_to_be16(channel->idx);
+ csch_config.dcpid = channel->dcp_idx;
+ csch_config.gpc_combine_flag = csch_query.gpc_combine_flag;
+ csch_config.gpc_prio_a = csch_query.gpc_prio_a;
+ csch_config.gpc_prio_b = csch_query.gpc_prio_b;
+
+ for (i = 0; i < 8; i++)
+ csch_config.w[i] = csch_query.w[i];
+ csch_config.crem = csch_query.crem;
+ if (group_b)
+ csch_config.erem = (be16_to_cpu(csch_query.erem)
+ & ~GROUP_B_ELIGIBILITY_SET)
+ | (ere ? GROUP_B_ELIGIBILITY_SET : 0);
+ else
+ csch_config.erem = (be16_to_cpu(csch_query.erem)
+ & ~GROUP_A_ELIGIBILITY_SET)
+ | (ere ? GROUP_A_ELIGIBILITY_SET : 0);
+
+ csch_config.erem = cpu_to_be16(csch_config.erem);
+
+ if (qman_ceetm_configure_class_scheduler(&csch_config)) {
+ pr_err("Cannot config channel %d's scheduler with "
+ "group_%c's er eligibility\n", channel->idx,
+ group_b ? 'b' : 'a');
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_channel_set_group_er_eligibility);
+
+int qman_ceetm_channel_set_cq_cr_eligibility(struct qm_ceetm_channel *channel,
+ unsigned int idx, int cre)
+{
+ struct qm_mcc_ceetm_class_scheduler_config csch_config;
+ struct qm_mcr_ceetm_class_scheduler_query csch_query;
+ int i;
+
+ if (idx > 7) {
+ pr_err("CQ index is out of range\n");
+ return -EINVAL;
+ }
+ if (qman_ceetm_query_class_scheduler(channel, &csch_query)) {
+ pr_err("Cannot get the channel %d scheduler setting.\n",
+ channel->idx);
+ return -EINVAL;
+ }
+ csch_config.cqcid = cpu_to_be16(channel->idx);
+ csch_config.dcpid = channel->dcp_idx;
+ csch_config.gpc_combine_flag = csch_query.gpc_combine_flag;
+ csch_config.gpc_prio_a = csch_query.gpc_prio_a;
+ csch_config.gpc_prio_b = csch_query.gpc_prio_b;
+ for (i = 0; i < 8; i++)
+ csch_config.w[i] = csch_query.w[i];
+ csch_config.erem = csch_query.erem;
+ csch_config.crem = (be16_to_cpu(csch_query.crem)
+ & ~CQ_ELIGIBILITY_SET(idx)) |
+ (cre ? CQ_ELIGIBILITY_SET(idx) : 0);
+ csch_config.crem = cpu_to_be16(csch_config.crem);
+ if (qman_ceetm_configure_class_scheduler(&csch_config)) {
+ pr_err("Cannot config channel scheduler to set "
+ "cr eligibility mask for CQ#%d\n", idx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_channel_set_cq_cr_eligibility);
+
+int qman_ceetm_channel_set_cq_er_eligibility(struct qm_ceetm_channel *channel,
+ unsigned int idx, int ere)
+{
+ struct qm_mcc_ceetm_class_scheduler_config csch_config;
+ struct qm_mcr_ceetm_class_scheduler_query csch_query;
+ int i;
+
+ if (idx > 7) {
+ pr_err("CQ index is out of range\n");
+ return -EINVAL;
+ }
+ if (qman_ceetm_query_class_scheduler(channel, &csch_query)) {
+ pr_err("Cannot get the channel %d scheduler setting.\n",
+ channel->idx);
+ return -EINVAL;
+ }
+ csch_config.cqcid = cpu_to_be16(channel->idx);
+ csch_config.dcpid = channel->dcp_idx;
+ csch_config.gpc_combine_flag = csch_query.gpc_combine_flag;
+ csch_config.gpc_prio_a = csch_query.gpc_prio_a;
+ csch_config.gpc_prio_b = csch_query.gpc_prio_b;
+ for (i = 0; i < 8; i++)
+ csch_config.w[i] = csch_query.w[i];
+ csch_config.crem = csch_query.crem;
+ csch_config.erem = (be16_to_cpu(csch_query.erem)
+ & ~CQ_ELIGIBILITY_SET(idx)) |
+ (ere ? CQ_ELIGIBILITY_SET(idx) : 0);
+ csch_config.erem = cpu_to_be16(csch_config.erem);
+ if (qman_ceetm_configure_class_scheduler(&csch_config)) {
+ pr_err("Cannot config channel scheduler to set "
+ "er eligibility mask for CQ#%d\n", idx);
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_channel_set_cq_er_eligibility);
+
+int qman_ceetm_cq_claim(struct qm_ceetm_cq **cq,
+ struct qm_ceetm_channel *channel, unsigned int idx,
+ struct qm_ceetm_ccg *ccg)
+{
+ struct qm_ceetm_cq *p;
+ struct qm_mcc_ceetm_cq_config cq_config;
+
+ if (idx > 7) {
+ pr_err("The independent class queue id is out of range\n");
+ return -EINVAL;
+ }
+
+ list_for_each_entry(p, &channel->class_queues, node) {
+ if (p->idx == idx) {
+ pr_err("The CQ#%d has been claimed!\n", idx);
+ return -EINVAL;
+ }
+ }
+
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (!p) {
+ pr_err("Can't allocate memory for CQ#%d!\n", idx);
+ return -ENOMEM;
+ }
+
+ list_add_tail(&p->node, &channel->class_queues);
+ p->idx = idx;
+ p->is_claimed = 1;
+ p->parent = channel;
+ INIT_LIST_HEAD(&p->bound_lfqids);
+
+ if (ccg) {
+ cq_config.cqid = cpu_to_be16((channel->idx << 4) | idx);
+ cq_config.dcpid = channel->dcp_idx;
+ cq_config.ccgid = cpu_to_be16(ccg->idx);
+ if (qman_ceetm_configure_cq(&cq_config)) {
+ pr_err("Can't configure the CQ#%d with CCGRID#%d\n",
+ idx, ccg->idx);
+ list_del(&p->node);
+ kfree(p);
+ return -EINVAL;
+ }
+ }
+
+ *cq = p;
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_cq_claim);
+
+int qman_ceetm_cq_claim_A(struct qm_ceetm_cq **cq,
+ struct qm_ceetm_channel *channel, unsigned int idx,
+ struct qm_ceetm_ccg *ccg)
+{
+ struct qm_ceetm_cq *p;
+ struct qm_mcc_ceetm_cq_config cq_config;
+
+ if ((idx < 8) || (idx > 15)) {
+ pr_err("This grouped class queue id is out of range\n");
+ return -EINVAL;
+ }
+
+ list_for_each_entry(p, &channel->class_queues, node) {
+ if (p->idx == idx) {
+ pr_err("The CQ#%d has been claimed!\n", idx);
+ return -EINVAL;
+ }
+ }
+
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (!p) {
+ pr_err("Can't allocate memory for CQ#%d!\n", idx);
+ return -ENOMEM;
+ }
+
+ list_add_tail(&p->node, &channel->class_queues);
+ p->idx = idx;
+ p->is_claimed = 1;
+ p->parent = channel;
+ INIT_LIST_HEAD(&p->bound_lfqids);
+
+ if (ccg) {
+ cq_config.cqid = cpu_to_be16((channel->idx << 4) | idx);
+ cq_config.dcpid = channel->dcp_idx;
+ cq_config.ccgid = cpu_to_be16(ccg->idx);
+ if (qman_ceetm_configure_cq(&cq_config)) {
+ pr_err("Can't configure the CQ#%d with CCGRID#%d\n",
+ idx, ccg->idx);
+ list_del(&p->node);
+ kfree(p);
+ return -EINVAL;
+ }
+ }
+ *cq = p;
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_cq_claim_A);
+
+int qman_ceetm_cq_claim_B(struct qm_ceetm_cq **cq,
+ struct qm_ceetm_channel *channel, unsigned int idx,
+ struct qm_ceetm_ccg *ccg)
+{
+ struct qm_ceetm_cq *p;
+ struct qm_mcc_ceetm_cq_config cq_config;
+
+ if ((idx < 12) || (idx > 15)) {
+ pr_err("This grouped class queue id is out of range\n");
+ return -EINVAL;
+ }
+
+ list_for_each_entry(p, &channel->class_queues, node) {
+ if (p->idx == idx) {
+ pr_err("The CQ#%d has been claimed!\n", idx);
+ return -EINVAL;
+ }
+ }
+
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (!p) {
+ pr_err("Can't allocate memory for CQ#%d!\n", idx);
+ return -ENOMEM;
+ }
+
+ list_add_tail(&p->node, &channel->class_queues);
+ p->idx = idx;
+ p->is_claimed = 1;
+ p->parent = channel;
+ INIT_LIST_HEAD(&p->bound_lfqids);
+
+ if (ccg) {
+ cq_config.cqid = cpu_to_be16((channel->idx << 4) | idx);
+ cq_config.dcpid = channel->dcp_idx;
+ cq_config.ccgid = cpu_to_be16(ccg->idx);
+ if (qman_ceetm_configure_cq(&cq_config)) {
+ pr_err("Can't configure the CQ#%d with CCGRID#%d\n",
+ idx, ccg->idx);
+ list_del(&p->node);
+ kfree(p);
+ return -EINVAL;
+ }
+ }
+ *cq = p;
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_cq_claim_B);
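+
+/* The three claim variants carve up a channel's 16 CQs: CQs 0-7 are
+ * independent (qman_ceetm_cq_claim), CQs 8-15 are grouped
+ * (qman_ceetm_cq_claim_A), and qman_ceetm_cq_claim_B accepts 12-15 for
+ * the group-B half of a channel split via qman_ceetm_channel_set_group.
+ * For example (priorities illustrative):
+ *
+ *   struct qm_ceetm_cq *cq_a, *cq_b;
+ *
+ *   qman_ceetm_channel_set_group(channel, 1, 2, 5);
+ *   qman_ceetm_cq_claim_A(&cq_a, channel, 8, NULL);
+ *   qman_ceetm_cq_claim_B(&cq_b, channel, 12, NULL);
+ */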
+
+int qman_ceetm_cq_release(struct qm_ceetm_cq *cq)
+{
+ if (!list_empty(&cq->bound_lfqids)) {
+ pr_err("The CQ#%d has unreleased LFQID\n", cq->idx);
+ return -EBUSY;
+ }
+ list_del(&cq->node);
+ qman_ceetm_drain_cq(cq);
+ kfree(cq);
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_cq_release);
+
+int qman_ceetm_set_queue_weight(struct qm_ceetm_cq *cq,
+ struct qm_ceetm_weight_code *weight_code)
+{
+ struct qm_mcc_ceetm_class_scheduler_config config_opts;
+ struct qm_mcr_ceetm_class_scheduler_query query_result;
+ int i;
+
+ if (cq->idx < 8) {
+ pr_err("Can not set weight for ungrouped class queue\n");
+ return -EINVAL;
+ }
+
+ if (qman_ceetm_query_class_scheduler(cq->parent, &query_result)) {
+ pr_err("Can't query channel#%d's scheduler!\n",
+ cq->parent->idx);
+ return -EINVAL;
+ }
+
+ config_opts.cqcid = cpu_to_be16(cq->parent->idx);
+ config_opts.dcpid = cq->parent->dcp_idx;
+ config_opts.crem = query_result.crem;
+ config_opts.erem = query_result.erem;
+ config_opts.gpc_combine_flag = query_result.gpc_combine_flag;
+ config_opts.gpc_prio_a = query_result.gpc_prio_a;
+ config_opts.gpc_prio_b = query_result.gpc_prio_b;
+
+ for (i = 0; i < 8; i++)
+ config_opts.w[i] = query_result.w[i];
+ config_opts.w[cq->idx - 8] = ((weight_code->y << 3) |
+ (weight_code->x & 0x7));
+ return qman_ceetm_configure_class_scheduler(&config_opts);
+}
+EXPORT_SYMBOL(qman_ceetm_set_queue_weight);
+
+int qman_ceetm_get_queue_weight(struct qm_ceetm_cq *cq,
+ struct qm_ceetm_weight_code *weight_code)
+{
+ struct qm_mcr_ceetm_class_scheduler_query query_result;
+
+ if (cq->idx < 8) {
+ pr_err("Can not get weight for ungrouped class queue\n");
+ return -EINVAL;
+ }
+
+ if (qman_ceetm_query_class_scheduler(cq->parent,
+ &query_result)) {
+ pr_err("Can't get the weight code for CQ#%d!\n", cq->idx);
+ return -EINVAL;
+ }
+ weight_code->y = query_result.w[cq->idx - 8] >> 3;
+ weight_code->x = query_result.w[cq->idx - 8] & 0x7;
+
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_get_queue_weight);
+
+/* The WBFS code is represented as {x,y}; the effective weight can be
+ * calculated as:
+ * effective weight = 2^x / (1 - (y/64))
+ * = 2^(x+6) / (64 - y)
+ */
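+/* For example, the code {x=3, y=48} gives an effective weight of
+ * 2^(3+6) / (64 - 48) = 512 / 16 = 32.
+ */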
+static void reduce_fraction(u32 *n, u32 *d)
+{
+ u32 factor = 2;
+ u32 lesser = (*n < *d) ? *n : *d;
+ /* If factor exceeds the square-root of the lesser of *n and *d,
+ * then there's no point continuing. Proof: if there was a factor
+ * bigger than the square root, that would imply there exists
+ * another factor smaller than the square-root with which it
+ * multiplies to give 'lesser' - but that's a contradiction
+ * because the other factor would have already been found and
+ * divided out.
+ */
+ while ((factor * factor) <= lesser) {
+ /* If 'factor' is a factor of *n and *d, divide them both
+ * by 'factor' as many times as possible.
+ */
+ while (!(*n % factor) && !(*d % factor)) {
+ *n /= factor;
+ *d /= factor;
+ lesser /= factor;
+ }
+ if (factor == 2)
+ factor = 3;
+ else
+ factor += 2;
+ }
+}
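+
+/* In its caller below the numerator is always a power of two, so the
+ * factor-of-two pass alone fully reduces the fraction: e.g. n = 512,
+ * d = 16 reduces to 32/1.
+ */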
+
+int qman_ceetm_wbfs2ratio(struct qm_ceetm_weight_code *weight_code,
+ u32 *numerator,
+ u32 *denominator)
+{
+ *numerator = (u32) 1 << (weight_code->x + 6);
+ *denominator = 64 - weight_code->y;
+ reduce_fraction(numerator, denominator);
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_wbfs2ratio);
+
+/* For a given x, the weight is between 2^x (inclusive) and 2^(x+1) (exclusive).
+ * So find 'x' by range, and then estimate 'y' using:
+ * 64 - y = 2^(x + 6) / weight
+ * = 2^(x + 6) / (n/d)
+ * = d * 2^(x+6) / n
+ * y = 64 - (d * 2^(x+6) / n)
+ */
+int qman_ceetm_ratio2wbfs(u32 numerator,
+ u32 denominator,
+ struct qm_ceetm_weight_code *weight_code,
+ int rounding)
+{
+ unsigned int y, x = 0;
+ /* search incrementing 'x' until:
+ * weight < 2^(x+1)
+ * n/d < 2^(x+1)
+ * n < d * 2^(x+1)
+ */
+ while ((x < 8) && (numerator >= (denominator << (x + 1))))
+ x++;
+ if (x >= 8)
+ return -ERANGE;
+ /* because of the subtraction, use '-rounding' */
+ y = 64 - ROUNDING(denominator << (x + 6), numerator, -rounding);
+ if (y >= 32)
+ return -ERANGE;
+ weight_code->x = x;
+ weight_code->y = y;
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_ratio2wbfs);
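+
+/* For example, a target weight of 3 (numerator 3, denominator 1) stops
+ * the search at x = 1 (since 3 < 2^2), and y = 64 - (1*2^7)/3, i.e. 21
+ * or 22 depending on the rounding mode, for an effective weight of
+ * roughly 128/43 ~= 2.98.
+ */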
+
+int qman_ceetm_set_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 ratio)
+{
+ struct qm_ceetm_weight_code weight_code;
+
+ if (qman_ceetm_ratio2wbfs(ratio, 100, &weight_code, 0)) {
+ pr_err("Cannot get wbfs code for cq %x\n", cq->idx);
+ return -EINVAL;
+ }
+ return qman_ceetm_set_queue_weight(cq, &weight_code);
+}
+EXPORT_SYMBOL(qman_ceetm_set_queue_weight_in_ratio);
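+
+/*
+ * In the *_in_ratio helpers, 'ratio' is the effective weight scaled by 100
+ * (e.g. ratio == 150 encodes a weight of 1.5): the setter above converts via
+ * qman_ceetm_ratio2wbfs(ratio, 100, ...) and the getter below returns
+ * (n * 100) / d.
+ */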
+
+int qman_ceetm_get_queue_weight_in_ratio(struct qm_ceetm_cq *cq, u32 *ratio)
+{
+ struct qm_ceetm_weight_code weight_code;
+ u32 n, d;
+
+ if (qman_ceetm_get_queue_weight(cq, &weight_code)) {
+ pr_err("Cannot query the weight code for cq%x\n", cq->idx);
+ return -EINVAL;
+ }
+
+ if (qman_ceetm_wbfs2ratio(&weight_code, &n, &d)) {
+ pr_err("Cannot get the ratio with wbfs code\n");
+ return -EINVAL;
+ }
+
+ *ratio = (n * 100) / d;
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_get_queue_weight_in_ratio);
+
+int qman_ceetm_cq_get_dequeue_statistics(struct qm_ceetm_cq *cq, u32 flags,
+ u64 *frame_count, u64 *byte_count)
+{
+ struct qm_mcr_ceetm_statistics_query result;
+ u16 cid, command_type;
+ enum qm_dc_portal dcp_idx;
+ int ret;
+
+ cid = cpu_to_be16((cq->parent->idx << 4) | cq->idx);
+ dcp_idx = cq->parent->dcp_idx;
+ if (flags == QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER)
+ command_type = CEETM_QUERY_DEQUEUE_CLEAR_STATISTICS;
+ else
+ command_type = CEETM_QUERY_DEQUEUE_STATISTICS;
+
+ ret = qman_ceetm_query_statistics(cid, dcp_idx, command_type, &result);
+ if (ret) {
+ pr_err("Can't query the statistics of CQ#%d!\n", cq->idx);
+ return -EINVAL;
+ }
+
+ *frame_count = be40_to_cpu(result.frm_cnt);
+ *byte_count = be48_to_cpu(result.byte_cnt);
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_cq_get_dequeue_statistics);
+
+int qman_ceetm_drain_cq(struct qm_ceetm_cq *cq)
+{
+ struct qm_mcr_ceetm_cq_peek_pop_xsfdrread ppxr;
+ int ret;
+
+ do {
+ ret = qman_ceetm_cq_peek_pop_xsfdrread(cq, 1, 0, &ppxr);
+ if (ret) {
+ pr_err("Failed to pop frame from CQ\n");
+ return -EINVAL;
+ }
+ } while (!(ppxr.stat & 0x2));
+
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_drain_cq);
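+
+/*
+ * The drain loop above pops one frame per iteration until bit 1 of the
+ * returned 'stat' field is set, which this driver treats as the CQ-empty
+ * indication.
+ */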
+
+#define CEETM_LFQMT_LFQID_MSB 0xF00000
+#define CEETM_LFQMT_LFQID_LSB 0x000FFF
+int qman_ceetm_lfq_claim(struct qm_ceetm_lfq **lfq,
+ struct qm_ceetm_cq *cq)
+{
+ struct qm_ceetm_lfq *p;
+ u32 lfqid;
+ int ret = 0;
+ struct qm_mcc_ceetm_lfqmt_config lfqmt_config;
+
+ if (cq->parent->dcp_idx == qm_dc_portal_fman0) {
+ ret = qman_alloc_ceetm0_lfqid(&lfqid);
+ } else if (cq->parent->dcp_idx == qm_dc_portal_fman1) {
+ ret = qman_alloc_ceetm1_lfqid(&lfqid);
+ } else {
+ pr_err("dcp_idx %u does not correspond to a known fman in this driver\n",
+ cq->parent->dcp_idx);
+ return -EINVAL;
+ }
+
+ if (ret) {
+ pr_err("There is no lfqid avalaible for CQ#%d!\n", cq->idx);
+ return -ENODEV;
+ }
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+ p->idx = lfqid;
+ p->dctidx = (u16)(lfqid & CEETM_LFQMT_LFQID_LSB);
+ p->parent = cq->parent;
+ list_add_tail(&p->node, &cq->bound_lfqids);
+
+ lfqmt_config.lfqid = cpu_to_be24(CEETM_LFQMT_LFQID_MSB |
+ (cq->parent->dcp_idx << 16) |
+ (lfqid & CEETM_LFQMT_LFQID_LSB));
+ lfqmt_config.cqid = cpu_to_be16((cq->parent->idx << 4) | (cq->idx));
+ lfqmt_config.dctidx = cpu_to_be16(p->dctidx);
+ if (qman_ceetm_configure_lfqmt(&lfqmt_config)) {
+ pr_err("Can't configure LFQMT for LFQID#%d @ CQ#%d\n",
+ lfqid, cq->idx);
+ list_del(&p->node);
+ kfree(p);
+ return -EINVAL;
+ }
+ *lfq = p;
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_lfq_claim);
+
+int qman_ceetm_lfq_release(struct qm_ceetm_lfq *lfq)
+{
+ if (lfq->parent->dcp_idx == qm_dc_portal_fman0) {
+ qman_release_ceetm0_lfqid(lfq->idx);
+ } else if (lfq->parent->dcp_idx == qm_dc_portal_fman1) {
+ qman_release_ceetm1_lfqid(lfq->idx);
+ } else {
+ pr_err("dcp_idx %u does not correspond to a known fman in this driver\n",
+ lfq->parent->dcp_idx);
+ return -EINVAL;
+ }
+ list_del(&lfq->node);
+ kfree(lfq);
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_lfq_release);
+
+int qman_ceetm_lfq_set_context(struct qm_ceetm_lfq *lfq, u64 context_a,
+ u32 context_b)
+{
+ struct qm_mcc_ceetm_dct_config dct_config;
+ lfq->context_a = context_a;
+ lfq->context_b = context_b;
+ dct_config.dctidx = cpu_to_be16((u16)lfq->dctidx);
+ dct_config.dcpid = lfq->parent->dcp_idx;
+ dct_config.context_b = cpu_to_be32(context_b);
+ dct_config.context_a = cpu_to_be64(context_a);
+
+ return qman_ceetm_configure_dct(&dct_config);
+}
+EXPORT_SYMBOL(qman_ceetm_lfq_set_context);
+
+int qman_ceetm_lfq_get_context(struct qm_ceetm_lfq *lfq, u64 *context_a,
+ u32 *context_b)
+{
+ struct qm_mcc_ceetm_dct_query dct_query;
+ struct qm_mcr_ceetm_dct_query query_result;
+
+ dct_query.dctidx = cpu_to_be16(lfq->dctidx);
+ dct_query.dcpid = lfq->parent->dcp_idx;
+ if (qman_ceetm_query_dct(&dct_query, &query_result)) {
+ pr_err("Can't query LFQID#%d's context!\n", lfq->idx);
+ return -EINVAL;
+ }
+ *context_a = be64_to_cpu(query_result.context_a);
+ *context_b = be32_to_cpu(query_result.context_b);
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_lfq_get_context);
+
+int qman_ceetm_create_fq(struct qm_ceetm_lfq *lfq, struct qman_fq *fq)
+{
+ spin_lock_init(&fq->fqlock);
+ fq->fqid = lfq->idx;
+ fq->flags = QMAN_FQ_FLAG_NO_MODIFY;
+ if (lfq->ern)
+ fq->cb.ern = lfq->ern;
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+ if (unlikely(find_empty_fq_table_entry(&fq->key, fq)))
+ return -ENOMEM;
+#endif
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_create_fq);
+
+#define MAX_CCG_IDX 0x000F
+int qman_ceetm_ccg_claim(struct qm_ceetm_ccg **ccg,
+ struct qm_ceetm_channel *channel,
+ unsigned int idx,
+ void (*cscn)(struct qm_ceetm_ccg *,
+ void *cb_ctx,
+ int congested),
+ void *cb_ctx)
+{
+ struct qm_ceetm_ccg *p;
+
+ if (idx > MAX_CCG_IDX) {
+ pr_err("The given ccg index is out of range\n");
+ return -EINVAL;
+ }
+
+ list_for_each_entry(p, &channel->ccgs, node) {
+ if (p->idx == idx) {
+ pr_err("The CCG#%d has been claimed\n", idx);
+ return -EINVAL;
+ }
+ }
+
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
+ if (!p) {
+ pr_err("Can't allocate memory for CCG#%d!\n", idx);
+ return -ENOMEM;
+ }
+
+ list_add_tail(&p->node, &channel->ccgs);
+
+ p->idx = idx;
+ p->parent = channel;
+ p->cb = cscn;
+ p->cb_ctx = cb_ctx;
+ INIT_LIST_HEAD(&p->cb_node);
+
+ *ccg = p;
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_ccg_claim);
+
+int qman_ceetm_ccg_release(struct qm_ceetm_ccg *ccg)
+{
+ unsigned long irqflags __maybe_unused;
+ struct qm_mcc_ceetm_ccgr_config config_opts;
+ int ret = 0;
+ struct qman_portal *p = get_affine_portal();
+
+ memset(&config_opts, 0, sizeof(struct qm_mcc_ceetm_ccgr_config));
+ spin_lock_irqsave(&p->ccgr_lock, irqflags);
+ if (!list_empty(&ccg->cb_node))
+ list_del(&ccg->cb_node);
+ config_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_CONFIGURE |
+ (ccg->parent->idx << 4) | ccg->idx);
+ config_opts.dcpid = ccg->parent->dcp_idx;
+ config_opts.we_mask = cpu_to_be16(QM_CCGR_WE_CSCN_TUPD);
+ config_opts.cm_config.cscn_tupd = cpu_to_be16(PORTAL_IDX(p));
+ ret = qman_ceetm_configure_ccgr(&config_opts);
+ spin_unlock_irqrestore(&p->ccgr_lock, irqflags);
+ put_affine_portal();
+
+ list_del(&ccg->node);
+ kfree(ccg);
+ return ret;
+}
+EXPORT_SYMBOL(qman_ceetm_ccg_release);
+
+int qman_ceetm_ccg_set(struct qm_ceetm_ccg *ccg, u16 we_mask,
+ const struct qm_ceetm_ccg_params *params)
+{
+ struct qm_mcc_ceetm_ccgr_config config_opts;
+ unsigned long irqflags __maybe_unused;
+ int ret;
+ struct qman_portal *p;
+
+ if (((ccg->parent->idx << 4) | ccg->idx) >= (2 * __CGR_NUM))
+ return -EINVAL;
+
+ p = get_affine_portal();
+
+ memset(&config_opts, 0, sizeof(struct qm_mcc_ceetm_ccgr_config));
+ spin_lock_irqsave(&p->ccgr_lock, irqflags);
+
+ config_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_CONFIGURE |
+ (ccg->parent->idx << 4) | ccg->idx);
+ config_opts.dcpid = ccg->parent->dcp_idx;
+ config_opts.we_mask = we_mask;
+ if (we_mask & QM_CCGR_WE_CSCN_EN) {
+ config_opts.we_mask |= QM_CCGR_WE_CSCN_TUPD;
+ config_opts.cm_config.cscn_tupd = cpu_to_be16(
+ QM_CGR_TARG_UDP_CTRL_WRITE_BIT | PORTAL_IDX(p));
+ }
+ config_opts.we_mask = cpu_to_be16(config_opts.we_mask);
+ config_opts.cm_config.ctl_wr_en_g = params->wr_en_g;
+ config_opts.cm_config.ctl_wr_en_y = params->wr_en_y;
+ config_opts.cm_config.ctl_wr_en_r = params->wr_en_r;
+ config_opts.cm_config.ctl_td_en = params->td_en;
+ config_opts.cm_config.ctl_td_mode = params->td_mode;
+ config_opts.cm_config.ctl_cscn_en = params->cscn_en;
+ config_opts.cm_config.ctl_mode = params->mode;
+ config_opts.cm_config.oal = params->oal;
+ config_opts.cm_config.cs_thres.hword =
+ cpu_to_be16(params->cs_thres_in.hword);
+ config_opts.cm_config.cs_thres_x.hword =
+ cpu_to_be16(params->cs_thres_out.hword);
+ config_opts.cm_config.td_thres.hword =
+ cpu_to_be16(params->td_thres.hword);
+ config_opts.cm_config.wr_parm_g.word =
+ cpu_to_be32(params->wr_parm_g.word);
+ config_opts.cm_config.wr_parm_y.word =
+ cpu_to_be32(params->wr_parm_y.word);
+ config_opts.cm_config.wr_parm_r.word =
+ cpu_to_be32(params->wr_parm_r.word);
+ ret = qman_ceetm_configure_ccgr(&config_opts);
+ if (ret) {
+ pr_err("Configure CCGR CM failed!\n");
+ goto release_lock;
+ }
+
+ if (we_mask & QM_CCGR_WE_CSCN_EN)
+ if (list_empty(&ccg->cb_node))
+ list_add(&ccg->cb_node,
+ &p->ccgr_cbs[ccg->parent->dcp_idx]);
+release_lock:
+ spin_unlock_irqrestore(&p->ccgr_lock, irqflags);
+ put_affine_portal();
+ return ret;
+}
+EXPORT_SYMBOL(qman_ceetm_ccg_set);
+
+#define CEETM_CCGR_CTL_MASK 0x01
+int qman_ceetm_ccg_get(struct qm_ceetm_ccg *ccg,
+ struct qm_ceetm_ccg_params *params)
+{
+ struct qm_mcc_ceetm_ccgr_query query_opts;
+ struct qm_mcr_ceetm_ccgr_query query_result;
+
+ query_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_QUERY |
+ (ccg->parent->idx << 4) | ccg->idx);
+ query_opts.dcpid = ccg->parent->dcp_idx;
+
+ if (qman_ceetm_query_ccgr(&query_opts, &query_result)) {
+ pr_err("Can't query CCGR#%d\n", ccg->idx);
+ return -EINVAL;
+ }
+
+ params->wr_parm_r.word = query_result.cm_query.wr_parm_r.word;
+ params->wr_parm_y.word = query_result.cm_query.wr_parm_y.word;
+ params->wr_parm_g.word = query_result.cm_query.wr_parm_g.word;
+ params->td_thres.hword = query_result.cm_query.td_thres.hword;
+ params->cs_thres_out.hword = query_result.cm_query.cs_thres_x.hword;
+ params->cs_thres_in.hword = query_result.cm_query.cs_thres.hword;
+ params->oal = query_result.cm_query.oal;
+ params->wr_en_g = query_result.cm_query.ctl_wr_en_g;
+ params->wr_en_y = query_result.cm_query.ctl_wr_en_y;
+ params->wr_en_r = query_result.cm_query.ctl_wr_en_r;
+ params->td_en = query_result.cm_query.ctl_td_en;
+ params->td_mode = query_result.cm_query.ctl_td_mode;
+ params->cscn_en = query_result.cm_query.ctl_cscn_en;
+ params->mode = query_result.cm_query.ctl_mode;
+
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_ccg_get);
+
+int qman_ceetm_ccg_get_reject_statistics(struct qm_ceetm_ccg *ccg, u32 flags,
+ u64 *frame_count, u64 *byte_count)
+{
+ struct qm_mcr_ceetm_statistics_query result;
+ u16 cid, command_type;
+ enum qm_dc_portal dcp_idx;
+ int ret;
+
+ cid = cpu_to_be16((ccg->parent->idx << 4) | ccg->idx);
+ dcp_idx = ccg->parent->dcp_idx;
+ if (flags == QMAN_CEETM_FLAG_CLEAR_STATISTICS_COUNTER)
+ command_type = CEETM_QUERY_REJECT_CLEAR_STATISTICS;
+ else
+ command_type = CEETM_QUERY_REJECT_STATISTICS;
+
+ ret = qman_ceetm_query_statistics(cid, dcp_idx, command_type, &result);
+ if (ret) {
+ pr_err("Can't query the statistics of CCG#%d!\n", ccg->idx);
+ return -EINVAL;
+ }
+
+ *frame_count = be40_to_cpu(result.frm_cnt);
+ *byte_count = be48_to_cpu(result.byte_cnt);
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_ccg_get_reject_statistics);
+
+int qman_ceetm_cscn_swp_get(struct qm_ceetm_ccg *ccg,
+ u16 swp_idx,
+ unsigned int *cscn_enabled)
+{
+ struct qm_mcc_ceetm_ccgr_query query_opts;
+ struct qm_mcr_ceetm_ccgr_query query_result;
+ int i;
+
+ DPA_ASSERT(swp_idx < 127);
+ query_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_QUERY |
+ (ccg->parent->idx << 4) | ccg->idx);
+ query_opts.dcpid = ccg->parent->dcp_idx;
+
+ if (qman_ceetm_query_ccgr(&query_opts, &query_result)) {
+ pr_err("Can't query CCGR#%d\n", ccg->idx);
+ return -EINVAL;
+ }
+
+ /* cscn_targ_swp[3] covers portals 0-31, [2] covers 32-63, and so on */
+ i = 3 - swp_idx / 32;
+ *cscn_enabled = (query_result.cm_query.cscn_targ_swp[i] >>
+ (31 - swp_idx % 32)) & 0x1;
+
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_cscn_swp_get);
+
+int qman_ceetm_cscn_dcp_set(struct qm_ceetm_ccg *ccg,
+ u16 dcp_idx,
+ u8 vcgid,
+ unsigned int cscn_enabled,
+ u16 we_mask,
+ const struct qm_ceetm_ccg_params *params)
+{
+ struct qm_mcc_ceetm_ccgr_config config_opts;
+ int ret;
+
+ config_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_CONFIGURE |
+ (ccg->parent->idx << 4) | ccg->idx);
+ config_opts.dcpid = ccg->parent->dcp_idx;
+ config_opts.we_mask = cpu_to_be16(we_mask | QM_CCGR_WE_CSCN_TUPD |
+ QM_CCGR_WE_CDV);
+ config_opts.cm_config.cdv = vcgid;
+ config_opts.cm_config.cscn_tupd = cpu_to_be16((cscn_enabled << 15) |
+ QM_CGR_TARG_UDP_CTRL_DCP | dcp_idx);
+ config_opts.cm_config.ctl_wr_en_g = params->wr_en_g;
+ config_opts.cm_config.ctl_wr_en_y = params->wr_en_y;
+ config_opts.cm_config.ctl_wr_en_r = params->wr_en_r;
+ config_opts.cm_config.ctl_td_en = params->td_en;
+ config_opts.cm_config.ctl_td_mode = params->td_mode;
+ config_opts.cm_config.ctl_cscn_en = params->cscn_en;
+ config_opts.cm_config.ctl_mode = params->mode;
+ config_opts.cm_config.cs_thres.hword =
+ cpu_to_be16(params->cs_thres_in.hword);
+ config_opts.cm_config.cs_thres_x.hword =
+ cpu_to_be16(params->cs_thres_out.hword);
+ config_opts.cm_config.td_thres.hword =
+ cpu_to_be16(params->td_thres.hword);
+ config_opts.cm_config.wr_parm_g.word =
+ cpu_to_be32(params->wr_parm_g.word);
+ config_opts.cm_config.wr_parm_y.word =
+ cpu_to_be32(params->wr_parm_y.word);
+ config_opts.cm_config.wr_parm_r.word =
+ cpu_to_be32(params->wr_parm_r.word);
+
+ ret = qman_ceetm_configure_ccgr(&config_opts);
+ if (ret) {
+ pr_err("Configure CSCN_TARG_DCP failed!\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_cscn_dcp_set);
+
+int qman_ceetm_cscn_dcp_get(struct qm_ceetm_ccg *ccg,
+ u16 dcp_idx,
+ u8 *vcgid,
+ unsigned int *cscn_enabled)
+{
+ struct qm_mcc_ceetm_ccgr_query query_opts;
+ struct qm_mcr_ceetm_ccgr_query query_result;
+
+ query_opts.ccgrid = cpu_to_be16(CEETM_CCGR_CM_QUERY |
+ (ccg->parent->idx << 4) | ccg->idx);
+ query_opts.dcpid = ccg->parent->dcp_idx;
+
+ if (qman_ceetm_query_ccgr(&query_opts, &query_result)) {
+ pr_err("Can't query CCGR#%d\n", ccg->idx);
+ return -EINVAL;
+ }
+
+ *vcgid = query_result.cm_query.cdv;
+ *cscn_enabled = (query_result.cm_query.cscn_targ_dcp >> dcp_idx) & 0x1;
+ return 0;
+}
+EXPORT_SYMBOL(qman_ceetm_cscn_dcp_get);
+
+int qman_ceetm_querycongestion(struct __qm_mcr_querycongestion *ccg_state,
+ unsigned int dcp_idx)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ u8 res;
+ int i, j;
+
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+
+ mcc = qm_mc_start(&p->p);
+ for (i = 0; i < 2; i++) {
+ mcc->ccgr_query.ccgrid =
+ cpu_to_be16(CEETM_QUERY_CONGESTION_STATE | i);
+ mcc->ccgr_query.dcpid = dcp_idx;
+ qm_mc_commit(&p->p, QM_CEETM_VERB_CCGR_QUERY);
+
+ while (!(mcr = qm_mc_result(&p->p)))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_CEETM_VERB_CCGR_QUERY);
+ res = mcr->result;
+ if (res == QM_MCR_RESULT_OK) {
+ for (j = 0; j < 8; j++)
+ mcr->ccgr_query.congestion_state.state.
+ __state[j] = be32_to_cpu(mcr->ccgr_query.
+ congestion_state.state.__state[j]);
+ *(ccg_state + i) =
+ mcr->ccgr_query.congestion_state.state;
+ } else {
+ pr_err("QUERY CEETM CONGESTION STATE failed\n");
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return -EIO;
+ }
+ }
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return 0;
+}
+
+int qman_set_wpm(int wpm_enable)
+{
+ return qm_set_wpm(wpm_enable);
+}
+EXPORT_SYMBOL(qman_set_wpm);
+
+int qman_get_wpm(int *wpm_enable)
+{
+ return qm_get_wpm(wpm_enable);
+}
+EXPORT_SYMBOL(qman_get_wpm);
+
+int qman_shutdown_fq(u32 fqid)
+{
+ struct qman_portal *p;
+ unsigned long irqflags __maybe_unused;
+ int ret;
+ struct qm_portal *low_p;
+
+ p = get_affine_portal();
+ PORTAL_IRQ_LOCK(p, irqflags);
+ low_p = &p->p;
+ ret = qm_shutdown_fq(&low_p, 1, fqid);
+ PORTAL_IRQ_UNLOCK(p, irqflags);
+ put_affine_portal();
+ return ret;
+}
+
+const struct qm_portal_config *qman_get_qm_portal_config(
+ struct qman_portal *portal)
+{
+ return portal->sharing_redirect ? NULL : portal->config;
+}
diff --git a/drivers/staging/fsl_qbman/qman_low.h b/drivers/staging/fsl_qbman/qman_low.h
new file mode 100644
index 000000000000..d58af1b532b7
--- /dev/null
+++ b/drivers/staging/fsl_qbman/qman_low.h
@@ -0,0 +1,1445 @@
+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_private.h"
+
+/***************************/
+/* Portal register assists */
+/***************************/
+
+/* Cache-inhibited register offsets */
+#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
+
+#define QM_REG_EQCR_PI_CINH 0x0000
+#define QM_REG_EQCR_CI_CINH 0x0004
+#define QM_REG_EQCR_ITR 0x0008
+#define QM_REG_DQRR_PI_CINH 0x0040
+#define QM_REG_DQRR_CI_CINH 0x0044
+#define QM_REG_DQRR_ITR 0x0048
+#define QM_REG_DQRR_DCAP 0x0050
+#define QM_REG_DQRR_SDQCR 0x0054
+#define QM_REG_DQRR_VDQCR 0x0058
+#define QM_REG_DQRR_PDQCR 0x005c
+#define QM_REG_MR_PI_CINH 0x0080
+#define QM_REG_MR_CI_CINH 0x0084
+#define QM_REG_MR_ITR 0x0088
+#define QM_REG_CFG 0x0100
+#define QM_REG_ISR 0x0e00
+#define QM_REG_IIR 0x0e0c
+#define QM_REG_ITPR 0x0e14
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR 0x0000
+#define QM_CL_DQRR 0x1000
+#define QM_CL_MR 0x2000
+#define QM_CL_EQCR_PI_CENA 0x3000
+#define QM_CL_EQCR_CI_CENA 0x3100
+#define QM_CL_DQRR_PI_CENA 0x3200
+#define QM_CL_DQRR_CI_CENA 0x3300
+#define QM_CL_MR_PI_CENA 0x3400
+#define QM_CL_MR_CI_CENA 0x3500
+#define QM_CL_CR 0x3800
+#define QM_CL_RR0 0x3900
+#define QM_CL_RR1 0x3940
+
+#endif
+
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+
+#define QM_REG_EQCR_PI_CINH 0x3000
+#define QM_REG_EQCR_CI_CINH 0x3040
+#define QM_REG_EQCR_ITR 0x3080
+#define QM_REG_DQRR_PI_CINH 0x3100
+#define QM_REG_DQRR_CI_CINH 0x3140
+#define QM_REG_DQRR_ITR 0x3180
+#define QM_REG_DQRR_DCAP 0x31C0
+#define QM_REG_DQRR_SDQCR 0x3200
+#define QM_REG_DQRR_VDQCR 0x3240
+#define QM_REG_DQRR_PDQCR 0x3280
+#define QM_REG_MR_PI_CINH 0x3300
+#define QM_REG_MR_CI_CINH 0x3340
+#define QM_REG_MR_ITR 0x3380
+#define QM_REG_CFG 0x3500
+#define QM_REG_ISR 0x3600
+#define QM_REG_IIR 0x36C0
+#define QM_REG_ITPR 0x3740
+
+/* Cache-enabled register offsets */
+#define QM_CL_EQCR 0x0000
+#define QM_CL_DQRR 0x1000
+#define QM_CL_MR 0x2000
+#define QM_CL_EQCR_PI_CENA 0x3000
+#define QM_CL_EQCR_CI_CENA 0x3040
+#define QM_CL_DQRR_PI_CENA 0x3100
+#define QM_CL_DQRR_CI_CENA 0x3140
+#define QM_CL_MR_PI_CENA 0x3300
+#define QM_CL_MR_CI_CENA 0x3340
+#define QM_CL_CR 0x3800
+#define QM_CL_RR0 0x3900
+#define QM_CL_RR1 0x3940
+
+#endif
+
+
+/* BTW, the drivers (and h/w programming model) already obtain the required
+ * synchronisation for portal accesses via lwsync(), hwsync(), and
+ * data-dependencies. Use of barrier()s or other order-preserving primitives
+ * simply degrades performance. Hence the use of the __raw_*() interfaces,
+ * which simply ensure that the compiler treats the portal registers as
+ * volatile (i.e. non-coherent). */
+
+/* Cache-inhibited register access. */
+#define __qm_in(qm, o) be32_to_cpu(__raw_readl((qm)->addr_ci + (o)))
+#define __qm_out(qm, o, val) __raw_writel((cpu_to_be32(val)), \
+ (qm)->addr_ci + (o))
+#define qm_in(reg) __qm_in(&portal->addr, QM_REG_##reg)
+#define qm_out(reg, val) __qm_out(&portal->addr, QM_REG_##reg, val)
+
+/* Cache-enabled (index) register access */
+#define __qm_cl_touch_ro(qm, o) dcbt_ro((qm)->addr_ce + (o))
+#define __qm_cl_touch_rw(qm, o) dcbt_rw((qm)->addr_ce + (o))
+#define __qm_cl_in(qm, o) be32_to_cpu(__raw_readl((qm)->addr_ce + (o)))
+#define __qm_cl_out(qm, o, val) \
+ do { \
+ u32 *__tmpclout = (qm)->addr_ce + (o); \
+ __raw_writel(cpu_to_be32(val), __tmpclout); \
+ dcbf(__tmpclout); \
+ } while (0)
+#define __qm_cl_invalidate(qm, o) dcbi((qm)->addr_ce + (o))
+#define qm_cl_touch_ro(reg) __qm_cl_touch_ro(&portal->addr, QM_CL_##reg##_CENA)
+#define qm_cl_touch_rw(reg) __qm_cl_touch_rw(&portal->addr, QM_CL_##reg##_CENA)
+#define qm_cl_in(reg) __qm_cl_in(&portal->addr, QM_CL_##reg##_CENA)
+#define qm_cl_out(reg, val) __qm_cl_out(&portal->addr, QM_CL_##reg##_CENA, val)
+#define qm_cl_invalidate(reg)\
+ __qm_cl_invalidate(&portal->addr, QM_CL_##reg##_CENA)
+
+/* Cache-enabled ring access */
+#define qm_cl(base, idx) ((void *)base + ((idx) << 6))
+
+/* Cyclic helper for rings. FIXME: once we are able to do fine-grain perf
+ * analysis, look at using the "extra" bit in the ring index registers to avoid
+ * cyclic issues. */
+static inline u8 qm_cyc_diff(u8 ringsize, u8 first, u8 last)
+{
+ /* 'first' is included, 'last' is excluded */
+ if (first <= last)
+ return last - first;
+ return ringsize + last - first;
+}
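+
+/*
+ * E.g. with ringsize = 8, first = 6 and last = 2, qm_cyc_diff() returns
+ * 8 + 2 - 6 = 4, covering entries 6, 7, 0 and 1.
+ */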
+
+/* Portal modes.
+ * Enum types:
+ * pmode == production mode
+ * cmode == consumption mode
+ * dmode == h/w dequeue mode
+ * Enum values use 3 letter codes. The first letter matches the portal mode,
+ * the remaining two letters indicate:
+ * ci == cache-inhibited portal register
+ * ce == cache-enabled portal register
+ * vb == in-band valid-bit (cache-enabled)
+ * dc == DCA (Discrete Consumption Acknowledgement), DQRR-only
+ * As for "enum qm_dqrr_dmode", it should be self-explanatory.
+ */
+enum qm_eqcr_pmode { /* matches QCSP_CFG::EPM */
+ qm_eqcr_pci = 0, /* PI index, cache-inhibited */
+ qm_eqcr_pce = 1, /* PI index, cache-enabled */
+ qm_eqcr_pvb = 2 /* valid-bit */
+};
+enum qm_dqrr_dmode { /* matches QCSP_CFG::DP */
+ qm_dqrr_dpush = 0, /* SDQCR + VDQCR */
+ qm_dqrr_dpull = 1 /* PDQCR */
+};
+enum qm_dqrr_pmode { /* s/w-only */
+ qm_dqrr_pci, /* reads DQRR_PI_CINH */
+ qm_dqrr_pce, /* reads DQRR_PI_CENA */
+ qm_dqrr_pvb /* reads valid-bit */
+};
+enum qm_dqrr_cmode { /* matches QCSP_CFG::DCM */
+ qm_dqrr_cci = 0, /* CI index, cache-inhibited */
+ qm_dqrr_cce = 1, /* CI index, cache-enabled */
+ qm_dqrr_cdc = 2 /* Discrete Consumption Acknowledgement */
+};
+enum qm_mr_pmode { /* s/w-only */
+ qm_mr_pci, /* reads MR_PI_CINH */
+ qm_mr_pce, /* reads MR_PI_CENA */
+ qm_mr_pvb /* reads valid-bit */
+};
+enum qm_mr_cmode { /* matches QCSP_CFG::MM */
+ qm_mr_cci = 0, /* CI index, cache-inhibited */
+ qm_mr_cce = 1 /* CI index, cache-enabled */
+};
+
+
+/* ------------------------- */
+/* --- Portal structures --- */
+
+#define QM_EQCR_SIZE 8
+#define QM_DQRR_SIZE 16
+#define QM_MR_SIZE 8
+
+struct qm_eqcr {
+ struct qm_eqcr_entry *ring, *cursor;
+ u8 ci, available, ithresh, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ u32 busy;
+ enum qm_eqcr_pmode pmode;
+#endif
+};
+
+struct qm_dqrr {
+ const struct qm_dqrr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ enum qm_dqrr_dmode dmode;
+ enum qm_dqrr_pmode pmode;
+ enum qm_dqrr_cmode cmode;
+#endif
+};
+
+struct qm_mr {
+ const struct qm_mr_entry *ring, *cursor;
+ u8 pi, ci, fill, ithresh, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ enum qm_mr_pmode pmode;
+ enum qm_mr_cmode cmode;
+#endif
+};
+
+struct qm_mc {
+ struct qm_mc_command *cr;
+ struct qm_mc_result *rr;
+ u8 rridx, vbit;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ enum {
+ /* Can be _mc_start()ed */
+ qman_mc_idle,
+ /* Can be _mc_commit()ed or _mc_abort()ed */
+ qman_mc_user,
+ /* Can only be _mc_retry()ed */
+ qman_mc_hw
+ } state;
+#endif
+};
+
+#define QM_PORTAL_ALIGNMENT ____cacheline_aligned
+
+struct qm_addr {
+ void __iomem *addr_ce; /* cache-enabled */
+ void __iomem *addr_ci; /* cache-inhibited */
+};
+
+struct qm_portal {
+ /* In the non-CONFIG_FSL_DPA_CHECKING case, the following stuff up to
+ * and including 'mc' fits within a cacheline (yay!). The 'config' part
+ * is setup-only, so isn't a cause for concern. In other words, don't
+ * rearrange this structure on a whim, there be dragons ... */
+ struct qm_addr addr;
+ struct qm_eqcr eqcr;
+ struct qm_dqrr dqrr;
+ struct qm_mr mr;
+ struct qm_mc mc;
+} QM_PORTAL_ALIGNMENT;
+
+
+/* ---------------- */
+/* --- EQCR API --- */
+
+/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
+#define EQCR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(QM_EQCR_SIZE << 6)))
+
+/* Bit-wise logic to convert a ring pointer to a ring index */
+static inline u8 EQCR_PTR2IDX(struct qm_eqcr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (QM_EQCR_SIZE - 1);
+}
+
+/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
+static inline void EQCR_INC(struct qm_eqcr *eqcr)
+{
+ /* NB: this is odd-looking, but experiments show that it generates fast
+ * code with essentially no branching overheads. We increment to the
+ * next EQCR pointer and handle overflow and 'vbit'. */
+ struct qm_eqcr_entry *partial = eqcr->cursor + 1;
+ eqcr->cursor = EQCR_CARRYCLEAR(partial);
+ if (partial != eqcr->cursor)
+ eqcr->vbit ^= QM_EQCR_VERB_VBIT;
+}
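+
+/*
+ * Illustration: each ring entry is 64 bytes, so an 8-entry EQCR spans 512
+ * bytes and (QM_EQCR_SIZE << 6) == 0x200 acts as the "carry bit" of the
+ * index field. Incrementing past entry 7 sets that bit; EQCR_CARRYCLEAR()
+ * masks it off, wrapping the cursor back to the ring base (the ring lives
+ * at offset 0 of the cache-enabled region, so the bit is otherwise clear),
+ * and EQCR_INC() detects the wrap in order to toggle 'vbit'.
+ */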
+
+static inline int qm_eqcr_init(struct qm_portal *portal,
+ enum qm_eqcr_pmode pmode,
+ unsigned int eq_stash_thresh,
+ int eq_stash_prio)
+{
+ /* This use of 'register', as well as all other occurrences, is because
+ * it has been observed to generate much faster code with gcc than is
+ * otherwise the case. */
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u32 cfg;
+ u8 pi;
+
+ eqcr->ring = portal->addr.addr_ce + QM_CL_EQCR;
+ eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(EQCR_CI);
+ pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ eqcr->cursor = eqcr->ring + pi;
+ eqcr->vbit = (qm_in(EQCR_PI_CINH) & QM_EQCR_SIZE) ?
+ QM_EQCR_VERB_VBIT : 0;
+ eqcr->available = QM_EQCR_SIZE - 1 -
+ qm_cyc_diff(QM_EQCR_SIZE, eqcr->ci, pi);
+ eqcr->ithresh = qm_in(EQCR_ITR);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ eqcr->busy = 0;
+ eqcr->pmode = pmode;
+#endif
+ cfg = (qm_in(CFG) & 0x00ffffff) |
+ (eq_stash_thresh << 28) | /* QCSP_CFG: EST */
+ (eq_stash_prio << 26) | /* QCSP_CFG: EP */
+ ((pmode & 0x3) << 24); /* QCSP_CFG::EPM */
+ qm_out(CFG, cfg);
+ return 0;
+}
+
+static inline unsigned int qm_eqcr_get_ci_stashing(struct qm_portal *portal)
+{
+ return (qm_in(CFG) >> 28) & 0x7;
+}
+
+static inline void qm_eqcr_finish(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 pi, ci;
+ u32 cfg;
+
+ /*
+ * Disable EQCI stashing because the QMan only
+ * presents the value it previously stashed to
+ * maintain coherency. Setting the stash threshold
+ * to 1 then 0 ensures that QMan has resynchronized
+ * its internal copy so that the portal is clean
+ * when it is reinitialized in the future.
+ */
+ cfg = (qm_in(CFG) & 0x0fffffff) |
+ (1 << 28); /* QCSP_CFG: EST */
+ qm_out(CFG, cfg);
+ cfg &= 0x0fffffff; /* stash threshold = 0 */
+ qm_out(CFG, cfg);
+
+ pi = qm_in(EQCR_PI_CINH) & (QM_EQCR_SIZE - 1);
+ ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+
+ /* Refresh EQCR CI cache value */
+ qm_cl_invalidate(EQCR_CI);
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+
+ DPA_ASSERT(!eqcr->busy);
+ if (pi != EQCR_PTR2IDX(eqcr->cursor))
+ pr_crit("losing uncommited EQCR entries\n");
+ if (ci != eqcr->ci)
+ pr_crit("missing existing EQCR completions\n");
+ if (eqcr->ci != EQCR_PTR2IDX(eqcr->cursor))
+ pr_crit("EQCR destroyed unquiesced\n");
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_no_stash(struct qm_portal
+ *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ DPA_ASSERT(!eqcr->busy);
+ if (!eqcr->available)
+ return NULL;
+
+
+#ifdef CONFIG_FSL_DPA_CHECKING
+ eqcr->busy = 1;
+#endif
+#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
+ dcbz_64(eqcr->cursor);
+#endif
+ return eqcr->cursor;
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_start_stash(struct qm_portal
+ *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci;
+
+ DPA_ASSERT(!eqcr->busy);
+ if (!eqcr->available) {
+ old_ci = eqcr->ci;
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ if (!diff)
+ return NULL;
+ }
+#ifdef CONFIG_FSL_DPA_CHECKING
+ eqcr->busy = 1;
+#endif
+#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
+ dcbz_64(eqcr->cursor);
+#endif
+ return eqcr->cursor;
+}
+
+static inline void qm_eqcr_abort(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+ DPA_ASSERT(eqcr->busy);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ eqcr->busy = 0;
+#endif
+}
+
+static inline struct qm_eqcr_entry *qm_eqcr_pend_and_next(
+ struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ DPA_ASSERT(eqcr->busy);
+ DPA_ASSERT(eqcr->pmode != qm_eqcr_pvb);
+ if (eqcr->available == 1)
+ return NULL;
+ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+ dcbf(eqcr->cursor);
+ EQCR_INC(eqcr);
+ eqcr->available--;
+#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
+ dcbz_64(eqcr->cursor);
+#endif
+ return eqcr->cursor;
+}
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define EQCR_COMMIT_CHECKS(eqcr) \
+do { \
+ DPA_ASSERT(eqcr->busy); \
+ DPA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & 0xffffff00)); \
+ DPA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & 0xffffff00)); \
+} while (0)
+#else
+#define EQCR_COMMIT_CHECKS(eqcr) \
+do { \
+ DPA_ASSERT(eqcr->busy); \
+ DPA_ASSERT(eqcr->cursor->orp == (eqcr->cursor->orp & \
+ cpu_to_be32(0x00ffffff))); \
+ DPA_ASSERT(eqcr->cursor->fqid == (eqcr->cursor->fqid & \
+ cpu_to_be32(0x00ffffff))); \
+} while (0)
+#endif
+
+static inline void qm_eqcr_pci_commit(struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ EQCR_COMMIT_CHECKS(eqcr);
+ DPA_ASSERT(eqcr->pmode == qm_eqcr_pci);
+ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+ EQCR_INC(eqcr);
+ eqcr->available--;
+ dcbf(eqcr->cursor);
+ hwsync();
+ qm_out(EQCR_PI_CINH, EQCR_PTR2IDX(eqcr->cursor));
+#ifdef CONFIG_FSL_DPA_CHECKING
+ eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_pce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+ DPA_ASSERT(eqcr->pmode == qm_eqcr_pce);
+ qm_cl_invalidate(EQCR_PI);
+ qm_cl_touch_rw(EQCR_PI);
+}
+
+static inline void qm_eqcr_pce_commit(struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ EQCR_COMMIT_CHECKS(eqcr);
+ DPA_ASSERT(eqcr->pmode == qm_eqcr_pce);
+ eqcr->cursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+ EQCR_INC(eqcr);
+ eqcr->available--;
+ dcbf(eqcr->cursor);
+ lwsync();
+ qm_cl_out(EQCR_PI, EQCR_PTR2IDX(eqcr->cursor));
+#ifdef CONFIG_FSL_DPA_CHECKING
+ eqcr->busy = 0;
+#endif
+}
+
+static inline void qm_eqcr_pvb_commit(struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ struct qm_eqcr_entry *eqcursor;
+ EQCR_COMMIT_CHECKS(eqcr);
+ DPA_ASSERT(eqcr->pmode == qm_eqcr_pvb);
+ lwsync();
+ eqcursor = eqcr->cursor;
+ eqcursor->__dont_write_directly__verb = myverb | eqcr->vbit;
+ dcbf(eqcursor);
+ EQCR_INC(eqcr);
+ eqcr->available--;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ eqcr->busy = 0;
+#endif
+}
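+
+/*
+ * Note the ordering in the pvb commit above: the caller fills the entry
+ * first, lwsync() orders those stores, and only then is the verb byte
+ * (which carries the valid bit) written and flushed with dcbf(). QMan
+ * therefore never observes a valid bit ahead of the entry it guards.
+ */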
+
+static inline u8 qm_eqcr_cci_update(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci = eqcr->ci;
+ eqcr->ci = qm_in(EQCR_CI_CINH) & (QM_EQCR_SIZE - 1);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ return diff;
+}
+
+static inline void qm_eqcr_cce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_eqcr *eqcr = &portal->eqcr;
+ qm_cl_touch_ro(EQCR_CI);
+}
+
+static inline u8 qm_eqcr_cce_update(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ u8 diff, old_ci = eqcr->ci;
+ eqcr->ci = qm_cl_in(EQCR_CI) & (QM_EQCR_SIZE - 1);
+ qm_cl_invalidate(EQCR_CI);
+ diff = qm_cyc_diff(QM_EQCR_SIZE, old_ci, eqcr->ci);
+ eqcr->available += diff;
+ return diff;
+}
+
+static inline u8 qm_eqcr_get_ithresh(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ return eqcr->ithresh;
+}
+
+static inline void qm_eqcr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ eqcr->ithresh = ithresh;
+ qm_out(EQCR_ITR, ithresh);
+}
+
+static inline u8 qm_eqcr_get_avail(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ return eqcr->available;
+}
+
+static inline u8 qm_eqcr_get_fill(struct qm_portal *portal)
+{
+ register struct qm_eqcr *eqcr = &portal->eqcr;
+ return QM_EQCR_SIZE - 1 - eqcr->available;
+}
+
+
+/* ---------------- */
+/* --- DQRR API --- */
+
+/* FIXME: many possible improvements;
+ * - look at changing the API to use pointer rather than index parameters now
+ * that 'cursor' is a pointer,
+ * - consider moving other parameters to pointer if it could help (ci)
+ */
+
+#define DQRR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(QM_DQRR_SIZE << 6)))
+
+static inline u8 DQRR_PTR2IDX(const struct qm_dqrr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (QM_DQRR_SIZE - 1);
+}
+
+static inline const struct qm_dqrr_entry *DQRR_INC(
+ const struct qm_dqrr_entry *e)
+{
+ return DQRR_CARRYCLEAR(e + 1);
+}
+
+static inline void qm_dqrr_set_maxfill(struct qm_portal *portal, u8 mf)
+{
+ qm_out(CFG, (qm_in(CFG) & 0xff0fffff) |
+ ((mf & (QM_DQRR_SIZE - 1)) << 20));
+}
+
+static inline void qm_dqrr_cci_consume(struct qm_portal *portal, u8 num)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cci);
+ dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
+ qm_out(DQRR_CI_CINH, dqrr->ci);
+}
+
+static inline void qm_dqrr_cce_consume(struct qm_portal *portal, u8 num)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+ dqrr->ci = (dqrr->ci + num) & (QM_DQRR_SIZE - 1);
+ qm_cl_out(DQRR_CI, dqrr->ci);
+}
+
+static inline void qm_dqrr_cdc_consume_n(struct qm_portal *portal, u16 bitmask)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ qm_out(DQRR_DCAP, (1 << 8) | /* DQRR_DCAP::S */
+ ((u32)bitmask << 16)); /* DQRR_DCAP::DCAP_CI */
+ dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
+}
+
+static inline int qm_dqrr_init(struct qm_portal *portal,
+ const struct qm_portal_config *config,
+ enum qm_dqrr_dmode dmode,
+ __maybe_unused enum qm_dqrr_pmode pmode,
+ enum qm_dqrr_cmode cmode, u8 max_fill)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ u32 cfg;
+
+ /* Make sure the DQRR will be idle when we enable */
+ qm_out(DQRR_SDQCR, 0);
+ qm_out(DQRR_VDQCR, 0);
+ qm_out(DQRR_PDQCR, 0);
+ dqrr->ring = portal->addr.addr_ce + QM_CL_DQRR;
+ dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->ci = qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+ dqrr->cursor = dqrr->ring + dqrr->ci;
+ dqrr->fill = qm_cyc_diff(QM_DQRR_SIZE, dqrr->ci, dqrr->pi);
+ dqrr->vbit = (qm_in(DQRR_PI_CINH) & QM_DQRR_SIZE) ?
+ QM_DQRR_VERB_VBIT : 0;
+ dqrr->ithresh = qm_in(DQRR_ITR);
+
+ /* Free up pending DQRR entries if any as per current DCM */
+ if (dqrr->fill) {
+ enum qm_dqrr_cmode dcm = (qm_in(CFG) >> 16) & 3;
+
+#ifdef CONFIG_FSL_DPA_CHECKING
+ dqrr->cmode = dcm;
+#endif
+ switch (dcm) {
+ case qm_dqrr_cci:
+ qm_dqrr_cci_consume(portal, dqrr->fill);
+ break;
+ case qm_dqrr_cce:
+ qm_dqrr_cce_consume(portal, dqrr->fill);
+ break;
+ case qm_dqrr_cdc:
+ qm_dqrr_cdc_consume_n(portal, (1<<QM_DQRR_SIZE) - 1);
+ break;
+ default:
+ DPA_ASSERT(0);
+ }
+ }
+
+#ifdef CONFIG_FSL_DPA_CHECKING
+ dqrr->dmode = dmode;
+ dqrr->pmode = pmode;
+ dqrr->cmode = cmode;
+#endif
+ /* Invalidate every ring entry before beginning */
+ for (cfg = 0; cfg < QM_DQRR_SIZE; cfg++)
+ dcbi(qm_cl(dqrr->ring, cfg));
+ cfg = (qm_in(CFG) & 0xff000f00) |
+ ((max_fill & (QM_DQRR_SIZE - 1)) << 20) | /* DQRR_MF */
+ ((dmode & 1) << 18) | /* DP */
+ ((cmode & 3) << 16) | /* DCM */
+ 0xa0 | /* RE+SE */
+ (0 ? 0x40 : 0) | /* Ignore RP */
+ (0 ? 0x10 : 0); /* Ignore SP */
+ qm_out(CFG, cfg);
+ qm_dqrr_set_maxfill(portal, max_fill);
+
+ /* Recalculate cursor as we may have consumed frames */
+ dqrr->cursor = dqrr->ring + dqrr->ci;
+ return 0;
+}
+
+static inline void qm_dqrr_finish(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if ((dqrr->cmode != qm_dqrr_cdc) &&
+ (dqrr->ci != DQRR_PTR2IDX(dqrr->cursor)))
+ pr_crit("Ignoring completed DQRR entries\n");
+#endif
+}
+
+static inline const struct qm_dqrr_entry *qm_dqrr_current(
+ struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ if (!dqrr->fill)
+ return NULL;
+ return dqrr->cursor;
+}
+
+static inline u8 qm_dqrr_cursor(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ return DQRR_PTR2IDX(dqrr->cursor);
+}
+
+static inline u8 qm_dqrr_next(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ DPA_ASSERT(dqrr->fill);
+ dqrr->cursor = DQRR_INC(dqrr->cursor);
+ return --dqrr->fill;
+}
+
+static inline u8 qm_dqrr_pci_update(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ u8 diff, old_pi = dqrr->pi;
+ DPA_ASSERT(dqrr->pmode == qm_dqrr_pci);
+ dqrr->pi = qm_in(DQRR_PI_CINH) & (QM_DQRR_SIZE - 1);
+ diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
+ dqrr->fill += diff;
+ return diff;
+}
+
+static inline void qm_dqrr_pce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+ DPA_ASSERT(dqrr->pmode == qm_dqrr_pce);
+ qm_cl_invalidate(DQRR_PI);
+ qm_cl_touch_ro(DQRR_PI);
+}
+
+static inline u8 qm_dqrr_pce_update(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ u8 diff, old_pi = dqrr->pi;
+ DPA_ASSERT(dqrr->pmode == qm_dqrr_pce);
+ dqrr->pi = qm_cl_in(DQRR_PI) & (QM_DQRR_SIZE - 1);
+ diff = qm_cyc_diff(QM_DQRR_SIZE, old_pi, dqrr->pi);
+ dqrr->fill += diff;
+ return diff;
+}
+
+static inline void qm_dqrr_pvb_update(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ const struct qm_dqrr_entry *res = qm_cl(dqrr->ring, dqrr->pi);
+ DPA_ASSERT(dqrr->pmode == qm_dqrr_pvb);
+#if (defined CONFIG_PPC || defined CONFIG_PPC64) && !defined CONFIG_FSL_PAMU
+ /*
+ * On PowerPC platforms if PAMU is not available we need to
+ * manually invalidate the cache. When PAMU is available the
+ * cache is updated by stashing operations generated by QMan
+ */
+ dcbi(res);
+ dcbt_ro(res);
+#endif
+
+ /* when accessing 'verb', use __raw_readb() to ensure that compiler
+ * inlining doesn't try to optimise out "excess reads". */
+ if ((__raw_readb(&res->verb) & QM_DQRR_VERB_VBIT) == dqrr->vbit) {
+ dqrr->pi = (dqrr->pi + 1) & (QM_DQRR_SIZE - 1);
+ if (!dqrr->pi)
+ dqrr->vbit ^= QM_DQRR_VERB_VBIT;
+ dqrr->fill++;
+ }
+}
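+
+/*
+ * The pvb update above is how software discovers new DQRR entries without
+ * reading a producer index: QMan inverts the valid bit it writes on each
+ * lap of the ring, so an entry whose valid bit matches dqrr->vbit is new.
+ * dqrr->vbit is toggled whenever the software 'pi' wraps to 0, keeping the
+ * two in step.
+ */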
+
+
+static inline void qm_dqrr_cci_consume_to_current(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cci);
+ dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
+ qm_out(DQRR_CI_CINH, dqrr->ci);
+}
+
+static inline void qm_dqrr_cce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+ qm_cl_invalidate(DQRR_CI);
+ qm_cl_touch_rw(DQRR_CI);
+}
+
+static inline void qm_dqrr_cce_consume_to_current(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cce);
+ dqrr->ci = DQRR_PTR2IDX(dqrr->cursor);
+ qm_cl_out(DQRR_CI, dqrr->ci);
+}
+
+static inline void qm_dqrr_cdc_consume_1(struct qm_portal *portal, u8 idx,
+ int park)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ DPA_ASSERT(idx < QM_DQRR_SIZE);
+ qm_out(DQRR_DCAP, (0 << 8) | /* S */
+ ((park ? 1 : 0) << 6) | /* PK */
+ idx); /* DCAP_CI */
+}
+
+static inline void qm_dqrr_cdc_consume_1ptr(struct qm_portal *portal,
+ const struct qm_dqrr_entry *dq,
+ int park)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+ u8 idx = DQRR_PTR2IDX(dq);
+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ DPA_ASSERT((dqrr->ring + idx) == dq);
+ DPA_ASSERT(idx < QM_DQRR_SIZE);
+ qm_out(DQRR_DCAP, (0 << 8) | /* DQRR_DCAP::S */
+ ((park ? 1 : 0) << 6) | /* DQRR_DCAP::PK */
+ idx); /* DQRR_DCAP::DCAP_CI */
+}
+
+static inline u8 qm_dqrr_cdc_cci(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ return qm_in(DQRR_CI_CINH) & (QM_DQRR_SIZE - 1);
+}
+
+static inline void qm_dqrr_cdc_cce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ qm_cl_invalidate(DQRR_CI);
+ qm_cl_touch_ro(DQRR_CI);
+}
+
+static inline u8 qm_dqrr_cdc_cce(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+ DPA_ASSERT(dqrr->cmode == qm_dqrr_cdc);
+ return qm_cl_in(DQRR_CI) & (QM_DQRR_SIZE - 1);
+}
+
+static inline u8 qm_dqrr_get_ci(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+ return dqrr->ci;
+}
+
+static inline void qm_dqrr_park(struct qm_portal *portal, u8 idx)
+{
+ __maybe_unused register struct qm_dqrr *dqrr = &portal->dqrr;
+ DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+ qm_out(DQRR_DCAP, (0 << 8) | /* S */
+ (1 << 6) | /* PK */
+ (idx & (QM_DQRR_SIZE - 1))); /* DCAP_CI */
+}
+
+static inline void qm_dqrr_park_current(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ DPA_ASSERT(dqrr->cmode != qm_dqrr_cdc);
+ qm_out(DQRR_DCAP, (0 << 8) | /* S */
+ (1 << 6) | /* PK */
+ DQRR_PTR2IDX(dqrr->cursor)); /* DCAP_CI */
+}
+
+static inline void qm_dqrr_sdqcr_set(struct qm_portal *portal, u32 sdqcr)
+{
+ qm_out(DQRR_SDQCR, sdqcr);
+}
+
+static inline u32 qm_dqrr_sdqcr_get(struct qm_portal *portal)
+{
+ return qm_in(DQRR_SDQCR);
+}
+
+static inline void qm_dqrr_vdqcr_set(struct qm_portal *portal, u32 vdqcr)
+{
+ qm_out(DQRR_VDQCR, vdqcr);
+}
+
+static inline u32 qm_dqrr_vdqcr_get(struct qm_portal *portal)
+{
+ return qm_in(DQRR_VDQCR);
+}
+
+static inline void qm_dqrr_pdqcr_set(struct qm_portal *portal, u32 pdqcr)
+{
+ qm_out(DQRR_PDQCR, pdqcr);
+}
+
+static inline u32 qm_dqrr_pdqcr_get(struct qm_portal *portal)
+{
+ return qm_in(DQRR_PDQCR);
+}
+
+static inline u8 qm_dqrr_get_ithresh(struct qm_portal *portal)
+{
+ register struct qm_dqrr *dqrr = &portal->dqrr;
+ return dqrr->ithresh;
+}
+
+static inline void qm_dqrr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ qm_out(DQRR_ITR, ithresh);
+}
+
+static inline u8 qm_dqrr_get_maxfill(struct qm_portal *portal)
+{
+ return (qm_in(CFG) & 0x00f00000) >> 20;
+}
+
+
+/* -------------- */
+/* --- MR API --- */
+
+#define MR_CARRYCLEAR(p) \
+ (void *)((unsigned long)(p) & (~(unsigned long)(QM_MR_SIZE << 6)))
+
+static inline u8 MR_PTR2IDX(const struct qm_mr_entry *e)
+{
+ return ((uintptr_t)e >> 6) & (QM_MR_SIZE - 1);
+}
+
+static inline const struct qm_mr_entry *MR_INC(const struct qm_mr_entry *e)
+{
+ return MR_CARRYCLEAR(e + 1);
+}
+
+static inline int qm_mr_init(struct qm_portal *portal, enum qm_mr_pmode pmode,
+ enum qm_mr_cmode cmode)
+{
+ register struct qm_mr *mr = &portal->mr;
+ u32 cfg;
+
+ mr->ring = portal->addr.addr_ce + QM_CL_MR;
+ mr->pi = qm_in(MR_PI_CINH) & (QM_MR_SIZE - 1);
+ mr->ci = qm_in(MR_CI_CINH) & (QM_MR_SIZE - 1);
+ mr->cursor = mr->ring + mr->ci;
+ mr->fill = qm_cyc_diff(QM_MR_SIZE, mr->ci, mr->pi);
+ mr->vbit = (qm_in(MR_PI_CINH) & QM_MR_SIZE) ? QM_MR_VERB_VBIT : 0;
+ mr->ithresh = qm_in(MR_ITR);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mr->pmode = pmode;
+ mr->cmode = cmode;
+#endif
+ cfg = (qm_in(CFG) & 0xfffff0ff) |
+ ((cmode & 1) << 8); /* QCSP_CFG:MM */
+ qm_out(CFG, cfg);
+ return 0;
+}
+
+static inline void qm_mr_finish(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+ if (mr->ci != MR_PTR2IDX(mr->cursor))
+ pr_crit("Ignoring completed MR entries\n");
+}
+
+static inline const struct qm_mr_entry *qm_mr_current(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+ if (!mr->fill)
+ return NULL;
+ return mr->cursor;
+}
+
+static inline u8 qm_mr_cursor(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+ return MR_PTR2IDX(mr->cursor);
+}
+
+static inline u8 qm_mr_next(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+ DPA_ASSERT(mr->fill);
+ mr->cursor = MR_INC(mr->cursor);
+ return --mr->fill;
+}
+
+static inline u8 qm_mr_pci_update(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+ u8 diff, old_pi = mr->pi;
+ DPA_ASSERT(mr->pmode == qm_mr_pci);
+ mr->pi = qm_in(MR_PI_CINH);
+ diff = qm_cyc_diff(QM_MR_SIZE, old_pi, mr->pi);
+ mr->fill += diff;
+ return diff;
+}
+
+static inline void qm_mr_pce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_mr *mr = &portal->mr;
+ DPA_ASSERT(mr->pmode == qm_mr_pce);
+ qm_cl_invalidate(MR_PI);
+ qm_cl_touch_ro(MR_PI);
+}
+
+static inline u8 qm_mr_pce_update(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+ u8 diff, old_pi = mr->pi;
+ DPA_ASSERT(mr->pmode == qm_mr_pce);
+ mr->pi = qm_cl_in(MR_PI) & (QM_MR_SIZE - 1);
+ diff = qm_cyc_diff(QM_MR_SIZE, old_pi, mr->pi);
+ mr->fill += diff;
+ return diff;
+}
+
+static inline void qm_mr_pvb_update(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+ const struct qm_mr_entry *res = qm_cl(mr->ring, mr->pi);
+ DPA_ASSERT(mr->pmode == qm_mr_pvb);
+ /* when accessing 'verb', use __raw_readb() to ensure that compiler
+ * inlining doesn't try to optimise out "excess reads". */
+ if ((__raw_readb(&res->verb) & QM_MR_VERB_VBIT) == mr->vbit) {
+ mr->pi = (mr->pi + 1) & (QM_MR_SIZE - 1);
+ if (!mr->pi)
+ mr->vbit ^= QM_MR_VERB_VBIT;
+ mr->fill++;
+ res = MR_INC(res);
+ }
+ dcbit_ro(res);
+}
+
+static inline void qm_mr_cci_consume(struct qm_portal *portal, u8 num)
+{
+ register struct qm_mr *mr = &portal->mr;
+ DPA_ASSERT(mr->cmode == qm_mr_cci);
+ mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
+ qm_out(MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_cci_consume_to_current(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+ DPA_ASSERT(mr->cmode == qm_mr_cci);
+ mr->ci = MR_PTR2IDX(mr->cursor);
+ qm_out(MR_CI_CINH, mr->ci);
+}
+
+static inline void qm_mr_cce_prefetch(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_mr *mr = &portal->mr;
+ DPA_ASSERT(mr->cmode == qm_mr_cce);
+ qm_cl_invalidate(MR_CI);
+ qm_cl_touch_rw(MR_CI);
+}
+
+static inline void qm_mr_cce_consume(struct qm_portal *portal, u8 num)
+{
+ register struct qm_mr *mr = &portal->mr;
+ DPA_ASSERT(mr->cmode == qm_mr_cce);
+ mr->ci = (mr->ci + num) & (QM_MR_SIZE - 1);
+ qm_cl_out(MR_CI, mr->ci);
+}
+
+static inline void qm_mr_cce_consume_to_current(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+ DPA_ASSERT(mr->cmode == qm_mr_cce);
+ mr->ci = MR_PTR2IDX(mr->cursor);
+ qm_cl_out(MR_CI, mr->ci);
+}
+
+static inline u8 qm_mr_get_ci(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+ return mr->ci;
+}
+
+static inline u8 qm_mr_get_ithresh(struct qm_portal *portal)
+{
+ register struct qm_mr *mr = &portal->mr;
+ return mr->ithresh;
+}
+
+static inline void qm_mr_set_ithresh(struct qm_portal *portal, u8 ithresh)
+{
+ qm_out(MR_ITR, ithresh);
+}
+
+
+/* ------------------------------ */
+/* --- Management command API --- */
+
+static inline int qm_mc_init(struct qm_portal *portal)
+{
+ u8 rr0, rr1;
+ register struct qm_mc *mc = &portal->mc;
+
+ mc->cr = portal->addr.addr_ce + QM_CL_CR;
+ mc->rr = portal->addr.addr_ce + QM_CL_RR0;
+
+ /*
+ * The expected valid bit polarity for the next CR command is 0
+ * if RR1 contains a valid response, and is 1 if RR0 contains a
+ * valid response. If both RRs contain all 0, no command has been
+ * executed since reset, in which case the expected valid bit
+ * polarity is 1.
+ */
+ rr0 = __raw_readb(&mc->rr->verb);
+ rr1 = __raw_readb(&(mc->rr+1)->verb);
+ if ((rr0 == 0 && rr1 == 0) || rr0 != 0)
+ mc->rridx = 1;
+ else
+ mc->rridx = 0;
+
+ mc->vbit = mc->rridx ? QM_MCC_VERB_VBIT : 0;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = qman_mc_idle;
+#endif
+ return 0;
+}
+
+static inline void qm_mc_finish(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_mc *mc = &portal->mc;
+ DPA_ASSERT(mc->state == qman_mc_idle);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ if (mc->state != qman_mc_idle)
+ pr_crit("Losing incomplete MC command\n");
+#endif
+}
+
+static inline struct qm_mc_command *qm_mc_start(struct qm_portal *portal)
+{
+ register struct qm_mc *mc = &portal->mc;
+ DPA_ASSERT(mc->state == qman_mc_idle);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = qman_mc_user;
+#endif
+#if defined(CONFIG_PPC32) || defined(CONFIG_PPC64)
+ dcbz_64(mc->cr);
+#endif
+ return mc->cr;
+}
+
+static inline void qm_mc_abort(struct qm_portal *portal)
+{
+ __maybe_unused register struct qm_mc *mc = &portal->mc;
+ DPA_ASSERT(mc->state == qman_mc_user);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = qman_mc_idle;
+#endif
+}
+
+static inline void qm_mc_commit(struct qm_portal *portal, u8 myverb)
+{
+ register struct qm_mc *mc = &portal->mc;
+ struct qm_mc_result *rr = mc->rr + mc->rridx;
+ DPA_ASSERT(mc->state == qman_mc_user);
+ lwsync();
+ mc->cr->__dont_write_directly__verb = myverb | mc->vbit;
+ dcbf(mc->cr);
+ dcbit_ro(rr);
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = qman_mc_hw;
+#endif
+}
+
+static inline struct qm_mc_result *qm_mc_result(struct qm_portal *portal)
+{
+ register struct qm_mc *mc = &portal->mc;
+ struct qm_mc_result *rr = mc->rr + mc->rridx;
+ DPA_ASSERT(mc->state == qman_mc_hw);
+ /* The inactive response register's verb byte always returns zero until
+ * its command is submitted and completed. This includes the valid-bit,
+ * in case you were wondering... */
+ if (!__raw_readb(&rr->verb)) {
+ dcbit_ro(rr);
+ return NULL;
+ }
+ mc->rridx ^= 1;
+ mc->vbit ^= QM_MCC_VERB_VBIT;
+#ifdef CONFIG_FSL_DPA_CHECKING
+ mc->state = qman_mc_idle;
+#endif
+ return rr;
+}
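+
+/*
+ * Typical management-command sequence, as used by qm_shutdown_fq() below
+ * (a sketch of the existing pattern, not a new API):
+ *
+ *   mcc = qm_mc_start(portal);
+ *   mcc->queryfq_np.fqid = cpu_to_be32(fqid);
+ *   qm_mc_commit(portal, QM_MCC_VERB_QUERYFQ_NP);
+ *   while (!(mcr = qm_mc_result(portal)))
+ *           cpu_relax();
+ *
+ * qm_mc_result() returns NULL until hardware writes the response verb
+ * (including its valid bit); once a response is seen, the response-register
+ * index and valid-bit polarity are flipped ready for the next command.
+ */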
+
+
+/* ------------------------------------- */
+/* --- Portal interrupt register API --- */
+
+static inline int qm_isr_init(__always_unused struct qm_portal *portal)
+{
+ return 0;
+}
+
+static inline void qm_isr_finish(__always_unused struct qm_portal *portal)
+{
+}
+
+static inline void qm_isr_set_iperiod(struct qm_portal *portal, u16 iperiod)
+{
+ qm_out(ITPR, iperiod);
+}
+
+static inline u32 __qm_isr_read(struct qm_portal *portal, enum qm_isr_reg n)
+{
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ return __qm_in(&portal->addr, QM_REG_ISR + (n << 6));
+#else
+ return __qm_in(&portal->addr, QM_REG_ISR + (n << 2));
+#endif
+}
+
+static inline void __qm_isr_write(struct qm_portal *portal, enum qm_isr_reg n,
+ u32 val)
+{
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ __qm_out(&portal->addr, QM_REG_ISR + (n << 6), val);
+#else
+ __qm_out(&portal->addr, QM_REG_ISR + (n << 2), val);
+#endif
+}
+
+/* Cleanup FQs */
+static inline int qm_shutdown_fq(struct qm_portal **portal, int portal_count,
+ u32 fqid)
+{
+ struct qm_mc_command *mcc;
+ struct qm_mc_result *mcr;
+ u8 state;
+ int orl_empty, fq_empty, i, drain = 0;
+ u32 result;
+ u32 channel, wq;
+ u16 dest_wq;
+
+ /* Determine the state of the FQID */
+ mcc = qm_mc_start(portal[0]);
+ mcc->queryfq_np.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ_NP);
+ while (!(mcr = qm_mc_result(portal[0])))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ_NP);
+ state = mcr->queryfq_np.state & QM_MCR_NP_STATE_MASK;
+ if (state == QM_MCR_NP_STATE_OOS)
+ return 0; /* Already OOS, no need to do anymore checks */
+
+ /* Query which channel the FQ is using */
+ mcc = qm_mc_start(portal[0]);
+ mcc->queryfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(portal[0], QM_MCC_VERB_QUERYFQ);
+ while (!(mcr = qm_mc_result(portal[0])))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) == QM_MCR_VERB_QUERYFQ);
+
+ /* Need to store these since the MCR gets reused */
+ dest_wq = be16_to_cpu(mcr->queryfq.fqd.dest_wq);
+ wq = dest_wq & 0x7;
+ channel = dest_wq>>3;
+
+ switch (state) {
+ case QM_MCR_NP_STATE_TEN_SCHED:
+ case QM_MCR_NP_STATE_TRU_SCHED:
+ case QM_MCR_NP_STATE_ACTIVE:
+ case QM_MCR_NP_STATE_PARKED:
+ orl_empty = 0;
+ mcc = qm_mc_start(portal[0]);
+ mcc->alterfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_RETIRE);
+ while (!(mcr = qm_mc_result(portal[0])))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_RETIRE);
+ result = mcr->result; /* Make a copy as we reuse MCR below */
+
+ if (result == QM_MCR_RESULT_PENDING) {
+ /* Need to wait for the FQRN in the message ring, which
+ will only occur once the FQ has been drained. In order
+ for the FQ to drain, the portal needs to be set to
+ dequeue from the channel the FQ is scheduled on */
+ const struct qm_mr_entry *msg;
+ const struct qm_dqrr_entry *dqrr = NULL;
+ int found_fqrn = 0;
+ u16 dequeue_wq = 0;
+
+ /* Flag that we need to drain FQ */
+ drain = 1;
+
+ if (channel >= qm_channel_pool1 &&
+ channel < (qm_channel_pool1 + 15)) {
+ /* Pool channel, enable the bit in the portal */
+ dequeue_wq = (channel -
+ qm_channel_pool1 + 1)<<4 | wq;
+ } else if (channel < qm_channel_pool1) {
+ /* Dedicated channel */
+ dequeue_wq = wq;
+ } else {
+ pr_info("Cannot recover FQ 0x%x, it is "
+ "scheduled on channel 0x%x",
+ fqid, channel);
+ return -EBUSY;
+ }
+ /* Set the sdqcr to drain this channel */
+ if (channel < qm_channel_pool1)
+ for (i = 0; i < portal_count; i++)
+ qm_dqrr_sdqcr_set(portal[i],
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_DEDICATED);
+ else
+ for (i = 0; i < portal_count; i++)
+ qm_dqrr_sdqcr_set(
+ portal[i],
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_CHANNELS_POOL_CONV
+ (channel));
+ while (!found_fqrn) {
+ /* Keep draining DQRR while checking the MR*/
+ for (i = 0; i < portal_count; i++) {
+ qm_dqrr_pvb_update(portal[i]);
+ dqrr = qm_dqrr_current(portal[i]);
+ while (dqrr) {
+ qm_dqrr_cdc_consume_1ptr(
+ portal[i], dqrr, 0);
+ qm_dqrr_pvb_update(portal[i]);
+ qm_dqrr_next(portal[i]);
+ dqrr = qm_dqrr_current(
+ portal[i]);
+ }
+ /* Process message ring too */
+ qm_mr_pvb_update(portal[i]);
+ msg = qm_mr_current(portal[i]);
+ while (msg) {
+ if ((msg->verb &
+ QM_MR_VERB_TYPE_MASK)
+ == QM_MR_VERB_FQRN)
+ found_fqrn = 1;
+ qm_mr_next(portal[i]);
+ qm_mr_cci_consume_to_current(
+ portal[i]);
+ qm_mr_pvb_update(portal[i]);
+ msg = qm_mr_current(portal[i]);
+ }
+ cpu_relax();
+ }
+ }
+ }
+ if (result != QM_MCR_RESULT_OK &&
+ result != QM_MCR_RESULT_PENDING) {
+ /* error */
+ pr_err("qman_retire_fq failed on FQ 0x%x, result=0x%x\n",
+ fqid, result);
+ return -1;
+ }
+ if (!(mcr->alterfq.fqs & QM_MCR_FQS_ORLPRESENT)) {
+ /* ORL had no entries, no need to wait until the
+ ERNs come in */
+ orl_empty = 1;
+ }
+ /* Retirement succeeded, check to see if FQ needs
+ to be drained */
+ if (drain || mcr->alterfq.fqs & QM_MCR_FQS_NOTEMPTY) {
+ /* FQ is Not Empty, drain using volatile DQ commands */
+ fq_empty = 0;
+ do {
+ const struct qm_dqrr_entry *dqrr = NULL;
+ u32 vdqcr = fqid | QM_VDQCR_NUMFRAMES_SET(3);
+ qm_dqrr_vdqcr_set(portal[0], vdqcr);
+
+ /* Wait for a dequeue to occur */
+ while (dqrr == NULL) {
+ qm_dqrr_pvb_update(portal[0]);
+ dqrr = qm_dqrr_current(portal[0]);
+ if (!dqrr)
+ cpu_relax();
+ }
+ /* Process the dequeues, making sure to
+ empty the ring completely */
+ while (dqrr) {
+ if (be32_to_cpu(dqrr->fqid) == fqid &&
+ dqrr->stat & QM_DQRR_STAT_FQ_EMPTY)
+ fq_empty = 1;
+ qm_dqrr_cdc_consume_1ptr(portal[0],
+ dqrr, 0);
+ qm_dqrr_pvb_update(portal[0]);
+ qm_dqrr_next(portal[0]);
+ dqrr = qm_dqrr_current(portal[0]);
+ }
+ } while (fq_empty == 0);
+ }
+ for (i = 0; i < portal_count; i++)
+ qm_dqrr_sdqcr_set(portal[i], 0);
+
+ /* Wait for the ORL to have been completely drained */
+ while (orl_empty == 0) {
+ const struct qm_mr_entry *msg;
+ qm_mr_pvb_update(portal[0]);
+ msg = qm_mr_current(portal[0]);
+ while (msg) {
+ if ((msg->verb & QM_MR_VERB_TYPE_MASK) ==
+ QM_MR_VERB_FQRL)
+ orl_empty = 1;
+ qm_mr_next(portal[0]);
+ qm_mr_cci_consume_to_current(portal[0]);
+ qm_mr_pvb_update(portal[0]);
+ msg = qm_mr_current(portal[0]);
+ }
+ cpu_relax();
+ }
+ mcc = qm_mc_start(portal[0]);
+ mcc->alterfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS);
+ while (!(mcr = qm_mc_result(portal[0])))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result != QM_MCR_RESULT_OK) {
+ pr_err("OOS after drain Failed on FQID 0x%x, result 0x%x\n",
+ fqid, mcr->result);
+ return -1;
+ }
+ return 0;
+ case QM_MCR_NP_STATE_RETIRED:
+ /* Send OOS Command */
+ mcc = qm_mc_start(portal[0]);
+ mcc->alterfq.fqid = cpu_to_be32(fqid);
+ qm_mc_commit(portal[0], QM_MCC_VERB_ALTER_OOS);
+ while (!(mcr = qm_mc_result(portal[0])))
+ cpu_relax();
+ DPA_ASSERT((mcr->verb & QM_MCR_VERB_MASK) ==
+ QM_MCR_VERB_ALTER_OOS);
+ if (mcr->result) {
+ pr_err("OOS Failed on FQID 0x%x\n", fqid);
+ return -1;
+ }
+ return 0;
+ }
+ return -1;
+}
diff --git a/drivers/staging/fsl_qbman/qman_private.h b/drivers/staging/fsl_qbman/qman_private.h
new file mode 100644
index 000000000000..ee025cfff194
--- /dev/null
+++ b/drivers/staging/fsl_qbman/qman_private.h
@@ -0,0 +1,398 @@
+/* Copyright 2008-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "dpa_sys.h"
+#include <linux/fsl_qman.h>
+#include <linux/iommu.h>
+
+#if defined(CONFIG_FSL_PAMU)
+#include <asm/fsl_pamu_stash.h>
+#endif
+
+#if !defined(CONFIG_FSL_QMAN_FQ_LOOKUP) && defined(CONFIG_PPC64)
+#error "_PPC64 requires _FSL_QMAN_FQ_LOOKUP"
+#endif
+
+#define QBMAN_ANY_PORTAL_IDX 0xffffffff
+ /* ----------------- */
+ /* Congestion Groups */
+ /* ----------------- */
+/* This wrapper represents a bit-array for the state of the 256 Qman congestion
+ * groups. It is also used as a *mask* for congestion groups, e.g. so we can
+ * ignore those that don't concern us. We harness the structure and accessor
+ * details already used in the management command to query congestion groups. */
+struct qman_cgrs {
+ struct __qm_mcr_querycongestion q;
+};
+static inline void qman_cgrs_init(struct qman_cgrs *c)
+{
+ memset(c, 0, sizeof(*c));
+}
+static inline void qman_cgrs_fill(struct qman_cgrs *c)
+{
+ memset(c, 0xff, sizeof(*c));
+}
+static inline int qman_cgrs_get(struct qman_cgrs *c, int num)
+{
+ return QM_MCR_QUERYCONGESTION(&c->q, num);
+}
+static inline void qman_cgrs_set(struct qman_cgrs *c, int num)
+{
+ c->q.__state[__CGR_WORD(num)] |= (0x80000000 >> __CGR_SHIFT(num));
+}
+static inline void qman_cgrs_unset(struct qman_cgrs *c, int num)
+{
+ c->q.__state[__CGR_WORD(num)] &= ~(0x80000000 >> __CGR_SHIFT(num));
+}
+static inline int qman_cgrs_next(struct qman_cgrs *c, int num)
+{
+ while ((++num < __CGR_NUM) && !qman_cgrs_get(c, num))
+ ;
+ return num;
+}
+static inline void qman_cgrs_cp(struct qman_cgrs *dest,
+ const struct qman_cgrs *src)
+{
+ *dest = *src;
+}
+static inline void qman_cgrs_and(struct qman_cgrs *dest,
+ const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+ int ret;
+ u32 *_d = dest->q.__state;
+ const u32 *_a = a->q.__state;
+ const u32 *_b = b->q.__state;
+ for (ret = 0; ret < 8; ret++)
+ *(_d++) = *(_a++) & *(_b++);
+}
+static inline void qman_cgrs_xor(struct qman_cgrs *dest,
+ const struct qman_cgrs *a, const struct qman_cgrs *b)
+{
+ int ret;
+ u32 *_d = dest->q.__state;
+ const u32 *_a = a->q.__state;
+ const u32 *_b = b->q.__state;
+ for (ret = 0; ret < 8; ret++)
+ *(_d++) = *(_a++) ^ *(_b++);
+}
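+
+/* Editorial sketch (not part of the original patch): the accessors above are
+ * typically combined to walk every congestion group set in a snapshot;
+ * qman_cgrs_next() starts from -1 so the first set bit is also visited. */
+static inline void qman_cgrs_example_walk(struct qman_cgrs *c)
+{
+ int i;
+
+ for (i = qman_cgrs_next(c, -1); i < __CGR_NUM; i = qman_cgrs_next(c, i))
+ pr_debug("congestion group %d is set\n", i);
+}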
+
+ /* ----------------------- */
+ /* CEETM Congestion Groups */
+ /* ----------------------- */
+/* This wrapper represents a bit-array for the state of the 512 Qman CEETM
+ * congestion groups.
+ */
+struct qman_ccgrs {
+ struct __qm_mcr_querycongestion q[2];
+};
+static inline void qman_ccgrs_init(struct qman_ccgrs *c)
+{
+ memset(c, 0, sizeof(*c));
+}
+static inline void qman_ccgrs_fill(struct qman_ccgrs *c)
+{
+ memset(c, 0xff, sizeof(*c));
+}
+static inline int qman_ccgrs_get(struct qman_ccgrs *c, int num)
+{
+ if (num < __CGR_NUM)
+ return QM_MCR_QUERYCONGESTION(&c->q[0], num);
+ else
+ return QM_MCR_QUERYCONGESTION(&c->q[1], (num - __CGR_NUM));
+}
+static inline int qman_ccgrs_next(struct qman_ccgrs *c, int num)
+{
+ while ((++num < __CGR_NUM) && !qman_ccgrs_get(c, num))
+ ;
+ return num;
+}
+static inline void qman_ccgrs_cp(struct qman_ccgrs *dest,
+ const struct qman_ccgrs *src)
+{
+ *dest = *src;
+}
+static inline void qman_ccgrs_and(struct qman_ccgrs *dest,
+ const struct qman_ccgrs *a, const struct qman_ccgrs *b)
+{
+ int ret, i;
+ u32 *_d;
+ const u32 *_a, *_b;
+ for (i = 0; i < 2; i++) {
+ _d = dest->q[i].__state;
+ _a = a->q[i].__state;
+ _b = b->q[i].__state;
+ for (ret = 0; ret < 8; ret++)
+ *(_d++) = *(_a++) & *(_b++);
+ }
+}
+static inline void qman_ccgrs_xor(struct qman_ccgrs *dest,
+ const struct qman_ccgrs *a, const struct qman_ccgrs *b)
+{
+ int ret, i;
+ u32 *_d;
+ const u32 *_a, *_b;
+ for (i = 0; i < 2; i++) {
+ _d = dest->q[i].__state;
+ _a = a->q[i].__state;
+ _b = b->q[i].__state;
+ for (ret = 0; ret < 8; ret++)
+ *(_d++) = *(_a++) ^ *(_b++);
+ }
+}
+
+/* used by CCSR and portal interrupt code */
+enum qm_isr_reg {
+ qm_isr_status = 0,
+ qm_isr_enable = 1,
+ qm_isr_disable = 2,
+ qm_isr_inhibit = 3
+};
+
+struct qm_portal_config {
+ /* Corenet portal addresses;
+ * [0]==cache-enabled, [1]==cache-inhibited. */
+ __iomem void *addr_virt[2];
+ struct resource addr_phys[2];
+ struct device dev;
+ struct iommu_domain *iommu_domain;
+ /* Allow these to be joined in lists */
+ struct list_head list;
+ /* User-visible portal configuration settings */
+ struct qman_portal_config public_cfg;
+ /* power management saved data */
+ u32 saved_isdr;
+};
+
+/* Revision info (for errata and feature handling) */
+#define QMAN_REV11 0x0101
+#define QMAN_REV12 0x0102
+#define QMAN_REV20 0x0200
+#define QMAN_REV30 0x0300
+#define QMAN_REV31 0x0301
+#define QMAN_REV32 0x0302
+
+/* QMan REV_2 register contains the Cfg option */
+#define QMAN_REV_CFG_0 0x0
+#define QMAN_REV_CFG_1 0x1
+#define QMAN_REV_CFG_2 0x2
+#define QMAN_REV_CFG_3 0x3
+
+extern u16 qman_ip_rev; /* 0 if uninitialised, otherwise QMAN_REVx */
+extern u8 qman_ip_cfg;
+extern u32 qman_clk;
+extern u16 qman_portal_max;
+
+#ifdef CONFIG_FSL_QMAN_CONFIG
+/* Hooks from qman_driver.c to qman_config.c */
+int qman_init_ccsr(struct device_node *node);
+void qman_liodn_fixup(u16 channel);
+int qman_set_sdest(u16 channel, unsigned int cpu_idx);
+size_t get_qman_fqd_size(void);
+#else
+static inline size_t get_qman_fqd_size(void)
+{
+ return (PAGE_SIZE << CONFIG_FSL_QMAN_FQD_SZ);
+}
+#endif
+
+int qm_set_wpm(int wpm);
+int qm_get_wpm(int *wpm);
+
+/* Hooks from qman_driver.c in to qman_high.c */
+struct qman_portal *qman_create_portal(
+ struct qman_portal *portal,
+ const struct qm_portal_config *config,
+ const struct qman_cgrs *cgrs);
+
+struct qman_portal *qman_create_affine_portal(
+ const struct qm_portal_config *config,
+ const struct qman_cgrs *cgrs);
+struct qman_portal *qman_create_affine_slave(struct qman_portal *redirect,
+ int cpu);
+const struct qm_portal_config *qman_destroy_affine_portal(void);
+void qman_destroy_portal(struct qman_portal *qm);
+
+/* Hooks from fsl_usdpaa.c to qman_driver.c */
+struct qm_portal_config *qm_get_unused_portal(void);
+struct qm_portal_config *qm_get_unused_portal_idx(uint32_t idx);
+
+void qm_put_unused_portal(struct qm_portal_config *pcfg);
+void qm_set_liodns(struct qm_portal_config *pcfg);
+
+/* This CGR feature is supported by h/w and required by unit-tests and the
+ * debugfs hooks, so is implemented in the driver. However it allows an explicit
+ * corruption of h/w fields by s/w that are usually incorruptible (because the
+ * counters are usually maintained entirely within h/w). As such, we declare
+ * this API internally. */
+int qman_testwrite_cgr(struct qman_cgr *cgr, u64 i_bcnt,
+ struct qm_mcr_cgrtestwrite *result);
+
+#ifdef CONFIG_FSL_QMAN_FQ_LOOKUP
+/* If the fq object pointer is larger than the context_b field can hold,
+ * then a lookup table is required. */
+int qman_setup_fq_lookup_table(size_t num_entries);
+#endif
+
+
+/*************************************************/
+/* QMan s/w corenet portal, low-level i/face */
+/*************************************************/
+
+/* Note: most functions are only used by the high-level interface, so are
+ * inlined from qman_low.h. The stuff below is for use by other parts of the
+ * driver. */
+
+/* For qm_dqrr_sdqcr_set(); Choose one SOURCE. Choose one COUNT. Choose one
+ * dequeue TYPE. Choose TOKEN (8-bit).
+ * If SOURCE == CHANNELS,
+ * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL(n).
+ * You can choose DEDICATED_PRECEDENCE if the portal channel should have
+ * priority.
+ * If SOURCE == SPECIFICWQ,
+ * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
+ * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
+ * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
+ * same value.
+ */
+#define QM_SDQCR_SOURCE_CHANNELS 0x0
+#define QM_SDQCR_SOURCE_SPECIFICWQ 0x40000000
+#define QM_SDQCR_COUNT_EXACT1 0x0
+#define QM_SDQCR_COUNT_UPTO3 0x20000000
+#define QM_SDQCR_DEDICATED_PRECEDENCE 0x10000000
+#define QM_SDQCR_TYPE_MASK 0x03000000
+#define QM_SDQCR_TYPE_NULL 0x0
+#define QM_SDQCR_TYPE_PRIO_QOS 0x01000000
+#define QM_SDQCR_TYPE_ACTIVE_QOS 0x02000000
+#define QM_SDQCR_TYPE_ACTIVE 0x03000000
+#define QM_SDQCR_TOKEN_MASK 0x00ff0000
+#define QM_SDQCR_TOKEN_SET(v) (((v) & 0xff) << 16)
+#define QM_SDQCR_TOKEN_GET(v) (((v) >> 16) & 0xff)
+#define QM_SDQCR_CHANNELS_DEDICATED 0x00008000
+#define QM_SDQCR_SPECIFICWQ_MASK 0x000000f7
+#define QM_SDQCR_SPECIFICWQ_DEDICATED 0x00000000
+#define QM_SDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
+#define QM_SDQCR_SPECIFICWQ_WQ(n) (n)
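+
+/* Editorial sketch (not part of the original patch): composing an SDQCR value
+ * from the fields above. This example dequeues up to 3 frames per command,
+ * with active-FQ priority, from the portal's dedicated channel, tagging the
+ * resulting DQRR entries with token 0x42. */
+static inline u32 qm_sdqcr_example(void)
+{
+ return QM_SDQCR_SOURCE_CHANNELS |
+ QM_SDQCR_COUNT_UPTO3 |
+ QM_SDQCR_TYPE_ACTIVE |
+ QM_SDQCR_TOKEN_SET(0x42) |
+ QM_SDQCR_CHANNELS_DEDICATED;
+}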
+
+/* For qm_dqrr_vdqcr_set(): use FQID(n) to fill in the frame queue ID */
+#define QM_VDQCR_FQID_MASK 0x00ffffff
+#define QM_VDQCR_FQID(n) ((n) & QM_VDQCR_FQID_MASK)
+
+/* For qm_dqrr_pdqcr_set(); Choose one MODE. Choose one COUNT.
+ * If MODE==SCHEDULED
+ * Choose SCHEDULED_CHANNELS or SCHEDULED_SPECIFICWQ. Choose one dequeue TYPE.
+ * If CHANNELS,
+ * Choose CHANNELS_DEDICATED and/or CHANNELS_POOL() channels.
+ * You can choose DEDICATED_PRECEDENCE if the portal channel should have
+ * priority.
+ * If SPECIFICWQ,
+ * Either select the work-queue ID with SPECIFICWQ_WQ(), or select the
+ * channel (SPECIFICWQ_DEDICATED or SPECIFICWQ_POOL()) and specify the
+ * work-queue priority (0-7) with SPECIFICWQ_WQ() - either way, you get the
+ * same value.
+ * If MODE==UNSCHEDULED
+ * Choose FQID().
+ */
+#define QM_PDQCR_MODE_SCHEDULED 0x0
+#define QM_PDQCR_MODE_UNSCHEDULED 0x80000000
+#define QM_PDQCR_SCHEDULED_CHANNELS 0x0
+#define QM_PDQCR_SCHEDULED_SPECIFICWQ 0x40000000
+#define QM_PDQCR_COUNT_EXACT1 0x0
+#define QM_PDQCR_COUNT_UPTO3 0x20000000
+#define QM_PDQCR_DEDICATED_PRECEDENCE 0x10000000
+#define QM_PDQCR_TYPE_MASK 0x03000000
+#define QM_PDQCR_TYPE_NULL 0x0
+#define QM_PDQCR_TYPE_PRIO_QOS 0x01000000
+#define QM_PDQCR_TYPE_ACTIVE_QOS 0x02000000
+#define QM_PDQCR_TYPE_ACTIVE 0x03000000
+#define QM_PDQCR_CHANNELS_DEDICATED 0x00008000
+#define QM_PDQCR_CHANNELS_POOL(n) (0x00008000 >> (n))
+#define QM_PDQCR_SPECIFICWQ_MASK 0x000000f7
+#define QM_PDQCR_SPECIFICWQ_DEDICATED 0x00000000
+#define QM_PDQCR_SPECIFICWQ_POOL(n) ((n) << 4)
+#define QM_PDQCR_SPECIFICWQ_WQ(n) (n)
+#define QM_PDQCR_FQID(n) ((n) & 0xffffff)
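+
+/* Editorial sketch (not part of the original patch): an unscheduled-mode
+ * PDQCR that pulls up to 3 frames directly from one (parked) frame queue. */
+static inline u32 qm_pdqcr_example(u32 fqid)
+{
+ return QM_PDQCR_MODE_UNSCHEDULED |
+ QM_PDQCR_COUNT_UPTO3 |
+ QM_PDQCR_FQID(fqid);
+}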
+
+/* Used by all portal interrupt registers except 'inhibit'
+ * Channels with frame availability
+ */
+#define QM_PIRQ_DQAVAIL 0x0000ffff
+
+/* The DQAVAIL interrupt fields break down into these bits; */
+#define QM_DQAVAIL_PORTAL 0x8000 /* Portal channel */
+#define QM_DQAVAIL_POOL(n) (0x8000 >> (n)) /* Pool channel, n==[1..15] */
+#define QM_DQAVAIL_MASK 0xffff
+/* This mask contains all the "irqsource" bits visible to API users */
+#define QM_PIRQ_VISIBLE (QM_PIRQ_SLOW | QM_PIRQ_DQRI)
+
+/* These are qm_<reg>_<verb>(). So for example, qm_disable_write() means "write
+ * the disable register" rather than "disable the ability to write". */
+#define qm_isr_status_read(qm) __qm_isr_read(qm, qm_isr_status)
+#define qm_isr_status_clear(qm, m) __qm_isr_write(qm, qm_isr_status, m)
+#define qm_isr_enable_read(qm) __qm_isr_read(qm, qm_isr_enable)
+#define qm_isr_enable_write(qm, v) __qm_isr_write(qm, qm_isr_enable, v)
+#define qm_isr_disable_read(qm) __qm_isr_read(qm, qm_isr_disable)
+#define qm_isr_disable_write(qm, v) __qm_isr_write(qm, qm_isr_disable, v)
+/* TODO: unfortunate name-clash here, reword? */
+#define qm_isr_inhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 1)
+#define qm_isr_uninhibit(qm) __qm_isr_write(qm, qm_isr_inhibit, 0)
+
+#ifdef CONFIG_FSL_QMAN_CONFIG
+int qman_have_ccsr(void);
+#else
+#define qman_have_ccsr 0
+#endif
+
+__init int qman_init(void);
+__init int qman_resource_init(void);
+
+/* CEETM related */
+#define QMAN_CEETM_MAX 2
+extern u8 num_ceetms;
+extern struct qm_ceetm qman_ceetms[QMAN_CEETM_MAX];
+int qman_sp_enable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal);
+int qman_sp_disable_ceetm_mode(enum qm_dc_portal portal, u16 sub_portal);
+int qman_ceetm_set_prescaler(enum qm_dc_portal portal);
+int qman_ceetm_get_prescaler(u16 *pres);
+int qman_ceetm_query_cq(unsigned int cqid, unsigned int dcpid,
+ struct qm_mcr_ceetm_cq_query *cq_query);
+int qman_ceetm_query_ccgr(struct qm_mcc_ceetm_ccgr_query *ccgr_query,
+ struct qm_mcr_ceetm_ccgr_query *response);
+int qman_ceetm_get_xsfdr(enum qm_dc_portal portal, unsigned int *num);
+
+extern void *affine_portals[NR_CPUS];
+const struct qm_portal_config *qman_get_qm_portal_config(
+ struct qman_portal *portal);
+
+/* power management */
+#ifdef CONFIG_SUSPEND
+void suspend_unused_qportal(void);
+void resume_unused_qportal(void);
+#endif
diff --git a/drivers/staging/fsl_qbman/qman_test.c b/drivers/staging/fsl_qbman/qman_test.c
new file mode 100644
index 000000000000..7995dd8c8435
--- /dev/null
+++ b/drivers/staging/fsl_qbman/qman_test.c
@@ -0,0 +1,57 @@
+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+MODULE_AUTHOR("Geoff Thorpe");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("Qman testing");
+
+static int test_init(void)
+{
+ int loop = 1;
+ while (loop--) {
+#ifdef CONFIG_FSL_QMAN_TEST_STASH_POTATO
+ qman_test_hotpotato();
+#endif
+#ifdef CONFIG_FSL_QMAN_TEST_HIGH
+ qman_test_high();
+#endif
+ }
+ return 0;
+}
+
+static void test_exit(void)
+{
+}
+
+module_init(test_init);
+module_exit(test_exit);
diff --git a/drivers/staging/fsl_qbman/qman_test.h b/drivers/staging/fsl_qbman/qman_test.h
new file mode 100644
index 000000000000..8c4181c7ea3d
--- /dev/null
+++ b/drivers/staging/fsl_qbman/qman_test.h
@@ -0,0 +1,45 @@
+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+
+#include <linux/fsl_qman.h>
+
+void qman_test_hotpotato(void);
+void qman_test_high(void);
+
diff --git a/drivers/staging/fsl_qbman/qman_test_high.c b/drivers/staging/fsl_qbman/qman_test_high.c
new file mode 100644
index 000000000000..65ee270e4245
--- /dev/null
+++ b/drivers/staging/fsl_qbman/qman_test_high.c
@@ -0,0 +1,216 @@
+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_test.h"
+
+/*************/
+/* constants */
+/*************/
+
+#define CGR_ID 27
+#define POOL_ID 2
+#define FQ_FLAGS QMAN_FQ_FLAG_DYNAMIC_FQID
+#define NUM_ENQUEUES 10
+#define NUM_PARTIAL 4
+#define PORTAL_SDQCR (QM_SDQCR_SOURCE_CHANNELS | \
+ QM_SDQCR_TYPE_PRIO_QOS | \
+ QM_SDQCR_TOKEN_SET(0x98) | \
+ QM_SDQCR_CHANNELS_DEDICATED | \
+ QM_SDQCR_CHANNELS_POOL(POOL_ID))
+#define PORTAL_OPAQUE ((void *)0xf00dbeef)
+#define VDQCR_FLAGS (QMAN_VOLATILE_FLAG_WAIT | QMAN_VOLATILE_FLAG_FINISH)
+
+/*************************************/
+/* Predeclarations (eg. for fq_base) */
+/*************************************/
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
+ struct qman_fq *,
+ const struct qm_dqrr_entry *);
+static void cb_ern(struct qman_portal *, struct qman_fq *,
+ const struct qm_mr_entry *);
+static void cb_fqs(struct qman_portal *, struct qman_fq *,
+ const struct qm_mr_entry *);
+
+/***************/
+/* global vars */
+/***************/
+
+static struct qm_fd fd, fd_dq;
+static struct qman_fq fq_base = {
+ .cb.dqrr = cb_dqrr,
+ .cb.ern = cb_ern,
+ .cb.fqs = cb_fqs
+};
+static DECLARE_WAIT_QUEUE_HEAD(waitqueue);
+static int retire_complete, sdqcr_complete;
+
+/**********************/
+/* internal functions */
+/**********************/
+
+/* Helpers for initialising and "incrementing" a frame descriptor */
+static void fd_init(struct qm_fd *__fd)
+{
+ qm_fd_addr_set64(__fd, 0xabdeadbeefLLU);
+ __fd->format = qm_fd_contig_big;
+ __fd->length29 = 0x0000ffff;
+ __fd->cmd = 0xfeedf00d;
+}
+
+static void fd_inc(struct qm_fd *__fd)
+{
+ u64 t = qm_fd_addr_get64(__fd);
+ int z = t >> 40;
+ t <<= 1;
+ if (z)
+ t |= 1;
+ qm_fd_addr_set64(__fd, t);
+ __fd->length29--;
+ __fd->cmd++;
+}
+
+/* The only part of the 'fd' we can't memcmp() is the ppid */
+static int fd_cmp(const struct qm_fd *a, const struct qm_fd *b)
+{
+ int r = (qm_fd_addr_get64(a) == qm_fd_addr_get64(b)) ? 0 : -1;
+ if (!r)
+ r = a->format - b->format;
+ if (!r)
+ r = a->opaque - b->opaque;
+ if (!r)
+ r = a->cmd - b->cmd;
+ return r;
+}
+
+/********/
+/* test */
+/********/
+
+static void do_enqueues(struct qman_fq *fq)
+{
+ unsigned int loop;
+ for (loop = 0; loop < NUM_ENQUEUES; loop++) {
+ if (qman_enqueue(fq, &fd, QMAN_ENQUEUE_FLAG_WAIT |
+ (((loop + 1) == NUM_ENQUEUES) ?
+ QMAN_ENQUEUE_FLAG_WAIT_SYNC : 0)))
+ panic("qman_enqueue() failed\n");
+ fd_inc(&fd);
+ }
+}
+
+void qman_test_high(void)
+{
+ unsigned int flags;
+ int res;
+ struct qman_fq *fq = &fq_base;
+
+ pr_info("qman_test_high starting\n");
+ fd_init(&fd);
+ fd_init(&fd_dq);
+
+ /* Initialise (parked) FQ */
+ if (qman_create_fq(0, FQ_FLAGS, fq))
+ panic("qman_create_fq() failed\n");
+ if (qman_init_fq(fq, QMAN_INITFQ_FLAG_LOCAL, NULL))
+ panic("qman_init_fq() failed\n");
+
+ /* Do enqueues + VDQCR, twice. (Parked FQ) */
+ do_enqueues(fq);
+ pr_info("VDQCR (till-empty);\n");
+ if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
+ QM_VDQCR_NUMFRAMES_TILLEMPTY))
+ panic("qman_volatile_dequeue() failed\n");
+ do_enqueues(fq);
+ pr_info("VDQCR (%d of %d);\n", NUM_PARTIAL, NUM_ENQUEUES);
+ if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
+ QM_VDQCR_NUMFRAMES_SET(NUM_PARTIAL)))
+ panic("qman_volatile_dequeue() failed\n");
+ pr_info("VDQCR (%d of %d);\n", NUM_ENQUEUES - NUM_PARTIAL,
+ NUM_ENQUEUES);
+ if (qman_volatile_dequeue(fq, VDQCR_FLAGS,
+ QM_VDQCR_NUMFRAMES_SET(NUM_ENQUEUES - NUM_PARTIAL)))
+ panic("qman_volatile_dequeue() failed\n");
+
+ do_enqueues(fq);
+ pr_info("scheduled dequeue (till-empty)\n");
+ if (qman_schedule_fq(fq))
+ panic("qman_schedule_fq() failed\n");
+ wait_event(waitqueue, sdqcr_complete);
+
+ /* Retire and OOS the FQ */
+ res = qman_retire_fq(fq, &flags);
+ if (res < 0)
+ panic("qman_retire_fq() failed\n");
+ wait_event(waitqueue, retire_complete);
+ if (flags & QMAN_FQ_STATE_BLOCKOOS)
+ panic("leaking frames\n");
+ if (qman_oos_fq(fq))
+ panic("qman_oos_fq() failed\n");
+ qman_destroy_fq(fq, 0);
+ pr_info("qman_test_high finished\n");
+}
+
+static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dq)
+{
+ if (fd_cmp(&fd_dq, &dq->fd)) {
+ pr_err("BADNESS: dequeued frame doesn't match;\n");
+ pr_err("Expected 0x%llx, got 0x%llx\n",
+ (unsigned long long)fd_dq.length29,
+ (unsigned long long)dq->fd.length29);
+ BUG();
+ }
+ fd_inc(&fd_dq);
+ if (!(dq->stat & QM_DQRR_STAT_UNSCHEDULED) && !fd_cmp(&fd_dq, &fd)) {
+ sdqcr_complete = 1;
+ wake_up(&waitqueue);
+ }
+ return qman_cb_dqrr_consume;
+}
+
+static void cb_ern(struct qman_portal *p, struct qman_fq *fq,
+ const struct qm_mr_entry *msg)
+{
+ panic("cb_ern() unimplemented");
+}
+
+static void cb_fqs(struct qman_portal *p, struct qman_fq *fq,
+ const struct qm_mr_entry *msg)
+{
+ u8 verb = (msg->verb & QM_MR_VERB_TYPE_MASK);
+ if ((verb != QM_MR_VERB_FQRN) && (verb != QM_MR_VERB_FQRNI))
+ panic("unexpected FQS message");
+ pr_info("Retirement message received\n");
+ retire_complete = 1;
+ wake_up(&waitqueue);
+}
diff --git a/drivers/staging/fsl_qbman/qman_test_hotpotato.c b/drivers/staging/fsl_qbman/qman_test_hotpotato.c
new file mode 100644
index 000000000000..899d2aa9d580
--- /dev/null
+++ b/drivers/staging/fsl_qbman/qman_test_hotpotato.c
@@ -0,0 +1,502 @@
+/* Copyright 2009-2012 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kthread.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include "qman_test.h"
+
+/* Algorithm:
+ *
+ * Each cpu will have HP_PER_CPU "handlers" set up, each of which incorporates
+ * an rx/tx pair of FQ objects (both of which are stashed on dequeue). The
+ * organisation of FQIDs is such that the HP_PER_CPU*NUM_CPUS handlers will
+ * shuttle a "hot potato" frame around them such that every forwarding action
+ * moves it from one cpu to another. (The use of more than one handler per cpu
+ * is to allow enough handlers/FQs to truly test the significance of caching -
+ * i.e. when cache-expiries are occurring.)
+ *
+ * The "hot potato" frame content will be HP_NUM_WORDS*4 bytes in size, and the
+ * first and last words of the frame data will undergo a transformation step on
+ * each forwarding action. To achieve this, each handler will be assigned a
+ * 32-bit "mixer", that is produced using a 32-bit LFSR. When a frame is
+ * received by a handler, the mixer of the expected sender is XOR'd into all
+ * words of the entire frame, which is then validated against the original
+ * values. Then, before forwarding, the entire frame is XOR'd with the mixer of
+ * the current handler. Apart from validating that the frame is taking the
+ * expected path, this also provides some quasi-realistic overheads to each
+ * forwarding action - dereferencing *all* the frame data, computation, and
+ * conditional branching. There is a "special" handler designated to act as the
+ * instigator of the test by creating and enqueuing the "hot potato" frame, and
+ * to determine when the test has completed by counting HP_LOOPS iterations.
+ *
+ * Init phases:
+ *
+ * 1. prepare each cpu's 'hp_cpu' struct using on_each_cpu(,,1) and link them
+ * into 'hp_cpu_list'. Specifically, set processor_id, allocate HP_PER_CPU
+ * handlers and link-list them (but do no other handler setup).
+ *
+ * 2. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ * hp_cpu's 'iterator' to point to its first handler. With each loop,
+ * allocate rx/tx FQIDs and mixer values to the hp_cpu's iterator handler
+ * and advance the iterator for the next loop. This includes a final fixup,
+ * which connects the last handler to the first (and which is why phase 2
+ * and 3 are separate).
+ *
+ * 3. scan over 'hp_cpu_list' HP_PER_CPU times, the first time sets each
+ * hp_cpu's 'iterator' to point to its first handler. With each loop,
+ * initialise FQ objects and advance the iterator for the next loop.
+ * Moreover, do this initialisation on the cpu it applies to so that Rx FQ
+ * initialisation targets the correct cpu.
+ */
+
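+/* Editorial sketch (not part of the original patch): the per-hop transform
+ * described above, shown in isolation. A receiving handler XORs the sender's
+ * mixer out of each word, validates against the LFSR reference sequence, then
+ * XORs its own mixer in before forwarding. (Guarded out; process_frame_data()
+ * below is the real implementation.) */
+#if 0
+static void example_hop(u32 *frame, int words, u32 rx_mixer, u32 tx_mixer)
+{
+ u32 lfsr = HP_FIRST_WORD;
+ int i;
+
+ for (i = 0; i < words; i++) {
+ frame[i] ^= rx_mixer; /* undo the previous sender's mixer */
+ if (frame[i] != lfsr) /* validate against the reference */
+ panic("corrupt frame data");
+ frame[i] ^= tx_mixer; /* apply our mixer for the next hop */
+ lfsr = do_lfsr(lfsr);
+ }
+}
+#endif
+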
+/* helper to run something on all cpus (can't use on_each_cpu(), as that invokes
+ * the fn from irq context, which is too restrictive). */
+struct bstrap {
+ void (*fn)(void);
+ atomic_t started;
+};
+static int bstrap_fn(void *__bstrap)
+{
+ struct bstrap *bstrap = __bstrap;
+ atomic_inc(&bstrap->started);
+ bstrap->fn();
+ while (!kthread_should_stop())
+ msleep(1);
+ return 0;
+}
+static int on_all_cpus(void (*fn)(void))
+{
+ int cpu;
+ for_each_cpu(cpu, cpu_online_mask) {
+ struct bstrap bstrap = {
+ .fn = fn,
+ .started = ATOMIC_INIT(0)
+ };
+ struct task_struct *k = kthread_create(bstrap_fn, &bstrap,
+ "hotpotato%d", cpu);
+ int ret;
+ if (IS_ERR(k))
+ return -ENOMEM;
+ kthread_bind(k, cpu);
+ wake_up_process(k);
+ /* If we call kthread_stop() before the "wake up" has had an
+ * effect, then the thread may exit with -EINTR without ever
+ * running the function. So poll until it's started before
+ * requesting it to stop. */
+ while (!atomic_read(&bstrap.started))
+ msleep(10);
+ ret = kthread_stop(k);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+struct hp_handler {
+
+ /* The following data is stashed when 'rx' is dequeued; */
+ /* -------------- */
+ /* The Rx FQ, dequeues of which will stash the entire hp_handler */
+ struct qman_fq rx;
+ /* The Tx FQ we should forward to */
+ struct qman_fq tx;
+ /* The value we XOR post-dequeue, prior to validating */
+ u32 rx_mixer;
+ /* The value we XOR pre-enqueue, after validating */
+ u32 tx_mixer;
+ /* what the hotpotato address should be on dequeue */
+ dma_addr_t addr;
+ u32 *frame_ptr;
+
+ /* The following data isn't (necessarily) stashed on dequeue; */
+ /* -------------- */
+ u32 fqid_rx, fqid_tx;
+ /* list node for linking us into 'hp_cpu' */
+ struct list_head node;
+ /* Just to check ... */
+ unsigned int processor_id;
+} ____cacheline_aligned;
+
+struct hp_cpu {
+ /* identify the cpu we run on; */
+ unsigned int processor_id;
+ /* root node for the per-cpu list of handlers */
+ struct list_head handlers;
+ /* list node for linking us into 'hp_cpu_list' */
+ struct list_head node;
+ /* when repeatedly scanning 'hp_cpu_list', each time linking the n'th
+ * handlers together, this is used as per-cpu iterator state */
+ struct hp_handler *iterator;
+};
+
+/* Each cpu has one of these */
+static DEFINE_PER_CPU(struct hp_cpu, hp_cpus);
+
+/* links together the hp_cpu structs, in first-come first-serve order. */
+static LIST_HEAD(hp_cpu_list);
+static spinlock_t hp_lock = __SPIN_LOCK_UNLOCKED(hp_lock);
+
+static unsigned int hp_cpu_list_length;
+
+/* the "special" handler, that starts and terminates the test. */
+static struct hp_handler *special_handler;
+static int loop_counter;
+
+/* handlers are allocated out of this, so they're properly aligned. */
+static struct kmem_cache *hp_handler_slab;
+
+/* this is the frame data */
+static void *__frame_ptr;
+static u32 *frame_ptr;
+static dma_addr_t frame_dma;
+
+/* the main function waits on this */
+static DECLARE_WAIT_QUEUE_HEAD(queue);
+
+#define HP_PER_CPU 2
+#define HP_LOOPS 8
+/* 80 words (320 bytes), like a small ethernet frame, spanning several cachelines */
+#define HP_NUM_WORDS 80
+/* First word of the LFSR-based frame data */
+#define HP_FIRST_WORD 0xabbaf00d
+
+static inline u32 do_lfsr(u32 prev)
+{
+ return (prev >> 1) ^ (-(prev & 1u) & 0xd0000001u);
+}
+
+static void allocate_frame_data(void)
+{
+ u32 lfsr = HP_FIRST_WORD;
+ int loop;
+ struct platform_device *pdev = platform_device_alloc("foobar", -1);
+ if (!pdev)
+ panic("platform_device_alloc() failed");
+ if (platform_device_add(pdev))
+ panic("platform_device_add() failed");
+ __frame_ptr = kmalloc(4 * HP_NUM_WORDS, GFP_KERNEL);
+ if (!__frame_ptr)
+ panic("kmalloc() failed");
+ frame_ptr = (void *)(((unsigned long)__frame_ptr + 63) &
+ ~(unsigned long)63);
+ for (loop = 0; loop < HP_NUM_WORDS; loop++) {
+ frame_ptr[loop] = lfsr;
+ lfsr = do_lfsr(lfsr);
+ }
+ frame_dma = dma_map_single(&pdev->dev, frame_ptr, 4 * HP_NUM_WORDS,
+ DMA_BIDIRECTIONAL);
+ platform_device_del(pdev);
+ platform_device_put(pdev);
+}
+
+static void deallocate_frame_data(void)
+{
+ kfree(__frame_ptr);
+}
+
+static inline void process_frame_data(struct hp_handler *handler,
+ const struct qm_fd *fd)
+{
+ u32 *p = handler->frame_ptr;
+ u32 lfsr = HP_FIRST_WORD;
+ int loop;
+ if (qm_fd_addr_get64(fd) != (handler->addr & 0xffffffffff)) {
+ pr_err("Got 0x%llx expected 0x%llx\n",
+ qm_fd_addr_get64(fd), handler->addr);
+ panic("bad frame address");
+ }
+ for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+ *p ^= handler->rx_mixer;
+ if (*p != lfsr)
+ panic("corrupt frame data");
+ *p ^= handler->tx_mixer;
+ lfsr = do_lfsr(lfsr);
+ }
+}
+
+static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr)
+{
+ struct hp_handler *handler = (struct hp_handler *)fq;
+
+ process_frame_data(handler, &dqrr->fd);
+ if (qman_enqueue(&handler->tx, &dqrr->fd, 0))
+ panic("qman_enqueue() failed");
+ return qman_cb_dqrr_consume;
+}
+
+static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
+ struct qman_fq *fq,
+ const struct qm_dqrr_entry *dqrr)
+{
+ struct hp_handler *handler = (struct hp_handler *)fq;
+
+ process_frame_data(handler, &dqrr->fd);
+ if (++loop_counter < HP_LOOPS) {
+ if (qman_enqueue(&handler->tx, &dqrr->fd, 0))
+ panic("qman_enqueue() failed");
+ } else {
+ pr_info("Received final (%dth) frame\n", loop_counter);
+ wake_up(&queue);
+ }
+ return qman_cb_dqrr_consume;
+}
+
+static void create_per_cpu_handlers(void)
+{
+ struct hp_handler *handler;
+ int loop;
+ struct hp_cpu *hp_cpu = &get_cpu_var(hp_cpus);
+
+ hp_cpu->processor_id = smp_processor_id();
+ spin_lock(&hp_lock);
+ list_add_tail(&hp_cpu->node, &hp_cpu_list);
+ hp_cpu_list_length++;
+ spin_unlock(&hp_lock);
+ INIT_LIST_HEAD(&hp_cpu->handlers);
+ for (loop = 0; loop < HP_PER_CPU; loop++) {
+ handler = kmem_cache_alloc(hp_handler_slab, GFP_KERNEL);
+ if (!handler)
+ panic("kmem_cache_alloc() failed");
+ handler->processor_id = hp_cpu->processor_id;
+ handler->addr = frame_dma;
+ handler->frame_ptr = frame_ptr;
+ list_add_tail(&handler->node, &hp_cpu->handlers);
+ }
+ put_cpu_var(hp_cpus);
+}
+
+static void destroy_per_cpu_handlers(void)
+{
+ struct list_head *loop, *tmp;
+ struct hp_cpu *hp_cpu = &get_cpu_var(hp_cpus);
+
+ spin_lock(&hp_lock);
+ list_del(&hp_cpu->node);
+ spin_unlock(&hp_lock);
+ list_for_each_safe(loop, tmp, &hp_cpu->handlers) {
+ u32 flags;
+ struct hp_handler *handler = list_entry(loop, struct hp_handler,
+ node);
+ if (qman_retire_fq(&handler->rx, &flags))
+ panic("qman_retire_fq(rx) failed");
+ BUG_ON(flags & QMAN_FQ_STATE_BLOCKOOS);
+ if (qman_oos_fq(&handler->rx))
+ panic("qman_oos_fq(rx) failed");
+ qman_destroy_fq(&handler->rx, 0);
+ qman_destroy_fq(&handler->tx, 0);
+ qman_release_fqid(handler->fqid_rx);
+ list_del(&handler->node);
+ kmem_cache_free(hp_handler_slab, handler);
+ }
+ put_cpu_var(hp_cpus);
+}
+
+static inline u8 num_cachelines(u32 offset)
+{
+ u8 res = (offset + (L1_CACHE_BYTES - 1))
+ / (L1_CACHE_BYTES);
+ if (res > 3)
+ return 3;
+ return res;
+}
+#define STASH_DATA_CL \
+ num_cachelines(HP_NUM_WORDS * 4)
+#define STASH_CTX_CL \
+ num_cachelines(offsetof(struct hp_handler, fqid_rx))
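+
+/* Editorial note: with HP_NUM_WORDS = 80 the frame is 320 bytes, i.e. five
+ * cachelines on a 64-byte-cacheline CPU, so STASH_DATA_CL saturates at the
+ * 3-cacheline maximum enforced by num_cachelines(); STASH_CTX_CL covers the
+ * hp_handler fields up to 'fqid_rx'. */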
+
+static void init_handler(void *__handler)
+{
+ struct qm_mcc_initfq opts;
+ struct hp_handler *handler = __handler;
+ BUG_ON(handler->processor_id != smp_processor_id());
+ /* Set up rx */
+ memset(&handler->rx, 0, sizeof(handler->rx));
+ if (handler == special_handler)
+ handler->rx.cb.dqrr = special_dqrr;
+ else
+ handler->rx.cb.dqrr = normal_dqrr;
+ if (qman_create_fq(handler->fqid_rx, 0, &handler->rx))
+ panic("qman_create_fq(rx) failed");
+ memset(&opts, 0, sizeof(opts));
+ opts.we_mask = QM_INITFQ_WE_FQCTRL | QM_INITFQ_WE_CONTEXTA;
+ opts.fqd.fq_ctrl = QM_FQCTRL_CTXASTASHING;
+ opts.fqd.context_a.stashing.data_cl = STASH_DATA_CL;
+ opts.fqd.context_a.stashing.context_cl = STASH_CTX_CL;
+ if (qman_init_fq(&handler->rx, QMAN_INITFQ_FLAG_SCHED |
+ QMAN_INITFQ_FLAG_LOCAL, &opts))
+ panic("qman_init_fq(rx) failed");
+ /* Set up tx */
+ memset(&handler->tx, 0, sizeof(handler->tx));
+ if (qman_create_fq(handler->fqid_tx, QMAN_FQ_FLAG_NO_MODIFY,
+ &handler->tx))
+ panic("qman_create_fq(tx) failed");
+}
+
+static void init_phase2(void)
+{
+ int loop;
+ u32 fqid = 0;
+ u32 lfsr = 0xdeadbeef;
+ struct hp_cpu *hp_cpu;
+ struct hp_handler *handler;
+
+ for (loop = 0; loop < HP_PER_CPU; loop++) {
+ list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+ int ret;
+ if (!loop)
+ hp_cpu->iterator = list_first_entry(
+ &hp_cpu->handlers,
+ struct hp_handler, node);
+ else
+ hp_cpu->iterator = list_entry(
+ hp_cpu->iterator->node.next,
+ struct hp_handler, node);
+ /* Rx FQID is the previous handler's Tx FQID */
+ hp_cpu->iterator->fqid_rx = fqid;
+ /* Allocate new FQID for Tx */
+ ret = qman_alloc_fqid(&fqid);
+ if (ret)
+ panic("qman_alloc_fqid() failed");
+ hp_cpu->iterator->fqid_tx = fqid;
+ /* Rx mixer is the previous handler's Tx mixer */
+ hp_cpu->iterator->rx_mixer = lfsr;
+ /* Get new mixer for Tx */
+ lfsr = do_lfsr(lfsr);
+ hp_cpu->iterator->tx_mixer = lfsr;
+ }
+ }
+ /* Fix up the first handler (fqid_rx==0, rx_mixer=0xdeadbeef) */
+ hp_cpu = list_first_entry(&hp_cpu_list, struct hp_cpu, node);
+ handler = list_first_entry(&hp_cpu->handlers, struct hp_handler, node);
+ BUG_ON((handler->fqid_rx != 0) || (handler->rx_mixer != 0xdeadbeef));
+ handler->fqid_rx = fqid;
+ handler->rx_mixer = lfsr;
+ /* and tag it as our "special" handler */
+ special_handler = handler;
+}
+
+static void init_phase3(void)
+{
+ int loop;
+ struct hp_cpu *hp_cpu;
+
+ for (loop = 0; loop < HP_PER_CPU; loop++) {
+ list_for_each_entry(hp_cpu, &hp_cpu_list, node) {
+ if (!loop)
+ hp_cpu->iterator = list_first_entry(
+ &hp_cpu->handlers,
+ struct hp_handler, node);
+ else
+ hp_cpu->iterator = list_entry(
+ hp_cpu->iterator->node.next,
+ struct hp_handler, node);
+ preempt_disable();
+ if (hp_cpu->processor_id == smp_processor_id())
+ init_handler(hp_cpu->iterator);
+ else
+ smp_call_function_single(hp_cpu->processor_id,
+ init_handler, hp_cpu->iterator, 1);
+ preempt_enable();
+ }
+ }
+}
+
+static void send_first_frame(void *ignore)
+{
+ u32 *p = special_handler->frame_ptr;
+ u32 lfsr = HP_FIRST_WORD;
+ int loop;
+ struct qm_fd fd;
+
+ BUG_ON(special_handler->processor_id != smp_processor_id());
+ memset(&fd, 0, sizeof(fd));
+ qm_fd_addr_set64(&fd, special_handler->addr);
+ fd.format = qm_fd_contig_big;
+ fd.length29 = HP_NUM_WORDS * 4;
+ for (loop = 0; loop < HP_NUM_WORDS; loop++, p++) {
+ if (*p != lfsr)
+ panic("corrupt frame data");
+ *p ^= special_handler->tx_mixer;
+ lfsr = do_lfsr(lfsr);
+ }
+ pr_info("Sending first frame\n");
+ if (qman_enqueue(&special_handler->tx, &fd, 0))
+ panic("qman_enqueue() failed");
+}
+
+void qman_test_hotpotato(void)
+{
+ if (cpumask_weight(cpu_online_mask) < 2) {
+ pr_info("qman_test_hotpotato, skip - only 1 CPU\n");
+ return;
+ }
+
+ pr_info("qman_test_hotpotato starting\n");
+
+ hp_cpu_list_length = 0;
+ loop_counter = 0;
+ hp_handler_slab = kmem_cache_create("hp_handler_slab",
+ sizeof(struct hp_handler), L1_CACHE_BYTES,
+ SLAB_HWCACHE_ALIGN, NULL);
+ if (!hp_handler_slab)
+ panic("kmem_cache_create() failed");
+
+ allocate_frame_data();
+
+ /* Init phase 1 */
+ pr_info("Creating %d handlers per cpu...\n", HP_PER_CPU);
+ if (on_all_cpus(create_per_cpu_handlers))
+ panic("on_each_cpu() failed");
+ pr_info("Number of cpus: %d, total of %d handlers\n",
+ hp_cpu_list_length, hp_cpu_list_length * HP_PER_CPU);
+
+ init_phase2();
+
+ init_phase3();
+
+ preempt_disable();
+ if (special_handler->processor_id == smp_processor_id())
+ send_first_frame(NULL);
+ else
+ smp_call_function_single(special_handler->processor_id,
+ send_first_frame, NULL, 1);
+ preempt_enable();
+
+ wait_event(queue, loop_counter == HP_LOOPS);
+ deallocate_frame_data();
+ if (on_all_cpus(destroy_per_cpu_handlers))
+ panic("on_each_cpu() failed");
+ kmem_cache_destroy(hp_handler_slab);
+ pr_info("qman_test_hotpotato finished\n");
+}
diff --git a/drivers/staging/fsl_qbman/qman_utility.c b/drivers/staging/fsl_qbman/qman_utility.c
new file mode 100644
index 000000000000..f1e390236264
--- /dev/null
+++ b/drivers/staging/fsl_qbman/qman_utility.c
@@ -0,0 +1,129 @@
+/* Copyright 2008-2011 Freescale Semiconductor, Inc.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Freescale Semiconductor nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "qman_private.h"
+
+/* ----------------- */
+/* --- FQID Pool --- */
+
+struct qman_fqid_pool {
+ /* Base and size of the FQID range */
+ u32 fqid_base;
+ u32 total;
+ /* Number of FQIDs currently "allocated" */
+ u32 used;
+ /* Allocation optimisation. When 'used<total', it is the index of an
+ * available FQID. Otherwise there are no available FQIDs, and this
+ * will be set when the next deallocation occurs. */
+ u32 next;
+ /* A bit-field representation of the FQID range. */
+ unsigned long *bits;
+};
+
+#define QLONG_BYTES sizeof(unsigned long)
+#define QLONG_BITS (QLONG_BYTES * 8)
+/* Number of 'longs' required for the given number of bits */
+#define QNUM_LONGS(b) (((b) + QLONG_BITS - 1) / QLONG_BITS)
+/* Shorthand for the number of bytes of same (kmalloc, memset, etc) */
+#define QNUM_BYTES(b) (QNUM_LONGS(b) * QLONG_BYTES)
+/* And in bits */
+#define QNUM_BITS(b) (QNUM_LONGS(b) * QLONG_BITS)
+
+struct qman_fqid_pool *qman_fqid_pool_create(u32 fqid_start, u32 num)
+{
+ struct qman_fqid_pool *pool = kmalloc(sizeof(*pool), GFP_KERNEL);
+ unsigned int i;
+
+ BUG_ON(!num);
+ if (!pool)
+ return NULL;
+ pool->fqid_base = fqid_start;
+ pool->total = num;
+ pool->used = 0;
+ pool->next = 0;
+ pool->bits = kzalloc(QNUM_BYTES(num), GFP_KERNEL);
+ if (!pool->bits) {
+ kfree(pool);
+ return NULL;
+ }
+ /* If num is not an even multiple of QLONG_BITS (or even 8, for
+ * byte-oriented searching) then we fill the trailing bits with 1, to
+ * make them look allocated (permanently). */
+ for (i = num; i < QNUM_BITS(num); i++)
+ set_bit(i, pool->bits);
+ return pool;
+}
+EXPORT_SYMBOL(qman_fqid_pool_create);
+
+int qman_fqid_pool_destroy(struct qman_fqid_pool *pool)
+{
+ int ret = pool->used;
+ kfree(pool->bits);
+ kfree(pool);
+ return ret;
+}
+EXPORT_SYMBOL(qman_fqid_pool_destroy);
+
+int qman_fqid_pool_alloc(struct qman_fqid_pool *pool, u32 *fqid)
+{
+ int ret;
+ if (pool->used == pool->total)
+ return -ENOMEM;
+ *fqid = pool->fqid_base + pool->next;
+ ret = test_and_set_bit(pool->next, pool->bits);
+ BUG_ON(ret);
+ if (++pool->used == pool->total)
+ return 0;
+ pool->next = find_next_zero_bit(pool->bits, pool->total, pool->next);
+ if (pool->next >= pool->total)
+ pool->next = find_first_zero_bit(pool->bits, pool->total);
+ BUG_ON(pool->next >= pool->total);
+ return 0;
+}
+EXPORT_SYMBOL(qman_fqid_pool_alloc);
+
+void qman_fqid_pool_free(struct qman_fqid_pool *pool, u32 fqid)
+{
+ int ret;
+
+ fqid -= pool->fqid_base;
+ ret = test_and_clear_bit(fqid, pool->bits);
+ BUG_ON(!ret);
+ if (pool->used-- == pool->total)
+ pool->next = fqid;
+}
+EXPORT_SYMBOL(qman_fqid_pool_free);
+
+u32 qman_fqid_pool_used(struct qman_fqid_pool *pool)
+{
+ return pool->used;
+}
+EXPORT_SYMBOL(qman_fqid_pool_used);
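+
+/* Editorial usage sketch (not part of the original patch): create a pool
+ * covering FQIDs 0x100..0x1ff, allocate one FQID and release it again. */
+static __maybe_unused void qman_fqid_pool_example(void)
+{
+ struct qman_fqid_pool *pool = qman_fqid_pool_create(0x100, 256);
+ u32 fqid;
+
+ if (!pool)
+ return;
+ if (!qman_fqid_pool_alloc(pool, &fqid)) {
+ pr_info("allocated FQID 0x%x\n", fqid);
+ qman_fqid_pool_free(pool, fqid);
+ }
+ qman_fqid_pool_destroy(pool);
+}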
diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile
index 2f1711a8aeed..7cd981ec74e1 100644
--- a/drivers/staging/media/Makefile
+++ b/drivers/staging/media/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_VIDEO_ALLEGRO_DVT) += allegro-dvt/
-obj-$(CONFIG_VIDEO_IMX_MEDIA) += imx/
+obj-$(CONFIG_VIDEO_IMX_CAPTURE) += imx/
obj-$(CONFIG_VIDEO_MESON_VDEC) += meson/vdec/
obj-$(CONFIG_VIDEO_OMAP4) += omap4iss/
obj-$(CONFIG_VIDEO_SUNXI) += sunxi/
diff --git a/drivers/staging/media/imx/Kconfig b/drivers/staging/media/imx/Kconfig
index 8f1ae50a4abd..be3e8545c86a 100644
--- a/drivers/staging/media/imx/Kconfig
+++ b/drivers/staging/media/imx/Kconfig
@@ -31,3 +31,87 @@ config VIDEO_IMX7_CSI
i.MX6UL/L or i.MX7.
endmenu
endif
+
+config VIDEO_IMX_CAPTURE
+ tristate "i.MX V4L2 media core driver"
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on MEDIA_CONTROLLER && VIDEO_V4L2
+ depends on VIDEO_V4L2_SUBDEV_API
+ depends on HAS_DMA
+ select VIDEOBUF2_DMA_CONTIG
+ select V4L2_FWNODE
+ help
+ Say yes here to enable support for the video4linux media
+ controller driver for the i.MX5/6 SoC.
+
+if VIDEO_IMX_CAPTURE
+menu "i.MX8QXP/QM Camera ISI/MIPI Features support"
+
+config IMX8_MEDIA_DEVICE
+ tristate "IMX8 Media Device Driver"
+ select V4L2_FWNODE
+ default y
+ help
+ This media device is a virtual device used to manage all
+ modules in the image subsystem of the i.MX8QXP/QM platform.
+
+config IMX8_ISI_HW
+ tristate "IMX8 Image Sensor Interface hardware driver"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ default y
+ help
+ The ISI hardware driver exports functions for configuring the
+ ISI registers; it is shared by the ISI capture and mem2mem
+ drivers.
+
+config IMX8_ISI_CORE
+ tristate "IMX8 Image Sensor Interface Core Driver"
+ depends on IMX8_ISI_CAPTURE && IMX8_ISI_M2M
+ default y
+
+config IMX8_ISI_CAPTURE
+ tristate "IMX8 Image Sensor Interface Capture Device Driver"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ depends on IMX8_ISI_HW
+ select VIDEOBUF2_DMA_CONTIG
+ default y
+
+config IMX8_ISI_M2M
+ tristate "IMX8 Image Sensor Interface Memory to Memory Device Driver"
+ select V4L2_MEM2MEM_DEV
+ depends on IMX8_ISI_HW
+ default y
+
+config IMX8_MIPI_CSI2
+ tristate "IMX8 MIPI CSI2 Controller"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ default y
+ help
+ Enable support for the video4linux camera sensor interface
+ driver for the i.MX8QM/QXP platform.
+
+config IMX8_MIPI_CSI2_SAM
+ tristate "IMX8 MIPI CSI2 SAMSUNG Controller"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ default y
+ help
+ Enable support for the video4linux MIPI CSI2 Samsung driver
+ for the i.MX8MN platform.
+
+config IMX8_PARALLEL_CSI
+ tristate "IMX8 Parallel Capture Controller"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ default y
+ help
+ Enable support for the video4linux parallel camera sensor
+ interface driver for the i.MX8QM/QXP platform.
+
+config GMSL_MAX9286
+ tristate "GMSL MAX8286"
+ depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ default y
+ help
+ Enable support for the video4linux camera sensor driver for
+ the GMSL MAX9286.
+
+endmenu
+endif #VIDEO_IMX_CAPTURE
diff --git a/drivers/staging/media/imx/Makefile b/drivers/staging/media/imx/Makefile
index 9bd9e873ba7c..90173fdac967 100644
--- a/drivers/staging/media/imx/Makefile
+++ b/drivers/staging/media/imx/Makefile
@@ -3,6 +3,8 @@ imx6-media-objs := imx-media-dev.o imx-media-internal-sd.o \
imx-ic-common.o imx-ic-prp.o imx-ic-prpencvf.o imx-media-vdic.o \
imx-media-csc-scaler.o
+imx8-capture-objs := imx8-isi-core.o
+
imx-media-common-objs := imx-media-capture.o imx-media-dev-common.o \
imx-media-of.o imx-media-utils.o
@@ -16,3 +18,12 @@ obj-$(CONFIG_VIDEO_IMX_CSI) += imx6-mipi-csi2.o
obj-$(CONFIG_VIDEO_IMX7_CSI) += imx7-media-csi.o
obj-$(CONFIG_VIDEO_IMX7_CSI) += imx7-mipi-csis.o
+obj-$(CONFIG_IMX8_MEDIA_DEVICE) += imx8-media-dev.o
+obj-$(CONFIG_IMX8_ISI_CORE) += imx8-capture.o
+obj-$(CONFIG_IMX8_ISI_CAPTURE) += imx8-isi-cap.o
+obj-$(CONFIG_IMX8_ISI_M2M) += imx8-isi-m2m.o
+obj-$(CONFIG_IMX8_ISI_HW) += imx8-isi-hw.o
+obj-$(CONFIG_IMX8_MIPI_CSI2) += imx8-mipi-csi2.o
+obj-$(CONFIG_IMX8_MIPI_CSI2_SAM) += imx8-mipi-csi2-sam.o
+obj-$(CONFIG_IMX8_PARALLEL_CSI) += imx8-parallel-csi.o
+obj-$(CONFIG_GMSL_MAX9286) += gmsl-max9286.o
diff --git a/drivers/staging/media/imx/gmsl-max9286.c b/drivers/staging/media/imx/gmsl-max9286.c
new file mode 100644
index 000000000000..2d48b0f4b925
--- /dev/null
+++ b/drivers/staging/media/imx/gmsl-max9286.c
@@ -0,0 +1,3344 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 NXP Semiconductor
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/ctype.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/of_device.h>
+#include <linux/i2c.h>
+#include <linux/v4l2-mediabus.h>
+#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <media/v4l2-subdev.h>
+
+#define MAX9271_MAX_SENSOR_NUM 4
+#define CAMERA_USES_15HZ
+
+#define ADDR_MAX9286 0x6A
+#define ADDR_MAX9271 0x40
+#define ADDR_MAX9271_ALL (ADDR_MAX9271 + 5) /* Broadcast address */
+
+#define MIPI_CSI2_SENS_VC0_PAD_SOURCE 0
+#define MIPI_CSI2_SENS_VC1_PAD_SOURCE 1
+#define MIPI_CSI2_SENS_VC2_PAD_SOURCE 2
+#define MIPI_CSI2_SENS_VC3_PAD_SOURCE 3
+#define MIPI_CSI2_SENS_VCX_PADS_NUM 4
+
+#define MAX_FPS 30
+#define MIN_FPS 15
+#define DEFAULT_FPS 30
+
+#define ADDR_OV_SENSOR 0x30
+#define ADDR_AP_SENSOR 0x5D
+
+/*
+ * Maintains information on the current state of the sensor.
+ */
+struct imxdpu_videomode {
+ char name[64]; /* may not be needed */
+
+ u32 pixelclock; /* Hz */
+
+ /* htotal (pixels) = hlen + hfp + hsync + hbp */
+ u32 hlen;
+ u32 hfp;
+ u32 hbp;
+ u32 hsync;
+
+ /* field0 - vtotal (lines) = vlen + vfp + vsync + vbp */
+ u32 vlen;
+ u32 vfp;
+ u32 vbp;
+ u32 vsync;
+
+ /* field1 */
+ u32 vlen1;
+ u32 vfp1;
+ u32 vbp1;
+ u32 vsync1;
+
+ u32 flags;
+
+ u32 format;
+ u32 dest_format; /* buffer format for capture */
+
+ s16 clip_top;
+ s16 clip_left;
+ u16 clip_width;
+ u16 clip_height;
+};
+
+enum ov10635_mode {
+ ov10635_mode_MIN = 0,
+ ov10635_mode_WXGA_1280_800 = 0,
+ ov10635_mode_720P_1280_720 = 1,
+ ov10635_mode_WVGA_752_480 = 2,
+ ov10635_mode_VGA_640_480 = 3,
+ ov10635_mode_CIF_352_288 = 4,
+ ov10635_mode_QVGA_320_240 = 5,
+ ov10635_mode_MAX = 5,
+};
+
+enum ov10635_frame_rate {
+ OV10635_15_FPS = 0,
+ OV10635_30_FPS,
+};
+
+struct sensor_data {
+ struct v4l2_subdev subdev;
+ struct media_pad pads[MIPI_CSI2_SENS_VCX_PADS_NUM];
+ struct i2c_client *i2c_client;
+ struct v4l2_mbus_framefmt format;
+ enum ov10635_frame_rate current_fr;
+ enum ov10635_mode current_mode;
+ struct v4l2_fract frame_interval;
+
+ /* lock to protect shared members */
+ struct mutex lock;
+ char running;
+
+ /* control settings */
+ int brightness;
+ int hue;
+ int contrast;
+ int saturation;
+ int red;
+ int green;
+ int blue;
+ int ae_mode;
+
+ u32 mclk;
+ u8 mclk_source;
+ struct clk *sensor_clk;
+ int v_channel;
+ bool is_mipi;
+ struct imxdpu_videomode cap_mode;
+
+ unsigned int sensor_num; /* number of sensors connected via MAX9271 */
+ unsigned char sensor_is_there; /* Bits 0~3 for the 4 cameras:
+ * 1 = present,
+ * 0 = absent
+ */
+ struct gpio_desc *pwn_gpio;
+};
+
+#define OV10635_REG_PID 0x300A
+#define OV10635_REG_VER 0x300B
+
+struct reg_value {
+ unsigned short reg_addr;
+ unsigned char val;
+ unsigned int delay_ms;
+};
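+
+/*
+ * A register table is applied in order: each entry's value is written to
+ * reg_addr, and a nonzero delay_ms inserts a sleep after that write (see
+ * ov10635_download_firmware() below).
+ */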
+
+static int ov10635_framerates[] = {
+ [OV10635_15_FPS] = 15,
+ [OV10635_30_FPS] = 30,
+};
+
+static struct reg_value ov10635_init_data[] = {
+ { 0x0103, 0x01, 0 },
+ { 0x300c, 0x61, 0 },
+ { 0x300c, 0x61, 0 },
+ { 0x300c, 0x61, 0 },
+ { 0x300c, 0x61, 0 },
+ { 0x300c, 0x61, 0 },
+ { 0x300c, 0x61, 0 },
+ { 0x300c, 0x61, 0 },
+ { 0x300c, 0x61, 0 },
+ { 0x300c, 0x61, 0 },
+ { 0x300c, 0x61, 0 },
+ { 0x300c, 0x61, 0 },
+ { 0x300c, 0x61, 0 },
+ { 0x300c, 0x61, 0 },
+ { 0x300c, 0x61, 0 },
+ { 0x300c, 0x61, 0 },
+ { 0x300c, 0x61, 0 },
+ { 0x300c, 0x61, 0 },
+ { 0x300c, 0x61, 0 },
+ { 0x300c, 0x61, 0 },
+ { 0x300c, 0x61, 0 },
+ { 0x300c, 0x61, 0 },
+ { 0x300c, 0x61, 0 },
+ { 0x300c, 0x61, 0 },
+ { 0x301b, 0xff, 0 },
+ { 0x301c, 0xff, 0 },
+ { 0x301a, 0xff, 0 },
+ { 0x3011, 0x42, 0 },
+ { 0x6900, 0x0c, 0 },
+ { 0x6901, 0x11, 0 },
+ { 0x3503, 0x10, 0 },
+ { 0x3025, 0x03, 0 },
+ { 0x3003, 0x20, 0 },
+ { 0x3004, 0x21, 0 },
+ { 0x3005, 0x20, 0 },
+ { 0x3006, 0x91, 0 },
+ { 0x3600, 0x74, 0 },
+ { 0x3601, 0x2b, 0 },
+ { 0x3612, 0x00, 0 },
+ { 0x3611, 0x67, 0 },
+ { 0x3633, 0xca, 0 },
+ { 0x3602, 0x2f, 0 },
+ { 0x3603, 0x00, 0 },
+ { 0x3630, 0x28, 0 },
+ { 0x3631, 0x16, 0 },
+ { 0x3714, 0x10, 0 },
+ { 0x371d, 0x01, 0 },
+ { 0x4300, 0x38, 0 },
+ { 0x3007, 0x01, 0 },
+ { 0x3024, 0x01, 0 },
+ { 0x3020, 0x0b, 0 },
+ { 0x3702, 0x20, 0 },
+ { 0x3703, 0x48, 0 },
+ { 0x3704, 0x32, 0 },
+ { 0x3709, 0xa8, 0 },
+ { 0x3709, 0xa8, 0 },
+ { 0x370c, 0xc7, 0 },
+ { 0x370d, 0x80, 0 },
+ { 0x3712, 0x00, 0 },
+ { 0x3713, 0x20, 0 },
+ { 0x3715, 0x04, 0 },
+ { 0x381d, 0x40, 0 },
+ { 0x381c, 0x00, 0 },
+ { 0x3822, 0x50, 0 },
+ { 0x3824, 0x50, 0 },
+ { 0x3815, 0x8c, 0 },
+ { 0x3804, 0x05, 0 },
+ { 0x3805, 0x1f, 0 },
+ { 0x3800, 0x00, 0 },
+ { 0x3801, 0x00, 0 },
+ { 0x3806, 0x03, 0 },
+ { 0x3807, 0x29, 0 },
+ { 0x3802, 0x00, 0 },
+ { 0x3803, 0x04, 0 },
+ { 0x3808, 0x05, 0 },
+ { 0x3809, 0x00, 0 },
+ { 0x380a, 0x03, 0 },
+ { 0x380b, 0x20, 0 },
+ { 0x380c, 0x07, 0 },
+ { 0x380d, 0x71, 0 },
+ { 0x6e42, 0x03, 0 },
+ { 0x6e43, 0x48, 0 },
+ { 0x380e, 0x03, 0 },
+ { 0x380f, 0x48, 0 },
+ { 0x3813, 0x02, 0 },
+ { 0x3811, 0x10, 0 },
+ { 0x381f, 0x0c, 0 },
+ { 0x3828, 0x03, 0 },
+ { 0x3829, 0x10, 0 },
+ { 0x382a, 0x10, 0 },
+ { 0x382b, 0x10, 0 },
+ { 0x3621, 0x64, 0 },
+ { 0x5005, 0x08, 0 },
+ { 0x56d5, 0x00, 0 },
+ { 0x56d6, 0x80, 0 },
+ { 0x56d7, 0x00, 0 },
+ { 0x56d8, 0x00, 0 },
+ { 0x56d9, 0x00, 0 },
+ { 0x56da, 0x80, 0 },
+ { 0x56db, 0x00, 0 },
+ { 0x56dc, 0x00, 0 },
+ { 0x56e8, 0x00, 0 },
+ { 0x56e9, 0x7f, 0 },
+ { 0x56ea, 0x00, 0 },
+ { 0x56eb, 0x7f, 0 },
+ { 0x5100, 0x00, 0 },
+ { 0x5101, 0x80, 0 },
+ { 0x5102, 0x00, 0 },
+ { 0x5103, 0x80, 0 },
+ { 0x5104, 0x00, 0 },
+ { 0x5105, 0x80, 0 },
+ { 0x5106, 0x00, 0 },
+ { 0x5107, 0x80, 0 },
+ { 0x5108, 0x00, 0 },
+ { 0x5109, 0x00, 0 },
+ { 0x510a, 0x00, 0 },
+ { 0x510b, 0x00, 0 },
+ { 0x510c, 0x00, 0 },
+ { 0x510d, 0x00, 0 },
+ { 0x510e, 0x00, 0 },
+ { 0x510f, 0x00, 0 },
+ { 0x5110, 0x00, 0 },
+ { 0x5111, 0x80, 0 },
+ { 0x5112, 0x00, 0 },
+ { 0x5113, 0x80, 0 },
+ { 0x5114, 0x00, 0 },
+ { 0x5115, 0x80, 0 },
+ { 0x5116, 0x00, 0 },
+ { 0x5117, 0x80, 0 },
+ { 0x5118, 0x00, 0 },
+ { 0x5119, 0x00, 0 },
+ { 0x511a, 0x00, 0 },
+ { 0x511b, 0x00, 0 },
+ { 0x511c, 0x00, 0 },
+ { 0x511d, 0x00, 0 },
+ { 0x511e, 0x00, 0 },
+ { 0x511f, 0x00, 0 },
+ { 0x56d0, 0x00, 0 },
+ { 0x5006, 0x24, 0 },
+ { 0x5608, 0x0d, 0 },
+ { 0x52d7, 0x06, 0 },
+ { 0x528d, 0x08, 0 },
+ { 0x5293, 0x12, 0 },
+ { 0x52d3, 0x12, 0 },
+ { 0x5288, 0x06, 0 },
+ { 0x5289, 0x20, 0 },
+ { 0x52c8, 0x06, 0 },
+ { 0x52c9, 0x20, 0 },
+ { 0x52cd, 0x04, 0 },
+ { 0x5381, 0x00, 0 },
+ { 0x5382, 0xff, 0 },
+ { 0x5589, 0x76, 0 },
+ { 0x558a, 0x47, 0 },
+ { 0x558b, 0xef, 0 },
+ { 0x558c, 0xc9, 0 },
+ { 0x558d, 0x49, 0 },
+ { 0x558e, 0x30, 0 },
+ { 0x558f, 0x67, 0 },
+ { 0x5590, 0x3f, 0 },
+ { 0x5591, 0xf0, 0 },
+ { 0x5592, 0x10, 0 },
+ { 0x55a2, 0x6d, 0 },
+ { 0x55a3, 0x55, 0 },
+ { 0x55a4, 0xc3, 0 },
+ { 0x55a5, 0xb5, 0 },
+ { 0x55a6, 0x43, 0 },
+ { 0x55a7, 0x38, 0 },
+ { 0x55a8, 0x5f, 0 },
+ { 0x55a9, 0x4b, 0 },
+ { 0x55aa, 0xf0, 0 },
+ { 0x55ab, 0x10, 0 },
+ { 0x5581, 0x52, 0 },
+ { 0x5300, 0x01, 0 },
+ { 0x5301, 0x00, 0 },
+ { 0x5302, 0x00, 0 },
+ { 0x5303, 0x0e, 0 },
+ { 0x5304, 0x00, 0 },
+ { 0x5305, 0x0e, 0 },
+ { 0x5306, 0x00, 0 },
+ { 0x5307, 0x36, 0 },
+ { 0x5308, 0x00, 0 },
+ { 0x5309, 0xd9, 0 },
+ { 0x530a, 0x00, 0 },
+ { 0x530b, 0x0f, 0 },
+ { 0x530c, 0x00, 0 },
+ { 0x530d, 0x2c, 0 },
+ { 0x530e, 0x00, 0 },
+ { 0x530f, 0x59, 0 },
+ { 0x5310, 0x00, 0 },
+ { 0x5311, 0x7b, 0 },
+ { 0x5312, 0x00, 0 },
+ { 0x5313, 0x22, 0 },
+ { 0x5314, 0x00, 0 },
+ { 0x5315, 0xd5, 0 },
+ { 0x5316, 0x00, 0 },
+ { 0x5317, 0x13, 0 },
+ { 0x5318, 0x00, 0 },
+ { 0x5319, 0x18, 0 },
+ { 0x531a, 0x00, 0 },
+ { 0x531b, 0x26, 0 },
+ { 0x531c, 0x00, 0 },
+ { 0x531d, 0xdc, 0 },
+ { 0x531e, 0x00, 0 },
+ { 0x531f, 0x02, 0 },
+ { 0x5320, 0x00, 0 },
+ { 0x5321, 0x24, 0 },
+ { 0x5322, 0x00, 0 },
+ { 0x5323, 0x56, 0 },
+ { 0x5324, 0x00, 0 },
+ { 0x5325, 0x85, 0 },
+ { 0x5326, 0x00, 0 },
+ { 0x5327, 0x20, 0 },
+ { 0x5609, 0x01, 0 },
+ { 0x560a, 0x40, 0 },
+ { 0x560b, 0x01, 0 },
+ { 0x560c, 0x40, 0 },
+ { 0x560d, 0x00, 0 },
+ { 0x560e, 0xfa, 0 },
+ { 0x560f, 0x00, 0 },
+ { 0x5610, 0xfa, 0 },
+ { 0x5611, 0x02, 0 },
+ { 0x5612, 0x80, 0 },
+ { 0x5613, 0x02, 0 },
+ { 0x5614, 0x80, 0 },
+ { 0x5615, 0x01, 0 },
+ { 0x5616, 0x2c, 0 },
+ { 0x5617, 0x01, 0 },
+ { 0x5618, 0x2c, 0 },
+ { 0x563b, 0x01, 0 },
+ { 0x563c, 0x01, 0 },
+ { 0x563d, 0x01, 0 },
+ { 0x563e, 0x01, 0 },
+ { 0x563f, 0x03, 0 },
+ { 0x5640, 0x03, 0 },
+ { 0x5641, 0x03, 0 },
+ { 0x5642, 0x05, 0 },
+ { 0x5643, 0x09, 0 },
+ { 0x5644, 0x05, 0 },
+ { 0x5645, 0x05, 0 },
+ { 0x5646, 0x05, 0 },
+ { 0x5647, 0x05, 0 },
+ { 0x5651, 0x00, 0 },
+ { 0x5652, 0x80, 0 },
+ { 0x521a, 0x01, 0 },
+ { 0x521b, 0x03, 0 },
+ { 0x521c, 0x06, 0 },
+ { 0x521d, 0x0a, 0 },
+ { 0x521e, 0x0e, 0 },
+ { 0x521f, 0x12, 0 },
+ { 0x5220, 0x16, 0 },
+ { 0x5223, 0x02, 0 },
+ { 0x5225, 0x04, 0 },
+ { 0x5227, 0x08, 0 },
+ { 0x5229, 0x0c, 0 },
+ { 0x522b, 0x12, 0 },
+ { 0x522d, 0x18, 0 },
+ { 0x522f, 0x1e, 0 },
+ { 0x5241, 0x04, 0 },
+ { 0x5242, 0x01, 0 },
+ { 0x5243, 0x03, 0 },
+ { 0x5244, 0x06, 0 },
+ { 0x5245, 0x0a, 0 },
+ { 0x5246, 0x0e, 0 },
+ { 0x5247, 0x12, 0 },
+ { 0x5248, 0x16, 0 },
+ { 0x524a, 0x03, 0 },
+ { 0x524c, 0x04, 0 },
+ { 0x524e, 0x08, 0 },
+ { 0x5250, 0x0c, 0 },
+ { 0x5252, 0x12, 0 },
+ { 0x5254, 0x18, 0 },
+ { 0x5256, 0x1e, 0 },
+ { 0x4606, 0x07, 0 },
+ { 0x4607, 0x71, 0 },
+ { 0x460a, 0x02, 0 },
+ { 0x460b, 0x70, 0 },
+ { 0x460c, 0x00, 0 },
+ { 0x4620, 0x0e, 0 },
+ { 0x4700, 0x04, 0 },
+ { 0x4701, 0x00, 0 },
+ { 0x4702, 0x01, 0 },
+ { 0x4004, 0x04, 0 },
+ { 0x4005, 0x18, 0 },
+ { 0x4001, 0x06, 0 },
+ { 0x4050, 0x22, 0 },
+ { 0x4051, 0x24, 0 },
+ { 0x4052, 0x02, 0 },
+ { 0x4057, 0x9c, 0 },
+ { 0x405a, 0x00, 0 },
+ { 0x4202, 0x02, 0 },
+ { 0x3023, 0x10, 0 },
+ { 0x0100, 0x0f, 0 },
+ { 0x0100, 0x0f, 0 },
+ { 0x6f10, 0x07, 0 },
+ { 0x6f11, 0x82, 0 },
+ { 0x6f12, 0x04, 0 },
+ { 0x6f13, 0x00, 0 },
+ { 0x6f14, 0x1f, 0 },
+ { 0x6f15, 0xdd, 0 },
+ { 0x6f16, 0x04, 0 },
+ { 0x6f17, 0x04, 0 },
+ { 0x6f18, 0x36, 0 },
+ { 0x6f19, 0x66, 0 },
+ { 0x6f1a, 0x04, 0 },
+ { 0x6f1b, 0x08, 0 },
+ { 0x6f1c, 0x0c, 0 },
+ { 0x6f1d, 0xe7, 0 },
+ { 0x6f1e, 0x04, 0 },
+ { 0x6f1f, 0x0c, 0 },
+ { 0xd000, 0x19, 0 },
+ { 0xd001, 0xa0, 0 },
+ { 0xd002, 0x00, 0 },
+ { 0xd003, 0x01, 0 },
+ { 0xd004, 0xa9, 0 },
+ { 0xd005, 0xad, 0 },
+ { 0xd006, 0x10, 0 },
+ { 0xd007, 0x40, 0 },
+ { 0xd008, 0x44, 0 },
+ { 0xd009, 0x00, 0 },
+ { 0xd00a, 0x68, 0 },
+ { 0xd00b, 0x00, 0 },
+ { 0xd00c, 0x15, 0 },
+ { 0xd00d, 0x00, 0 },
+ { 0xd00e, 0x00, 0 },
+ { 0xd00f, 0x00, 0 },
+ { 0xd010, 0x19, 0 },
+ { 0xd011, 0xa0, 0 },
+ { 0xd012, 0x00, 0 },
+ { 0xd013, 0x01, 0 },
+ { 0xd014, 0xa9, 0 },
+ { 0xd015, 0xad, 0 },
+ { 0xd016, 0x13, 0 },
+ { 0xd017, 0xd0, 0 },
+ { 0xd018, 0x44, 0 },
+ { 0xd019, 0x00, 0 },
+ { 0xd01a, 0x68, 0 },
+ { 0xd01b, 0x00, 0 },
+ { 0xd01c, 0x15, 0 },
+ { 0xd01d, 0x00, 0 },
+ { 0xd01e, 0x00, 0 },
+ { 0xd01f, 0x00, 0 },
+ { 0xd020, 0x19, 0 },
+ { 0xd021, 0xa0, 0 },
+ { 0xd022, 0x00, 0 },
+ { 0xd023, 0x01, 0 },
+ { 0xd024, 0xa9, 0 },
+ { 0xd025, 0xad, 0 },
+ { 0xd026, 0x14, 0 },
+ { 0xd027, 0xb8, 0 },
+ { 0xd028, 0x44, 0 },
+ { 0xd029, 0x00, 0 },
+ { 0xd02a, 0x68, 0 },
+ { 0xd02b, 0x00, 0 },
+ { 0xd02c, 0x15, 0 },
+ { 0xd02d, 0x00, 0 },
+ { 0xd02e, 0x00, 0 },
+ { 0xd02f, 0x00, 0 },
+ { 0xd030, 0x19, 0 },
+ { 0xd031, 0xa0, 0 },
+ { 0xd032, 0x00, 0 },
+ { 0xd033, 0x01, 0 },
+ { 0xd034, 0xa9, 0 },
+ { 0xd035, 0xad, 0 },
+ { 0xd036, 0x14, 0 },
+ { 0xd037, 0xdc, 0 },
+ { 0xd038, 0x44, 0 },
+ { 0xd039, 0x00, 0 },
+ { 0xd03a, 0x68, 0 },
+ { 0xd03b, 0x00, 0 },
+ { 0xd03c, 0x15, 0 },
+ { 0xd03d, 0x00, 0 },
+ { 0xd03e, 0x00, 0 },
+ { 0xd03f, 0x00, 0 },
+ { 0xd040, 0x9c, 0 },
+ { 0xd041, 0x21, 0 },
+ { 0xd042, 0xff, 0 },
+ { 0xd043, 0xe4, 0 },
+ { 0xd044, 0xd4, 0 },
+ { 0xd045, 0x01, 0 },
+ { 0xd046, 0x48, 0 },
+ { 0xd047, 0x00, 0 },
+ { 0xd048, 0xd4, 0 },
+ { 0xd049, 0x01, 0 },
+ { 0xd04a, 0x50, 0 },
+ { 0xd04b, 0x04, 0 },
+ { 0xd04c, 0xd4, 0 },
+ { 0xd04d, 0x01, 0 },
+ { 0xd04e, 0x60, 0 },
+ { 0xd04f, 0x08, 0 },
+ { 0xd050, 0xd4, 0 },
+ { 0xd051, 0x01, 0 },
+ { 0xd052, 0x70, 0 },
+ { 0xd053, 0x0c, 0 },
+ { 0xd054, 0xd4, 0 },
+ { 0xd055, 0x01, 0 },
+ { 0xd056, 0x80, 0 },
+ { 0xd057, 0x10, 0 },
+ { 0xd058, 0x19, 0 },
+ { 0xd059, 0xc0, 0 },
+ { 0xd05a, 0x00, 0 },
+ { 0xd05b, 0x01, 0 },
+ { 0xd05c, 0xa9, 0 },
+ { 0xd05d, 0xce, 0 },
+ { 0xd05e, 0x02, 0 },
+ { 0xd05f, 0xa4, 0 },
+ { 0xd060, 0x9c, 0 },
+ { 0xd061, 0xa0, 0 },
+ { 0xd062, 0x00, 0 },
+ { 0xd063, 0x00, 0 },
+ { 0xd064, 0x84, 0 },
+ { 0xd065, 0x6e, 0 },
+ { 0xd066, 0x00, 0 },
+ { 0xd067, 0x00, 0 },
+ { 0xd068, 0xd8, 0 },
+ { 0xd069, 0x03, 0 },
+ { 0xd06a, 0x28, 0 },
+ { 0xd06b, 0x76, 0 },
+ { 0xd06c, 0x1a, 0 },
+ { 0xd06d, 0x00, 0 },
+ { 0xd06e, 0x00, 0 },
+ { 0xd06f, 0x01, 0 },
+ { 0xd070, 0xaa, 0 },
+ { 0xd071, 0x10, 0 },
+ { 0xd072, 0x03, 0 },
+ { 0xd073, 0xf0, 0 },
+ { 0xd074, 0x18, 0 },
+ { 0xd075, 0x60, 0 },
+ { 0xd076, 0x00, 0 },
+ { 0xd077, 0x01, 0 },
+ { 0xd078, 0xa8, 0 },
+ { 0xd079, 0x63, 0 },
+ { 0xd07a, 0x07, 0 },
+ { 0xd07b, 0x80, 0 },
+ { 0xd07c, 0xe0, 0 },
+ { 0xd07d, 0xa0, 0 },
+ { 0xd07e, 0x00, 0 },
+ { 0xd07f, 0x04, 0 },
+ { 0xd080, 0x18, 0 },
+ { 0xd081, 0xc0, 0 },
+ { 0xd082, 0x00, 0 },
+ { 0xd083, 0x00, 0 },
+ { 0xd084, 0xa8, 0 },
+ { 0xd085, 0xc6, 0 },
+ { 0xd086, 0x00, 0 },
+ { 0xd087, 0x00, 0 },
+ { 0xd088, 0x8c, 0 },
+ { 0xd089, 0x63, 0 },
+ { 0xd08a, 0x00, 0 },
+ { 0xd08b, 0x00, 0 },
+ { 0xd08c, 0xd4, 0 },
+ { 0xd08d, 0x01, 0 },
+ { 0xd08e, 0x28, 0 },
+ { 0xd08f, 0x14, 0 },
+ { 0xd090, 0xd4, 0 },
+ { 0xd091, 0x01, 0 },
+ { 0xd092, 0x30, 0 },
+ { 0xd093, 0x18, 0 },
+ { 0xd094, 0x07, 0 },
+ { 0xd095, 0xff, 0 },
+ { 0xd096, 0xf8, 0 },
+ { 0xd097, 0xfd, 0 },
+ { 0xd098, 0x9c, 0 },
+ { 0xd099, 0x80, 0 },
+ { 0xd09a, 0x00, 0 },
+ { 0xd09b, 0x03, 0 },
+ { 0xd09c, 0xa5, 0 },
+ { 0xd09d, 0x6b, 0 },
+ { 0xd09e, 0x00, 0 },
+ { 0xd09f, 0xff, 0 },
+ { 0xd0a0, 0x18, 0 },
+ { 0xd0a1, 0xc0, 0 },
+ { 0xd0a2, 0x00, 0 },
+ { 0xd0a3, 0x01, 0 },
+ { 0xd0a4, 0xa8, 0 },
+ { 0xd0a5, 0xc6, 0 },
+ { 0xd0a6, 0x01, 0 },
+ { 0xd0a7, 0x02, 0 },
+ { 0xd0a8, 0xe1, 0 },
+ { 0xd0a9, 0x6b, 0 },
+ { 0xd0aa, 0x58, 0 },
+ { 0xd0ab, 0x00, 0 },
+ { 0xd0ac, 0x84, 0 },
+ { 0xd0ad, 0x8e, 0 },
+ { 0xd0ae, 0x00, 0 },
+ { 0xd0af, 0x00, 0 },
+ { 0xd0b0, 0xe1, 0 },
+ { 0xd0b1, 0x6b, 0 },
+ { 0xd0b2, 0x30, 0 },
+ { 0xd0b3, 0x00, 0 },
+ { 0xd0b4, 0x98, 0 },
+ { 0xd0b5, 0xb0, 0 },
+ { 0xd0b6, 0x00, 0 },
+ { 0xd0b7, 0x00, 0 },
+ { 0xd0b8, 0x8c, 0 },
+ { 0xd0b9, 0x64, 0 },
+ { 0xd0ba, 0x00, 0 },
+ { 0xd0bb, 0x6e, 0 },
+ { 0xd0bc, 0xe5, 0 },
+ { 0xd0bd, 0xa5, 0 },
+ { 0xd0be, 0x18, 0 },
+ { 0xd0bf, 0x00, 0 },
+ { 0xd0c0, 0x10, 0 },
+ { 0xd0c1, 0x00, 0 },
+ { 0xd0c2, 0x00, 0 },
+ { 0xd0c3, 0x06, 0 },
+ { 0xd0c4, 0x95, 0 },
+ { 0xd0c5, 0x8b, 0 },
+ { 0xd0c6, 0x00, 0 },
+ { 0xd0c7, 0x00, 0 },
+ { 0xd0c8, 0x94, 0 },
+ { 0xd0c9, 0xa4, 0 },
+ { 0xd0ca, 0x00, 0 },
+ { 0xd0cb, 0x70, 0 },
+ { 0xd0cc, 0xe5, 0 },
+ { 0xd0cd, 0x65, 0 },
+ { 0xd0ce, 0x60, 0 },
+ { 0xd0cf, 0x00, 0 },
+ { 0xd0d0, 0x0c, 0 },
+ { 0xd0d1, 0x00, 0 },
+ { 0xd0d2, 0x00, 0 },
+ { 0xd0d3, 0x62, 0 },
+ { 0xd0d4, 0x15, 0 },
+ { 0xd0d5, 0x00, 0 },
+ { 0xd0d6, 0x00, 0 },
+ { 0xd0d7, 0x00, 0 },
+ { 0xd0d8, 0x18, 0 },
+ { 0xd0d9, 0x60, 0 },
+ { 0xd0da, 0x80, 0 },
+ { 0xd0db, 0x06, 0 },
+ { 0xd0dc, 0xa8, 0 },
+ { 0xd0dd, 0x83, 0 },
+ { 0xd0de, 0x38, 0 },
+ { 0xd0df, 0x29, 0 },
+ { 0xd0e0, 0xa8, 0 },
+ { 0xd0e1, 0xe3, 0 },
+ { 0xd0e2, 0x40, 0 },
+ { 0xd0e3, 0x08, 0 },
+ { 0xd0e4, 0x8c, 0 },
+ { 0xd0e5, 0x84, 0 },
+ { 0xd0e6, 0x00, 0 },
+ { 0xd0e7, 0x00, 0 },
+ { 0xd0e8, 0xa8, 0 },
+ { 0xd0e9, 0xa3, 0 },
+ { 0xd0ea, 0x40, 0 },
+ { 0xd0eb, 0x09, 0 },
+ { 0xd0ec, 0xa8, 0 },
+ { 0xd0ed, 0xc3, 0 },
+ { 0xd0ee, 0x38, 0 },
+ { 0xd0ef, 0x2a, 0 },
+ { 0xd0f0, 0xd8, 0 },
+ { 0xd0f1, 0x07, 0 },
+ { 0xd0f2, 0x20, 0 },
+ { 0xd0f3, 0x00, 0 },
+ { 0xd0f4, 0x8c, 0 },
+ { 0xd0f5, 0x66, 0 },
+ { 0xd0f6, 0x00, 0 },
+ { 0xd0f7, 0x00, 0 },
+ { 0xd0f8, 0xd8, 0 },
+ { 0xd0f9, 0x05, 0 },
+ { 0xd0fa, 0x18, 0 },
+ { 0xd0fb, 0x00, 0 },
+ { 0xd0fc, 0x18, 0 },
+ { 0xd0fd, 0x60, 0 },
+ { 0xd0fe, 0x00, 0 },
+ { 0xd0ff, 0x01, 0 },
+ { 0xd100, 0x98, 0 },
+ { 0xd101, 0x90, 0 },
+ { 0xd102, 0x00, 0 },
+ { 0xd103, 0x00, 0 },
+ { 0xd104, 0x84, 0 },
+ { 0xd105, 0xae, 0 },
+ { 0xd106, 0x00, 0 },
+ { 0xd107, 0x00, 0 },
+ { 0xd108, 0xa8, 0 },
+ { 0xd109, 0x63, 0 },
+ { 0xd10a, 0x06, 0 },
+ { 0xd10b, 0x4c, 0 },
+ { 0xd10c, 0x9c, 0 },
+ { 0xd10d, 0xc0, 0 },
+ { 0xd10e, 0x00, 0 },
+ { 0xd10f, 0x00, 0 },
+ { 0xd110, 0xd8, 0 },
+ { 0xd111, 0x03, 0 },
+ { 0xd112, 0x30, 0 },
+ { 0xd113, 0x00, 0 },
+ { 0xd114, 0x8c, 0 },
+ { 0xd115, 0x65, 0 },
+ { 0xd116, 0x00, 0 },
+ { 0xd117, 0x6e, 0 },
+ { 0xd118, 0xe5, 0 },
+ { 0xd119, 0x84, 0 },
+ { 0xd11a, 0x18, 0 },
+ { 0xd11b, 0x00, 0 },
+ { 0xd11c, 0x10, 0 },
+ { 0xd11d, 0x00, 0 },
+ { 0xd11e, 0x00, 0 },
+ { 0xd11f, 0x07, 0 },
+ { 0xd120, 0x18, 0 },
+ { 0xd121, 0x80, 0 },
+ { 0xd122, 0x80, 0 },
+ { 0xd123, 0x06, 0 },
+ { 0xd124, 0x94, 0 },
+ { 0xd125, 0x65, 0 },
+ { 0xd126, 0x00, 0 },
+ { 0xd127, 0x70, 0 },
+ { 0xd128, 0xe5, 0 },
+ { 0xd129, 0x43, 0 },
+ { 0xd12a, 0x60, 0 },
+ { 0xd12b, 0x00, 0 },
+ { 0xd12c, 0x0c, 0 },
+ { 0xd12d, 0x00, 0 },
+ { 0xd12e, 0x00, 0 },
+ { 0xd12f, 0x3e, 0 },
+ { 0xd130, 0xa8, 0 },
+ { 0xd131, 0x64, 0 },
+ { 0xd132, 0x38, 0 },
+ { 0xd133, 0x24, 0 },
+ { 0xd134, 0x18, 0 },
+ { 0xd135, 0x80, 0 },
+ { 0xd136, 0x80, 0 },
+ { 0xd137, 0x06, 0 },
+ { 0xd138, 0xa8, 0 },
+ { 0xd139, 0x64, 0 },
+ { 0xd13a, 0x38, 0 },
+ { 0xd13b, 0x24, 0 },
+ { 0xd13c, 0x8c, 0 },
+ { 0xd13d, 0x63, 0 },
+ { 0xd13e, 0x00, 0 },
+ { 0xd13f, 0x00, 0 },
+ { 0xd140, 0xa4, 0 },
+ { 0xd141, 0x63, 0 },
+ { 0xd142, 0x00, 0 },
+ { 0xd143, 0x40, 0 },
+ { 0xd144, 0xbc, 0 },
+ { 0xd145, 0x23, 0 },
+ { 0xd146, 0x00, 0 },
+ { 0xd147, 0x00, 0 },
+ { 0xd148, 0x0c, 0 },
+ { 0xd149, 0x00, 0 },
+ { 0xd14a, 0x00, 0 },
+ { 0xd14b, 0x2a, 0 },
+ { 0xd14c, 0xa8, 0 },
+ { 0xd14d, 0x64, 0 },
+ { 0xd14e, 0x6e, 0 },
+ { 0xd14f, 0x44, 0 },
+ { 0xd150, 0x19, 0 },
+ { 0xd151, 0x00, 0 },
+ { 0xd152, 0x80, 0 },
+ { 0xd153, 0x06, 0 },
+ { 0xd154, 0xa8, 0 },
+ { 0xd155, 0xe8, 0 },
+ { 0xd156, 0x3d, 0 },
+ { 0xd157, 0x05, 0 },
+ { 0xd158, 0x8c, 0 },
+ { 0xd159, 0x67, 0 },
+ { 0xd15a, 0x00, 0 },
+ { 0xd15b, 0x00, 0 },
+ { 0xd15c, 0xb8, 0 },
+ { 0xd15d, 0x63, 0 },
+ { 0xd15e, 0x00, 0 },
+ { 0xd15f, 0x18, 0 },
+ { 0xd160, 0xb8, 0 },
+ { 0xd161, 0x63, 0 },
+ { 0xd162, 0x00, 0 },
+ { 0xd163, 0x98, 0 },
+ { 0xd164, 0xbc, 0 },
+ { 0xd165, 0x03, 0 },
+ { 0xd166, 0x00, 0 },
+ { 0xd167, 0x00, 0 },
+ { 0xd168, 0x10, 0 },
+ { 0xd169, 0x00, 0 },
+ { 0xd16a, 0x00, 0 },
+ { 0xd16b, 0x10, 0 },
+ { 0xd16c, 0xa9, 0 },
+ { 0xd16d, 0x48, 0 },
+ { 0xd16e, 0x67, 0 },
+ { 0xd16f, 0x02, 0 },
+ { 0xd170, 0xb8, 0 },
+ { 0xd171, 0xa3, 0 },
+ { 0xd172, 0x00, 0 },
+ { 0xd173, 0x19, 0 },
+ { 0xd174, 0x8c, 0 },
+ { 0xd175, 0x8a, 0 },
+ { 0xd176, 0x00, 0 },
+ { 0xd177, 0x00, 0 },
+ { 0xd178, 0xa9, 0 },
+ { 0xd179, 0x68, 0 },
+ { 0xd17a, 0x67, 0 },
+ { 0xd17b, 0x03, 0 },
+ { 0xd17c, 0xb8, 0 },
+ { 0xd17d, 0xc4, 0 },
+ { 0xd17e, 0x00, 0 },
+ { 0xd17f, 0x08, 0 },
+ { 0xd180, 0x8c, 0 },
+ { 0xd181, 0x6b, 0 },
+ { 0xd182, 0x00, 0 },
+ { 0xd183, 0x00, 0 },
+ { 0xd184, 0xb8, 0 },
+ { 0xd185, 0x85, 0 },
+ { 0xd186, 0x00, 0 },
+ { 0xd187, 0x98, 0 },
+ { 0xd188, 0xe0, 0 },
+ { 0xd189, 0x63, 0 },
+ { 0xd18a, 0x30, 0 },
+ { 0xd18b, 0x04, 0 },
+ { 0xd18c, 0xe0, 0 },
+ { 0xd18d, 0x64, 0 },
+ { 0xd18e, 0x18, 0 },
+ { 0xd18f, 0x00, 0 },
+ { 0xd190, 0xa4, 0 },
+ { 0xd191, 0x83, 0 },
+ { 0xd192, 0xff, 0 },
+ { 0xd193, 0xff, 0 },
+ { 0xd194, 0xb8, 0 },
+ { 0xd195, 0x64, 0 },
+ { 0xd196, 0x00, 0 },
+ { 0xd197, 0x48, 0 },
+ { 0xd198, 0xd8, 0 },
+ { 0xd199, 0x0a, 0 },
+ { 0xd19a, 0x18, 0 },
+ { 0xd19b, 0x00, 0 },
+ { 0xd19c, 0xd8, 0 },
+ { 0xd19d, 0x0b, 0 },
+ { 0xd19e, 0x20, 0 },
+ { 0xd19f, 0x00, 0 },
+ { 0xd1a0, 0x9c, 0 },
+ { 0xd1a1, 0x60, 0 },
+ { 0xd1a2, 0x00, 0 },
+ { 0xd1a3, 0x00, 0 },
+ { 0xd1a4, 0xd8, 0 },
+ { 0xd1a5, 0x07, 0 },
+ { 0xd1a6, 0x18, 0 },
+ { 0xd1a7, 0x00, 0 },
+ { 0xd1a8, 0xa8, 0 },
+ { 0xd1a9, 0x68, 0 },
+ { 0xd1aa, 0x38, 0 },
+ { 0xd1ab, 0x22, 0 },
+ { 0xd1ac, 0x9c, 0 },
+ { 0xd1ad, 0x80, 0 },
+ { 0xd1ae, 0x00, 0 },
+ { 0xd1af, 0x70, 0 },
+ { 0xd1b0, 0xa8, 0 },
+ { 0xd1b1, 0xe8, 0 },
+ { 0xd1b2, 0x38, 0 },
+ { 0xd1b3, 0x43, 0 },
+ { 0xd1b4, 0xd8, 0 },
+ { 0xd1b5, 0x03, 0 },
+ { 0xd1b6, 0x20, 0 },
+ { 0xd1b7, 0x00, 0 },
+ { 0xd1b8, 0x9c, 0 },
+ { 0xd1b9, 0xa0, 0 },
+ { 0xd1ba, 0x00, 0 },
+ { 0xd1bb, 0x00, 0 },
+ { 0xd1bc, 0xa8, 0 },
+ { 0xd1bd, 0xc8, 0 },
+ { 0xd1be, 0x38, 0 },
+ { 0xd1bf, 0x42, 0 },
+ { 0xd1c0, 0x8c, 0 },
+ { 0xd1c1, 0x66, 0 },
+ { 0xd1c2, 0x00, 0 },
+ { 0xd1c3, 0x00, 0 },
+ { 0xd1c4, 0x9c, 0 },
+ { 0xd1c5, 0xa5, 0 },
+ { 0xd1c6, 0x00, 0 },
+ { 0xd1c7, 0x01, 0 },
+ { 0xd1c8, 0xb8, 0 },
+ { 0xd1c9, 0x83, 0 },
+ { 0xd1ca, 0x00, 0 },
+ { 0xd1cb, 0x08, 0 },
+ { 0xd1cc, 0xa4, 0 },
+ { 0xd1cd, 0xa5, 0 },
+ { 0xd1ce, 0x00, 0 },
+ { 0xd1cf, 0xff, 0 },
+ { 0xd1d0, 0x8c, 0 },
+ { 0xd1d1, 0x67, 0 },
+ { 0xd1d2, 0x00, 0 },
+ { 0xd1d3, 0x00, 0 },
+ { 0xd1d4, 0xe0, 0 },
+ { 0xd1d5, 0x63, 0 },
+ { 0xd1d6, 0x20, 0 },
+ { 0xd1d7, 0x00, 0 },
+ { 0xd1d8, 0xa4, 0 },
+ { 0xd1d9, 0x63, 0 },
+ { 0xd1da, 0xff, 0 },
+ { 0xd1db, 0xff, 0 },
+ { 0xd1dc, 0xbc, 0 },
+ { 0xd1dd, 0x43, 0 },
+ { 0xd1de, 0x00, 0 },
+ { 0xd1df, 0x07, 0 },
+ { 0xd1e0, 0x0c, 0 },
+ { 0xd1e1, 0x00, 0 },
+ { 0xd1e2, 0x00, 0 },
+ { 0xd1e3, 0x5b, 0 },
+ { 0xd1e4, 0xbc, 0 },
+ { 0xd1e5, 0x05, 0 },
+ { 0xd1e6, 0x00, 0 },
+ { 0xd1e7, 0x02, 0 },
+ { 0xd1e8, 0x03, 0 },
+ { 0xd1e9, 0xff, 0 },
+ { 0xd1ea, 0xff, 0 },
+ { 0xd1eb, 0xf6, 0 },
+ { 0xd1ec, 0x9c, 0 },
+ { 0xd1ed, 0xa0, 0 },
+ { 0xd1ee, 0x00, 0 },
+ { 0xd1ef, 0x00, 0 },
+ { 0xd1f0, 0xa8, 0 },
+ { 0xd1f1, 0xa4, 0 },
+ { 0xd1f2, 0x55, 0 },
+ { 0xd1f3, 0x86, 0 },
+ { 0xd1f4, 0x8c, 0 },
+ { 0xd1f5, 0x63, 0 },
+ { 0xd1f6, 0x00, 0 },
+ { 0xd1f7, 0x00, 0 },
+ { 0xd1f8, 0xa8, 0 },
+ { 0xd1f9, 0xc4, 0 },
+ { 0xd1fa, 0x6e, 0 },
+ { 0xd1fb, 0x45, 0 },
+ { 0xd1fc, 0xa8, 0 },
+ { 0xd1fd, 0xe4, 0 },
+ { 0xd1fe, 0x55, 0 },
+ { 0xd1ff, 0x87, 0 },
+ { 0xd200, 0xd8, 0 },
+ { 0xd201, 0x05, 0 },
+ { 0xd202, 0x18, 0 },
+ { 0xd203, 0x00, 0 },
+ { 0xd204, 0x8c, 0 },
+ { 0xd205, 0x66, 0 },
+ { 0xd206, 0x00, 0 },
+ { 0xd207, 0x00, 0 },
+ { 0xd208, 0xa8, 0 },
+ { 0xd209, 0xa4, 0 },
+ { 0xd20a, 0x6e, 0 },
+ { 0xd20b, 0x46, 0 },
+ { 0xd20c, 0xd8, 0 },
+ { 0xd20d, 0x07, 0 },
+ { 0xd20e, 0x18, 0 },
+ { 0xd20f, 0x00, 0 },
+ { 0xd210, 0xa8, 0 },
+ { 0xd211, 0x84, 0 },
+ { 0xd212, 0x55, 0 },
+ { 0xd213, 0x88, 0 },
+ { 0xd214, 0x8c, 0 },
+ { 0xd215, 0x65, 0 },
+ { 0xd216, 0x00, 0 },
+ { 0xd217, 0x00, 0 },
+ { 0xd218, 0xd8, 0 },
+ { 0xd219, 0x04, 0 },
+ { 0xd21a, 0x18, 0 },
+ { 0xd21b, 0x00, 0 },
+ { 0xd21c, 0x03, 0 },
+ { 0xd21d, 0xff, 0 },
+ { 0xd21e, 0xff, 0 },
+ { 0xd21f, 0xce, 0 },
+ { 0xd220, 0x19, 0 },
+ { 0xd221, 0x00, 0 },
+ { 0xd222, 0x80, 0 },
+ { 0xd223, 0x06, 0 },
+ { 0xd224, 0x8c, 0 },
+ { 0xd225, 0x63, 0 },
+ { 0xd226, 0x00, 0 },
+ { 0xd227, 0x00, 0 },
+ { 0xd228, 0xa4, 0 },
+ { 0xd229, 0x63, 0 },
+ { 0xd22a, 0x00, 0 },
+ { 0xd22b, 0x40, 0 },
+ { 0xd22c, 0xbc, 0 },
+ { 0xd22d, 0x23, 0 },
+ { 0xd22e, 0x00, 0 },
+ { 0xd22f, 0x00, 0 },
+ { 0xd230, 0x13, 0 },
+ { 0xd231, 0xff, 0 },
+ { 0xd232, 0xff, 0 },
+ { 0xd233, 0xc8, 0 },
+ { 0xd234, 0x9d, 0 },
+ { 0xd235, 0x00, 0 },
+ { 0xd236, 0x00, 0 },
+ { 0xd237, 0x40, 0 },
+ { 0xd238, 0xa8, 0 },
+ { 0xd239, 0x64, 0 },
+ { 0xd23a, 0x55, 0 },
+ { 0xd23b, 0x86, 0 },
+ { 0xd23c, 0xa8, 0 },
+ { 0xd23d, 0xa4, 0 },
+ { 0xd23e, 0x55, 0 },
+ { 0xd23f, 0x87, 0 },
+ { 0xd240, 0xd8, 0 },
+ { 0xd241, 0x03, 0 },
+ { 0xd242, 0x40, 0 },
+ { 0xd243, 0x00, 0 },
+ { 0xd244, 0xa8, 0 },
+ { 0xd245, 0x64, 0 },
+ { 0xd246, 0x55, 0 },
+ { 0xd247, 0x88, 0 },
+ { 0xd248, 0xd8, 0 },
+ { 0xd249, 0x05, 0 },
+ { 0xd24a, 0x40, 0 },
+ { 0xd24b, 0x00, 0 },
+ { 0xd24c, 0xd8, 0 },
+ { 0xd24d, 0x03, 0 },
+ { 0xd24e, 0x40, 0 },
+ { 0xd24f, 0x00, 0 },
+ { 0xd250, 0x03, 0 },
+ { 0xd251, 0xff, 0 },
+ { 0xd252, 0xff, 0 },
+ { 0xd253, 0xc1, 0 },
+ { 0xd254, 0x19, 0 },
+ { 0xd255, 0x00, 0 },
+ { 0xd256, 0x80, 0 },
+ { 0xd257, 0x06, 0 },
+ { 0xd258, 0x94, 0 },
+ { 0xd259, 0x84, 0 },
+ { 0xd25a, 0x00, 0 },
+ { 0xd25b, 0x72, 0 },
+ { 0xd25c, 0xe5, 0 },
+ { 0xd25d, 0xa4, 0 },
+ { 0xd25e, 0x60, 0 },
+ { 0xd25f, 0x00, 0 },
+ { 0xd260, 0x0c, 0 },
+ { 0xd261, 0x00, 0 },
+ { 0xd262, 0x00, 0 },
+ { 0xd263, 0x3f, 0 },
+ { 0xd264, 0x9d, 0 },
+ { 0xd265, 0x60, 0 },
+ { 0xd266, 0x01, 0 },
+ { 0xd267, 0x00, 0 },
+ { 0xd268, 0x85, 0 },
+ { 0xd269, 0x4e, 0 },
+ { 0xd26a, 0x00, 0 },
+ { 0xd26b, 0x00, 0 },
+ { 0xd26c, 0x98, 0 },
+ { 0xd26d, 0x70, 0 },
+ { 0xd26e, 0x00, 0 },
+ { 0xd26f, 0x00, 0 },
+ { 0xd270, 0x8c, 0 },
+ { 0xd271, 0x8a, 0 },
+ { 0xd272, 0x00, 0 },
+ { 0xd273, 0x6f, 0 },
+ { 0xd274, 0xe5, 0 },
+ { 0xd275, 0x63, 0 },
+ { 0xd276, 0x20, 0 },
+ { 0xd277, 0x00, 0 },
+ { 0xd278, 0x10, 0 },
+ { 0xd279, 0x00, 0 },
+ { 0xd27a, 0x00, 0 },
+ { 0xd27b, 0x07, 0 },
+ { 0xd27c, 0x15, 0 },
+ { 0xd27d, 0x00, 0 },
+ { 0xd27e, 0x00, 0 },
+ { 0xd27f, 0x00, 0 },
+ { 0xd280, 0x8c, 0 },
+ { 0xd281, 0xaa, 0 },
+ { 0xd282, 0x00, 0 },
+ { 0xd283, 0x6e, 0 },
+ { 0xd284, 0xe0, 0 },
+ { 0xd285, 0x63, 0 },
+ { 0xd286, 0x28, 0 },
+ { 0xd287, 0x02, 0 },
+ { 0xd288, 0xe0, 0 },
+ { 0xd289, 0x84, 0 },
+ { 0xd28a, 0x28, 0 },
+ { 0xd28b, 0x02, 0 },
+ { 0xd28c, 0x07, 0 },
+ { 0xd28d, 0xff, 0 },
+ { 0xd28e, 0xf8, 0 },
+ { 0xd28f, 0x66, 0 },
+ { 0xd290, 0xe0, 0 },
+ { 0xd291, 0x63, 0 },
+ { 0xd292, 0x5b, 0 },
+ { 0xd293, 0x06, 0 },
+ { 0xd294, 0x8c, 0 },
+ { 0xd295, 0x6a, 0 },
+ { 0xd296, 0x00, 0 },
+ { 0xd297, 0x77, 0 },
+ { 0xd298, 0xe0, 0 },
+ { 0xd299, 0x63, 0 },
+ { 0xd29a, 0x5b, 0 },
+ { 0xd29b, 0x06, 0 },
+ { 0xd29c, 0xbd, 0 },
+ { 0xd29d, 0x63, 0 },
+ { 0xd29e, 0x00, 0 },
+ { 0xd29f, 0x00, 0 },
+ { 0xd2a0, 0x0c, 0 },
+ { 0xd2a1, 0x00, 0 },
+ { 0xd2a2, 0x00, 0 },
+ { 0xd2a3, 0x3c, 0 },
+ { 0xd2a4, 0x15, 0 },
+ { 0xd2a5, 0x00, 0 },
+ { 0xd2a6, 0x00, 0 },
+ { 0xd2a7, 0x00, 0 },
+ { 0xd2a8, 0x8c, 0 },
+ { 0xd2a9, 0x8a, 0 },
+ { 0xd2aa, 0x00, 0 },
+ { 0xd2ab, 0x78, 0 },
+ { 0xd2ac, 0xb8, 0 },
+ { 0xd2ad, 0x63, 0 },
+ { 0xd2ae, 0x00, 0 },
+ { 0xd2af, 0x88, 0 },
+ { 0xd2b0, 0xe1, 0 },
+ { 0xd2b1, 0x64, 0 },
+ { 0xd2b2, 0x5b, 0 },
+ { 0xd2b3, 0x06, 0 },
+ { 0xd2b4, 0xbd, 0 },
+ { 0xd2b5, 0x6b, 0 },
+ { 0xd2b6, 0x00, 0 },
+ { 0xd2b7, 0x00, 0 },
+ { 0xd2b8, 0x0c, 0 },
+ { 0xd2b9, 0x00, 0 },
+ { 0xd2ba, 0x00, 0 },
+ { 0xd2bb, 0x34, 0 },
+ { 0xd2bc, 0xd4, 0 },
+ { 0xd2bd, 0x01, 0 },
+ { 0xd2be, 0x18, 0 },
+ { 0xd2bf, 0x14, 0 },
+ { 0xd2c0, 0xb9, 0 },
+ { 0xd2c1, 0x6b, 0 },
+ { 0xd2c2, 0x00, 0 },
+ { 0xd2c3, 0x88, 0 },
+ { 0xd2c4, 0x85, 0 },
+ { 0xd2c5, 0x01, 0 },
+ { 0xd2c6, 0x00, 0 },
+ { 0xd2c7, 0x14, 0 },
+ { 0xd2c8, 0xbd, 0 },
+ { 0xd2c9, 0x68, 0 },
+ { 0xd2ca, 0x00, 0 },
+ { 0xd2cb, 0x00, 0 },
+ { 0xd2cc, 0x0c, 0 },
+ { 0xd2cd, 0x00, 0 },
+ { 0xd2ce, 0x00, 0 },
+ { 0xd2cf, 0x2c, 0 },
+ { 0xd2d0, 0xd4, 0 },
+ { 0xd2d1, 0x01, 0 },
+ { 0xd2d2, 0x58, 0 },
+ { 0xd2d3, 0x18, 0 },
+ { 0xd2d4, 0x84, 0 },
+ { 0xd2d5, 0x81, 0 },
+ { 0xd2d6, 0x00, 0 },
+ { 0xd2d7, 0x14, 0 },
+ { 0xd2d8, 0xbd, 0 },
+ { 0xd2d9, 0xa4, 0 },
+ { 0xd2da, 0x01, 0 },
+ { 0xd2db, 0x00, 0 },
+ { 0xd2dc, 0x10, 0 },
+ { 0xd2dd, 0x00, 0 },
+ { 0xd2de, 0x00, 0 },
+ { 0xd2df, 0x05, 0 },
+ { 0xd2e0, 0x84, 0 },
+ { 0xd2e1, 0xc1, 0 },
+ { 0xd2e2, 0x00, 0 },
+ { 0xd2e3, 0x18, 0 },
+ { 0xd2e4, 0x9c, 0 },
+ { 0xd2e5, 0xa0, 0 },
+ { 0xd2e6, 0x01, 0 },
+ { 0xd2e7, 0x00, 0 },
+ { 0xd2e8, 0xd4, 0 },
+ { 0xd2e9, 0x01, 0 },
+ { 0xd2ea, 0x28, 0 },
+ { 0xd2eb, 0x14, 0 },
+ { 0xd2ec, 0x84, 0 },
+ { 0xd2ed, 0xc1, 0 },
+ { 0xd2ee, 0x00, 0 },
+ { 0xd2ef, 0x18, 0 },
+ { 0xd2f0, 0xbd, 0 },
+ { 0xd2f1, 0x66, 0 },
+ { 0xd2f2, 0x00, 0 },
+ { 0xd2f3, 0x00, 0 },
+ { 0xd2f4, 0x0c, 0 },
+ { 0xd2f5, 0x00, 0 },
+ { 0xd2f6, 0x00, 0 },
+ { 0xd2f7, 0x20, 0 },
+ { 0xd2f8, 0x9d, 0 },
+ { 0xd2f9, 0x00, 0 },
+ { 0xd2fa, 0x00, 0 },
+ { 0xd2fb, 0x00, 0 },
+ { 0xd2fc, 0x84, 0 },
+ { 0xd2fd, 0x61, 0 },
+ { 0xd2fe, 0x00, 0 },
+ { 0xd2ff, 0x18, 0 },
+ { 0xd300, 0xbd, 0 },
+ { 0xd301, 0xa3, 0 },
+ { 0xd302, 0x01, 0 },
+ { 0xd303, 0x00, 0 },
+ { 0xd304, 0x10, 0 },
+ { 0xd305, 0x00, 0 },
+ { 0xd306, 0x00, 0 },
+ { 0xd307, 0x03, 0 },
+ { 0xd308, 0x9c, 0 },
+ { 0xd309, 0x80, 0 },
+ { 0xd30a, 0x01, 0 },
+ { 0xd30b, 0x00, 0 },
+ { 0xd30c, 0xd4, 0 },
+ { 0xd30d, 0x01, 0 },
+ { 0xd30e, 0x20, 0 },
+ { 0xd30f, 0x18, 0 },
+ { 0xd310, 0x18, 0 },
+ { 0xd311, 0x60, 0 },
+ { 0xd312, 0x80, 0 },
+ { 0xd313, 0x06, 0 },
+ { 0xd314, 0x85, 0 },
+ { 0xd315, 0x01, 0 },
+ { 0xd316, 0x00, 0 },
+ { 0xd317, 0x14, 0 },
+ { 0xd318, 0xa8, 0 },
+ { 0xd319, 0x83, 0 },
+ { 0xd31a, 0x38, 0 },
+ { 0xd31b, 0x29, 0 },
+ { 0xd31c, 0xa8, 0 },
+ { 0xd31d, 0xc3, 0 },
+ { 0xd31e, 0x40, 0 },
+ { 0xd31f, 0x08, 0 },
+ { 0xd320, 0x8c, 0 },
+ { 0xd321, 0x84, 0 },
+ { 0xd322, 0x00, 0 },
+ { 0xd323, 0x00, 0 },
+ { 0xd324, 0xa8, 0 },
+ { 0xd325, 0xa3, 0 },
+ { 0xd326, 0x38, 0 },
+ { 0xd327, 0x2a, 0 },
+ { 0xd328, 0xa8, 0 },
+ { 0xd329, 0xe3, 0 },
+ { 0xd32a, 0x40, 0 },
+ { 0xd32b, 0x09, 0 },
+ { 0xd32c, 0xe0, 0 },
+ { 0xd32d, 0x64, 0 },
+ { 0xd32e, 0x40, 0 },
+ { 0xd32f, 0x00, 0 },
+ { 0xd330, 0xd8, 0 },
+ { 0xd331, 0x06, 0 },
+ { 0xd332, 0x18, 0 },
+ { 0xd333, 0x00, 0 },
+ { 0xd334, 0x8c, 0 },
+ { 0xd335, 0x65, 0 },
+ { 0xd336, 0x00, 0 },
+ { 0xd337, 0x00, 0 },
+ { 0xd338, 0x84, 0 },
+ { 0xd339, 0x81, 0 },
+ { 0xd33a, 0x00, 0 },
+ { 0xd33b, 0x18, 0 },
+ { 0xd33c, 0xe3, 0 },
+ { 0xd33d, 0xe3, 0 },
+ { 0xd33e, 0x20, 0 },
+ { 0xd33f, 0x00, 0 },
+ { 0xd340, 0xd8, 0 },
+ { 0xd341, 0x07, 0 },
+ { 0xd342, 0xf8, 0 },
+ { 0xd343, 0x00, 0 },
+ { 0xd344, 0x03, 0 },
+ { 0xd345, 0xff, 0 },
+ { 0xd346, 0xff, 0 },
+ { 0xd347, 0x6f, 0 },
+ { 0xd348, 0x18, 0 },
+ { 0xd349, 0x60, 0 },
+ { 0xd34a, 0x00, 0 },
+ { 0xd34b, 0x01, 0 },
+ { 0xd34c, 0x0f, 0 },
+ { 0xd34d, 0xff, 0 },
+ { 0xd34e, 0xff, 0 },
+ { 0xd34f, 0x9d, 0 },
+ { 0xd350, 0x18, 0 },
+ { 0xd351, 0x60, 0 },
+ { 0xd352, 0x80, 0 },
+ { 0xd353, 0x06, 0 },
+ { 0xd354, 0x00, 0 },
+ { 0xd355, 0x00, 0 },
+ { 0xd356, 0x00, 0 },
+ { 0xd357, 0x11, 0 },
+ { 0xd358, 0xa8, 0 },
+ { 0xd359, 0x83, 0 },
+ { 0xd35a, 0x6e, 0 },
+ { 0xd35b, 0x43, 0 },
+ { 0xd35c, 0xe0, 0 },
+ { 0xd35d, 0x6c, 0 },
+ { 0xd35e, 0x28, 0 },
+ { 0xd35f, 0x02, 0 },
+ { 0xd360, 0xe0, 0 },
+ { 0xd361, 0x84, 0 },
+ { 0xd362, 0x28, 0 },
+ { 0xd363, 0x02, 0 },
+ { 0xd364, 0x07, 0 },
+ { 0xd365, 0xff, 0 },
+ { 0xd366, 0xf8, 0 },
+ { 0xd367, 0x30, 0 },
+ { 0xd368, 0xb8, 0 },
+ { 0xd369, 0x63, 0 },
+ { 0xd36a, 0x00, 0 },
+ { 0xd36b, 0x08, 0 },
+ { 0xd36c, 0x03, 0 },
+ { 0xd36d, 0xff, 0 },
+ { 0xd36e, 0xff, 0 },
+ { 0xd36f, 0xc0, 0 },
+ { 0xd370, 0x85, 0 },
+ { 0xd371, 0x4e, 0 },
+ { 0xd372, 0x00, 0 },
+ { 0xd373, 0x00, 0 },
+ { 0xd374, 0x03, 0 },
+ { 0xd375, 0xff, 0 },
+ { 0xd376, 0xff, 0 },
+ { 0xd377, 0xe7, 0 },
+ { 0xd378, 0xd4, 0 },
+ { 0xd379, 0x01, 0 },
+ { 0xd37a, 0x40, 0 },
+ { 0xd37b, 0x18, 0 },
+ { 0xd37c, 0x9c, 0 },
+ { 0xd37d, 0x60, 0 },
+ { 0xd37e, 0x00, 0 },
+ { 0xd37f, 0x00, 0 },
+ { 0xd380, 0x03, 0 },
+ { 0xd381, 0xff, 0 },
+ { 0xd382, 0xff, 0 },
+ { 0xd383, 0xdb, 0 },
+ { 0xd384, 0xd4, 0 },
+ { 0xd385, 0x01, 0 },
+ { 0xd386, 0x18, 0 },
+ { 0xd387, 0x14, 0 },
+ { 0xd388, 0x03, 0 },
+ { 0xd389, 0xff, 0 },
+ { 0xd38a, 0xff, 0 },
+ { 0xd38b, 0xce, 0 },
+ { 0xd38c, 0x9d, 0 },
+ { 0xd38d, 0x6b, 0 },
+ { 0xd38e, 0x00, 0 },
+ { 0xd38f, 0xff, 0 },
+ { 0xd390, 0x03, 0 },
+ { 0xd391, 0xff, 0 },
+ { 0xd392, 0xff, 0 },
+ { 0xd393, 0xc6, 0 },
+ { 0xd394, 0x9c, 0 },
+ { 0xd395, 0x63, 0 },
+ { 0xd396, 0x00, 0 },
+ { 0xd397, 0xff, 0 },
+ { 0xd398, 0xa8, 0 },
+ { 0xd399, 0xe3, 0 },
+ { 0xd39a, 0x38, 0 },
+ { 0xd39b, 0x0f, 0 },
+ { 0xd39c, 0x8c, 0 },
+ { 0xd39d, 0x84, 0 },
+ { 0xd39e, 0x00, 0 },
+ { 0xd39f, 0x00, 0 },
+ { 0xd3a0, 0xa8, 0 },
+ { 0xd3a1, 0xa3, 0 },
+ { 0xd3a2, 0x38, 0 },
+ { 0xd3a3, 0x0e, 0 },
+ { 0xd3a4, 0xa8, 0 },
+ { 0xd3a5, 0xc3, 0 },
+ { 0xd3a6, 0x6e, 0 },
+ { 0xd3a7, 0x42, 0 },
+ { 0xd3a8, 0xd8, 0 },
+ { 0xd3a9, 0x07, 0 },
+ { 0xd3aa, 0x20, 0 },
+ { 0xd3ab, 0x00, 0 },
+ { 0xd3ac, 0x8c, 0 },
+ { 0xd3ad, 0x66, 0 },
+ { 0xd3ae, 0x00, 0 },
+ { 0xd3af, 0x00, 0 },
+ { 0xd3b0, 0xd8, 0 },
+ { 0xd3b1, 0x05, 0 },
+ { 0xd3b2, 0x18, 0 },
+ { 0xd3b3, 0x00, 0 },
+ { 0xd3b4, 0x85, 0 },
+ { 0xd3b5, 0x21, 0 },
+ { 0xd3b6, 0x00, 0 },
+ { 0xd3b7, 0x00, 0 },
+ { 0xd3b8, 0x85, 0 },
+ { 0xd3b9, 0x41, 0 },
+ { 0xd3ba, 0x00, 0 },
+ { 0xd3bb, 0x04, 0 },
+ { 0xd3bc, 0x85, 0 },
+ { 0xd3bd, 0x81, 0 },
+ { 0xd3be, 0x00, 0 },
+ { 0xd3bf, 0x08, 0 },
+ { 0xd3c0, 0x85, 0 },
+ { 0xd3c1, 0xc1, 0 },
+ { 0xd3c2, 0x00, 0 },
+ { 0xd3c3, 0x0c, 0 },
+ { 0xd3c4, 0x86, 0 },
+ { 0xd3c5, 0x01, 0 },
+ { 0xd3c6, 0x00, 0 },
+ { 0xd3c7, 0x10, 0 },
+ { 0xd3c8, 0x44, 0 },
+ { 0xd3c9, 0x00, 0 },
+ { 0xd3ca, 0x48, 0 },
+ { 0xd3cb, 0x00, 0 },
+ { 0xd3cc, 0x9c, 0 },
+ { 0xd3cd, 0x21, 0 },
+ { 0xd3ce, 0x00, 0 },
+ { 0xd3cf, 0x1c, 0 },
+ { 0xd3d0, 0x9c, 0 },
+ { 0xd3d1, 0x21, 0 },
+ { 0xd3d2, 0xff, 0 },
+ { 0xd3d3, 0xfc, 0 },
+ { 0xd3d4, 0xd4, 0 },
+ { 0xd3d5, 0x01, 0 },
+ { 0xd3d6, 0x48, 0 },
+ { 0xd3d7, 0x00, 0 },
+ { 0xd3d8, 0x18, 0 },
+ { 0xd3d9, 0x60, 0 },
+ { 0xd3da, 0x00, 0 },
+ { 0xd3db, 0x01, 0 },
+ { 0xd3dc, 0xa8, 0 },
+ { 0xd3dd, 0x63, 0 },
+ { 0xd3de, 0x07, 0 },
+ { 0xd3df, 0x80, 0 },
+ { 0xd3e0, 0x8c, 0 },
+ { 0xd3e1, 0x63, 0 },
+ { 0xd3e2, 0x00, 0 },
+ { 0xd3e3, 0x68, 0 },
+ { 0xd3e4, 0xbc, 0 },
+ { 0xd3e5, 0x03, 0 },
+ { 0xd3e6, 0x00, 0 },
+ { 0xd3e7, 0x00, 0 },
+ { 0xd3e8, 0x10, 0 },
+ { 0xd3e9, 0x00, 0 },
+ { 0xd3ea, 0x00, 0 },
+ { 0xd3eb, 0x0c, 0 },
+ { 0xd3ec, 0x15, 0 },
+ { 0xd3ed, 0x00, 0 },
+ { 0xd3ee, 0x00, 0 },
+ { 0xd3ef, 0x00, 0 },
+ { 0xd3f0, 0x07, 0 },
+ { 0xd3f1, 0xff, 0 },
+ { 0xd3f2, 0xd9, 0 },
+ { 0xd3f3, 0x98, 0 },
+ { 0xd3f4, 0x15, 0 },
+ { 0xd3f5, 0x00, 0 },
+ { 0xd3f6, 0x00, 0 },
+ { 0xd3f7, 0x00, 0 },
+ { 0xd3f8, 0x18, 0 },
+ { 0xd3f9, 0x60, 0 },
+ { 0xd3fa, 0x80, 0 },
+ { 0xd3fb, 0x06, 0 },
+ { 0xd3fc, 0xa8, 0 },
+ { 0xd3fd, 0x63, 0 },
+ { 0xd3fe, 0xc4, 0 },
+ { 0xd3ff, 0xb8, 0 },
+ { 0xd400, 0x8c, 0 },
+ { 0xd401, 0x63, 0 },
+ { 0xd402, 0x00, 0 },
+ { 0xd403, 0x00, 0 },
+ { 0xd404, 0xbc, 0 },
+ { 0xd405, 0x23, 0 },
+ { 0xd406, 0x00, 0 },
+ { 0xd407, 0x01, 0 },
+ { 0xd408, 0x10, 0 },
+ { 0xd409, 0x00, 0 },
+ { 0xd40a, 0x00, 0 },
+ { 0xd40b, 0x25, 0 },
+ { 0xd40c, 0x9d, 0 },
+ { 0xd40d, 0x00, 0 },
+ { 0xd40e, 0x00, 0 },
+ { 0xd40f, 0x00, 0 },
+ { 0xd410, 0x00, 0 },
+ { 0xd411, 0x00, 0 },
+ { 0xd412, 0x00, 0 },
+ { 0xd413, 0x0b, 0 },
+ { 0xd414, 0xb8, 0 },
+ { 0xd415, 0xe8, 0 },
+ { 0xd416, 0x00, 0 },
+ { 0xd417, 0x02, 0 },
+ { 0xd418, 0x07, 0 },
+ { 0xd419, 0xff, 0 },
+ { 0xd41a, 0xd6, 0 },
+ { 0xd41b, 0x24, 0 },
+ { 0xd41c, 0x15, 0 },
+ { 0xd41d, 0x00, 0 },
+ { 0xd41e, 0x00, 0 },
+ { 0xd41f, 0x00, 0 },
+ { 0xd420, 0x18, 0 },
+ { 0xd421, 0x60, 0 },
+ { 0xd422, 0x80, 0 },
+ { 0xd423, 0x06, 0 },
+ { 0xd424, 0xa8, 0 },
+ { 0xd425, 0x63, 0 },
+ { 0xd426, 0xc4, 0 },
+ { 0xd427, 0xb8, 0 },
+ { 0xd428, 0x8c, 0 },
+ { 0xd429, 0x63, 0 },
+ { 0xd42a, 0x00, 0 },
+ { 0xd42b, 0x00, 0 },
+ { 0xd42c, 0xbc, 0 },
+ { 0xd42d, 0x23, 0 },
+ { 0xd42e, 0x00, 0 },
+ { 0xd42f, 0x01, 0 },
+ { 0xd430, 0x10, 0 },
+ { 0xd431, 0x00, 0 },
+ { 0xd432, 0x00, 0 },
+ { 0xd433, 0x1b, 0 },
+ { 0xd434, 0x9d, 0 },
+ { 0xd435, 0x00, 0 },
+ { 0xd436, 0x00, 0 },
+ { 0xd437, 0x00, 0 },
+ { 0xd438, 0xb8, 0 },
+ { 0xd439, 0xe8, 0 },
+ { 0xd43a, 0x00, 0 },
+ { 0xd43b, 0x02, 0 },
+ { 0xd43c, 0x9c, 0 },
+ { 0xd43d, 0xc0, 0 },
+ { 0xd43e, 0x00, 0 },
+ { 0xd43f, 0x00, 0 },
+ { 0xd440, 0x18, 0 },
+ { 0xd441, 0xa0, 0 },
+ { 0xd442, 0x80, 0 },
+ { 0xd443, 0x06, 0 },
+ { 0xd444, 0xe0, 0 },
+ { 0xd445, 0x67, 0 },
+ { 0xd446, 0x30, 0 },
+ { 0xd447, 0x00, 0 },
+ { 0xd448, 0xa8, 0 },
+ { 0xd449, 0xa5, 0 },
+ { 0xd44a, 0xce, 0 },
+ { 0xd44b, 0xb0, 0 },
+ { 0xd44c, 0x19, 0 },
+ { 0xd44d, 0x60, 0 },
+ { 0xd44e, 0x00, 0 },
+ { 0xd44f, 0x01, 0 },
+ { 0xd450, 0xa9, 0 },
+ { 0xd451, 0x6b, 0 },
+ { 0xd452, 0x06, 0 },
+ { 0xd453, 0x14, 0 },
+ { 0xd454, 0xe0, 0 },
+ { 0xd455, 0x83, 0 },
+ { 0xd456, 0x28, 0 },
+ { 0xd457, 0x00, 0 },
+ { 0xd458, 0x9c, 0 },
+ { 0xd459, 0xc6, 0 },
+ { 0xd45a, 0x00, 0 },
+ { 0xd45b, 0x01, 0 },
+ { 0xd45c, 0xe0, 0 },
+ { 0xd45d, 0x63, 0 },
+ { 0xd45e, 0x18, 0 },
+ { 0xd45f, 0x00, 0 },
+ { 0xd460, 0x8c, 0 },
+ { 0xd461, 0x84, 0 },
+ { 0xd462, 0x00, 0 },
+ { 0xd463, 0x00, 0 },
+ { 0xd464, 0xe0, 0 },
+ { 0xd465, 0xa3, 0 },
+ { 0xd466, 0x58, 0 },
+ { 0xd467, 0x00, 0 },
+ { 0xd468, 0xa4, 0 },
+ { 0xd469, 0xc6, 0 },
+ { 0xd46a, 0x00, 0 },
+ { 0xd46b, 0xff, 0 },
+ { 0xd46c, 0xb8, 0 },
+ { 0xd46d, 0x64, 0 },
+ { 0xd46e, 0x00, 0 },
+ { 0xd46f, 0x18, 0 },
+ { 0xd470, 0xbc, 0 },
+ { 0xd471, 0x46, 0 },
+ { 0xd472, 0x00, 0 },
+ { 0xd473, 0x03, 0 },
+ { 0xd474, 0x94, 0 },
+ { 0xd475, 0x85, 0 },
+ { 0xd476, 0x00, 0 },
+ { 0xd477, 0x00, 0 },
+ { 0xd478, 0xb8, 0 },
+ { 0xd479, 0x63, 0 },
+ { 0xd47a, 0x00, 0 },
+ { 0xd47b, 0x98, 0 },
+ { 0xd47c, 0xe0, 0 },
+ { 0xd47d, 0x64, 0 },
+ { 0xd47e, 0x18, 0 },
+ { 0xd47f, 0x00, 0 },
+ { 0xd480, 0x0f, 0 },
+ { 0xd481, 0xff, 0 },
+ { 0xd482, 0xff, 0 },
+ { 0xd483, 0xf0, 0 },
+ { 0xd484, 0xdc, 0 },
+ { 0xd485, 0x05, 0 },
+ { 0xd486, 0x18, 0 },
+ { 0xd487, 0x00, 0 },
+ { 0xd488, 0x9c, 0 },
+ { 0xd489, 0x68, 0 },
+ { 0xd48a, 0x00, 0 },
+ { 0xd48b, 0x01, 0 },
+ { 0xd48c, 0xa5, 0 },
+ { 0xd48d, 0x03, 0 },
+ { 0xd48e, 0x00, 0 },
+ { 0xd48f, 0xff, 0 },
+ { 0xd490, 0xbc, 0 },
+ { 0xd491, 0x48, 0 },
+ { 0xd492, 0x00, 0 },
+ { 0xd493, 0x01, 0 },
+ { 0xd494, 0x0f, 0 },
+ { 0xd495, 0xff, 0 },
+ { 0xd496, 0xff, 0 },
+ { 0xd497, 0xea, 0 },
+ { 0xd498, 0xb8, 0 },
+ { 0xd499, 0xe8, 0 },
+ { 0xd49a, 0x00, 0 },
+ { 0xd49b, 0x02, 0 },
+ { 0xd49c, 0x18, 0 },
+ { 0xd49d, 0x60, 0 },
+ { 0xd49e, 0x00, 0 },
+ { 0xd49f, 0x01, 0 },
+ { 0xd4a0, 0xa8, 0 },
+ { 0xd4a1, 0x63, 0 },
+ { 0xd4a2, 0x06, 0 },
+ { 0xd4a3, 0x14, 0 },
+ { 0xd4a4, 0x07, 0 },
+ { 0xd4a5, 0xff, 0 },
+ { 0xd4a6, 0xe4, 0 },
+ { 0xd4a7, 0x05, 0 },
+ { 0xd4a8, 0x9c, 0 },
+ { 0xd4a9, 0x83, 0 },
+ { 0xd4aa, 0x00, 0 },
+ { 0xd4ab, 0x10, 0 },
+ { 0xd4ac, 0x85, 0 },
+ { 0xd4ad, 0x21, 0 },
+ { 0xd4ae, 0x00, 0 },
+ { 0xd4af, 0x00, 0 },
+ { 0xd4b0, 0x44, 0 },
+ { 0xd4b1, 0x00, 0 },
+ { 0xd4b2, 0x48, 0 },
+ { 0xd4b3, 0x00, 0 },
+ { 0xd4b4, 0x9c, 0 },
+ { 0xd4b5, 0x21, 0 },
+ { 0xd4b6, 0x00, 0 },
+ { 0xd4b7, 0x04, 0 },
+ { 0xd4b8, 0x18, 0 },
+ { 0xd4b9, 0x60, 0 },
+ { 0xd4ba, 0x00, 0 },
+ { 0xd4bb, 0x01, 0 },
+ { 0xd4bc, 0x9c, 0 },
+ { 0xd4bd, 0x80, 0 },
+ { 0xd4be, 0xff, 0 },
+ { 0xd4bf, 0xff, 0 },
+ { 0xd4c0, 0xa8, 0 },
+ { 0xd4c1, 0x63, 0 },
+ { 0xd4c2, 0x09, 0 },
+ { 0xd4c3, 0xef, 0 },
+ { 0xd4c4, 0xd8, 0 },
+ { 0xd4c5, 0x03, 0 },
+ { 0xd4c6, 0x20, 0 },
+ { 0xd4c7, 0x00, 0 },
+ { 0xd4c8, 0x18, 0 },
+ { 0xd4c9, 0x60, 0 },
+ { 0xd4ca, 0x80, 0 },
+ { 0xd4cb, 0x06, 0 },
+ { 0xd4cc, 0xa8, 0 },
+ { 0xd4cd, 0x63, 0 },
+ { 0xd4ce, 0xc9, 0 },
+ { 0xd4cf, 0xef, 0 },
+ { 0xd4d0, 0xd8, 0 },
+ { 0xd4d1, 0x03, 0 },
+ { 0xd4d2, 0x20, 0 },
+ { 0xd4d3, 0x00, 0 },
+ { 0xd4d4, 0x44, 0 },
+ { 0xd4d5, 0x00, 0 },
+ { 0xd4d6, 0x48, 0 },
+ { 0xd4d7, 0x00, 0 },
+ { 0xd4d8, 0x15, 0 },
+ { 0xd4d9, 0x00, 0 },
+ { 0xd4da, 0x00, 0 },
+ { 0xd4db, 0x00, 0 },
+ { 0xd4dc, 0x18, 0 },
+ { 0xd4dd, 0x80, 0 },
+ { 0xd4de, 0x00, 0 },
+ { 0xd4df, 0x01, 0 },
+ { 0xd4e0, 0xa8, 0 },
+ { 0xd4e1, 0x84, 0 },
+ { 0xd4e2, 0x0a, 0 },
+ { 0xd4e3, 0x12, 0 },
+ { 0xd4e4, 0x8c, 0 },
+ { 0xd4e5, 0x64, 0 },
+ { 0xd4e6, 0x00, 0 },
+ { 0xd4e7, 0x00, 0 },
+ { 0xd4e8, 0xbc, 0 },
+ { 0xd4e9, 0x03, 0 },
+ { 0xd4ea, 0x00, 0 },
+ { 0xd4eb, 0x00, 0 },
+ { 0xd4ec, 0x13, 0 },
+ { 0xd4ed, 0xff, 0 },
+ { 0xd4ee, 0xff, 0 },
+ { 0xd4ef, 0xfe, 0 },
+ { 0xd4f0, 0x15, 0 },
+ { 0xd4f1, 0x00, 0 },
+ { 0xd4f2, 0x00, 0 },
+ { 0xd4f3, 0x00, 0 },
+ { 0xd4f4, 0x44, 0 },
+ { 0xd4f5, 0x00, 0 },
+ { 0xd4f6, 0x48, 0 },
+ { 0xd4f7, 0x00, 0 },
+ { 0xd4f8, 0x15, 0 },
+ { 0xd4f9, 0x00, 0 },
+ { 0xd4fa, 0x00, 0 },
+ { 0xd4fb, 0x00, 0 },
+ { 0xd4fc, 0x00, 0 },
+ { 0xd4fd, 0x00, 0 },
+ { 0xd4fe, 0x00, 0 },
+ { 0xd4ff, 0x00, 0 },
+ { 0xd500, 0x00, 0 },
+ { 0xd501, 0x00, 0 },
+ { 0xd502, 0x00, 0 },
+ { 0xd503, 0x00, 0 },
+ { 0x6f0e, 0x33, 0 },
+ { 0x6f0f, 0x33, 0 },
+ { 0x460e, 0x08, 0 },
+ { 0x460f, 0x01, 0 },
+ { 0x4610, 0x00, 0 },
+ { 0x4611, 0x01, 0 },
+ { 0x4612, 0x00, 0 },
+ { 0x4613, 0x01, 0 },
+ { 0x4605, 0x08, 0 },
+ { 0x4608, 0x00, 0 },
+ { 0x4609, 0x08, 0 },
+ { 0x6804, 0x00, 0 },
+ { 0x6805, 0x06, 0 },
+ { 0x6806, 0x00, 0 },
+ { 0x5120, 0x00, 0 },
+ { 0x3510, 0x00, 0 },
+ { 0x3504, 0x00, 0 },
+ { 0x6800, 0x00, 0 },
+ { 0x6f0d, 0x0f, 0 },
+ { 0x5000, 0xff, 0 },
+ { 0x5001, 0xbf, 0 },
+ { 0x5002, 0x7e, 0 },
+ { 0x5003, 0x0c, 0 },
+ { 0x503d, 0x00, 0 },
+ { 0xc450, 0x01, 0 },
+ { 0xc452, 0x04, 0 },
+ { 0xc453, 0x00, 0 },
+ { 0xc454, 0x00, 0 },
+ { 0xc455, 0x00, 0 },
+ { 0xc456, 0x00, 0 },
+ { 0xc457, 0x00, 0 },
+ { 0xc458, 0x00, 0 },
+ { 0xc459, 0x00, 0 },
+ { 0xc45b, 0x00, 0 },
+ { 0xc45c, 0x00, 0 },
+ { 0xc45d, 0x00, 0 },
+ { 0xc45e, 0x00, 0 },
+ { 0xc45f, 0x00, 0 },
+ { 0xc460, 0x00, 0 },
+ { 0xc461, 0x01, 0 },
+ { 0xc462, 0x01, 0 },
+ { 0xc464, 0x88, 0 },
+ { 0xc465, 0x00, 0 },
+ { 0xc466, 0x8a, 0 },
+ { 0xc467, 0x00, 0 },
+ { 0xc468, 0x86, 0 },
+ { 0xc469, 0x00, 0 },
+ { 0xc46a, 0x40, 0 },
+ { 0xc46b, 0x50, 0 },
+ { 0xc46c, 0x30, 0 },
+ { 0xc46d, 0x28, 0 },
+ { 0xc46e, 0x60, 0 },
+ { 0xc46f, 0x40, 0 },
+ { 0xc47c, 0x01, 0 },
+ { 0xc47d, 0x38, 0 },
+ { 0xc47e, 0x00, 0 },
+ { 0xc47f, 0x00, 0 },
+ { 0xc480, 0x00, 0 },
+ { 0xc481, 0xff, 0 },
+ { 0xc482, 0x00, 0 },
+ { 0xc483, 0x40, 0 },
+ { 0xc484, 0x00, 0 },
+ { 0xc485, 0x18, 0 },
+ { 0xc486, 0x00, 0 },
+ { 0xc487, 0x18, 0 },
+ { 0xc488, 0x34, 0 },
+ { 0xc489, 0x00, 0 },
+ { 0xc48a, 0x34, 0 },
+ { 0xc48b, 0x00, 0 },
+ { 0xc48c, 0x00, 0 },
+ { 0xc48d, 0x04, 0 },
+ { 0xc48e, 0x00, 0 },
+ { 0xc48f, 0x04, 0 },
+ { 0xc490, 0x07, 0 },
+ { 0xc492, 0x20, 0 },
+ { 0xc493, 0x08, 0 },
+ { 0xc498, 0x02, 0 },
+ { 0xc499, 0x00, 0 },
+ { 0xc49a, 0x02, 0 },
+ { 0xc49b, 0x00, 0 },
+ { 0xc49c, 0x02, 0 },
+ { 0xc49d, 0x00, 0 },
+ { 0xc49e, 0x02, 0 },
+ { 0xc49f, 0x60, 0 },
+ { 0xc4a0, 0x03, 0 },
+ { 0xc4a1, 0x00, 0 },
+ { 0xc4a2, 0x04, 0 },
+ { 0xc4a3, 0x00, 0 },
+ { 0xc4a4, 0x00, 0 },
+ { 0xc4a5, 0x10, 0 },
+ { 0xc4a6, 0x00, 0 },
+ { 0xc4a7, 0x40, 0 },
+ { 0xc4a8, 0x00, 0 },
+ { 0xc4a9, 0x80, 0 },
+ { 0xc4aa, 0x0d, 0 },
+ { 0xc4ab, 0x00, 0 },
+ { 0xc4ac, 0x0f, 0 },
+ { 0xc4ad, 0xc0, 0 },
+ { 0xc4b4, 0x01, 0 },
+ { 0xc4b5, 0x01, 0 },
+ { 0xc4b6, 0x00, 0 },
+ { 0xc4b7, 0x01, 0 },
+ { 0xc4b8, 0x00, 0 },
+ { 0xc4b9, 0x01, 0 },
+ { 0xc4ba, 0x01, 0 },
+ { 0xc4bb, 0x00, 0 },
+ { 0xc4bc, 0x01, 0 },
+ { 0xc4bd, 0x60, 0 },
+ { 0xc4be, 0x02, 0 },
+ { 0xc4bf, 0x33, 0 },
+ { 0xc4c8, 0x03, 0 },
+ { 0xc4c9, 0xd0, 0 },
+ { 0xc4ca, 0x0e, 0 },
+ { 0xc4cb, 0x00, 0 },
+ { 0xc4cc, 0x10, 0 },
+ { 0xc4cd, 0x18, 0 },
+ { 0xc4ce, 0x10, 0 },
+ { 0xc4cf, 0x18, 0 },
+ { 0xc4d0, 0x04, 0 },
+ { 0xc4d1, 0x80, 0 },
+ { 0xc4e0, 0x04, 0 },
+ { 0xc4e1, 0x02, 0 },
+ { 0xc4e2, 0x01, 0 },
+ { 0xc4e4, 0x10, 0 },
+ { 0xc4e5, 0x20, 0 },
+ { 0xc4e6, 0x30, 0 },
+ { 0xc4e7, 0x40, 0 },
+ { 0xc4e8, 0x50, 0 },
+ { 0xc4e9, 0x60, 0 },
+ { 0xc4ea, 0x70, 0 },
+ { 0xc4eb, 0x80, 0 },
+ { 0xc4ec, 0x90, 0 },
+ { 0xc4ed, 0xa0, 0 },
+ { 0xc4ee, 0xb0, 0 },
+ { 0xc4ef, 0xc0, 0 },
+ { 0xc4f0, 0xd0, 0 },
+ { 0xc4f1, 0xe0, 0 },
+ { 0xc4f2, 0xf0, 0 },
+ { 0xc4f3, 0x80, 0 },
+ { 0xc4f4, 0x00, 0 },
+ { 0xc4f5, 0x20, 0 },
+ { 0xc4f6, 0x02, 0 },
+ { 0xc4f7, 0x00, 0 },
+ { 0xc4f8, 0x04, 0 },
+ { 0xc4f9, 0x0b, 0 },
+ { 0xc4fa, 0x00, 0 },
+ { 0xc4fb, 0x00, 0 },
+ { 0xc4fc, 0x01, 0 },
+ { 0xc4fd, 0x00, 0 },
+ { 0xc4fe, 0x04, 0 },
+ { 0xc4ff, 0x02, 0 },
+ { 0xc500, 0x48, 0 },
+ { 0xc501, 0x74, 0 },
+ { 0xc502, 0x58, 0 },
+ { 0xc503, 0x80, 0 },
+ { 0xc504, 0x05, 0 },
+ { 0xc505, 0x80, 0 },
+ { 0xc506, 0x03, 0 },
+ { 0xc507, 0x80, 0 },
+ { 0xc508, 0x01, 0 },
+ { 0xc509, 0xc0, 0 },
+ { 0xc50a, 0x01, 0 },
+ { 0xc50b, 0xa0, 0 },
+ { 0xc50c, 0x01, 0 },
+ { 0xc50d, 0x2c, 0 },
+ { 0xc50e, 0x01, 0 },
+ { 0xc50f, 0x0a, 0 },
+ { 0xc510, 0x00, 0 },
+ { 0xc511, 0x01, 0 },
+ { 0xc512, 0x01, 0 },
+ { 0xc513, 0x80, 0 },
+ { 0xc514, 0x04, 0 },
+ { 0xc515, 0x00, 0 },
+ { 0xc518, 0x03, 0 },
+ { 0xc519, 0x48, 0 },
+ { 0xc51a, 0x07, 0 },
+ { 0xc51b, 0x70, 0 },
+ { 0xc2e0, 0x00, 0 },
+ { 0xc2e1, 0x51, 0 },
+ { 0xc2e2, 0x00, 0 },
+ { 0xc2e3, 0xd6, 0 },
+ { 0xc2e4, 0x01, 0 },
+ { 0xc2e5, 0x5e, 0 },
+ { 0xc2e9, 0x01, 0 },
+ { 0xc2ea, 0x7a, 0 },
+ { 0xc2eb, 0x90, 0 },
+ { 0xc2ed, 0x00, 0 },
+ { 0xc2ee, 0x7a, 0 },
+ { 0xc2ef, 0x64, 0 },
+ { 0xc308, 0x00, 0 },
+ { 0xc309, 0x00, 0 },
+ { 0xc30a, 0x00, 0 },
+ { 0xc30c, 0x00, 0 },
+ { 0xc30d, 0x01, 0 },
+ { 0xc30e, 0x00, 0 },
+ { 0xc30f, 0x00, 0 },
+ { 0xc310, 0x01, 0 },
+ { 0xc311, 0x60, 0 },
+ { 0xc312, 0xff, 0 },
+ { 0xc313, 0x08, 0 },
+ { 0xc314, 0x01, 0 },
+ { 0xc315, 0x7f, 0 },
+ { 0xc316, 0xff, 0 },
+ { 0xc317, 0x0b, 0 },
+ { 0xc318, 0x00, 0 },
+ { 0xc319, 0x0c, 0 },
+ { 0xc31a, 0x00, 0 },
+ { 0xc31b, 0xe0, 0 },
+ { 0xc31c, 0x00, 0 },
+ { 0xc31d, 0x14, 0 },
+ { 0xc31e, 0x00, 0 },
+ { 0xc31f, 0xc5, 0 },
+ { 0xc320, 0xff, 0 },
+ { 0xc321, 0x4b, 0 },
+ { 0xc322, 0xff, 0 },
+ { 0xc323, 0xf0, 0 },
+ { 0xc324, 0xff, 0 },
+ { 0xc325, 0xe8, 0 },
+ { 0xc326, 0x00, 0 },
+ { 0xc327, 0x46, 0 },
+ { 0xc328, 0xff, 0 },
+ { 0xc329, 0xd2, 0 },
+ { 0xc32a, 0xff, 0 },
+ { 0xc32b, 0xe4, 0 },
+ { 0xc32c, 0xff, 0 },
+ { 0xc32d, 0xbb, 0 },
+ { 0xc32e, 0x00, 0 },
+ { 0xc32f, 0x61, 0 },
+ { 0xc330, 0xff, 0 },
+ { 0xc331, 0xf9, 0 },
+ { 0xc332, 0x00, 0 },
+ { 0xc333, 0xd9, 0 },
+ { 0xc334, 0x00, 0 },
+ { 0xc335, 0x2e, 0 },
+ { 0xc336, 0x00, 0 },
+ { 0xc337, 0xb1, 0 },
+ { 0xc338, 0xff, 0 },
+ { 0xc339, 0x64, 0 },
+ { 0xc33a, 0xff, 0 },
+ { 0xc33b, 0xeb, 0 },
+ { 0xc33c, 0xff, 0 },
+ { 0xc33d, 0xe8, 0 },
+ { 0xc33e, 0x00, 0 },
+ { 0xc33f, 0x48, 0 },
+ { 0xc340, 0xff, 0 },
+ { 0xc341, 0xd0, 0 },
+ { 0xc342, 0xff, 0 },
+ { 0xc343, 0xed, 0 },
+ { 0xc344, 0xff, 0 },
+ { 0xc345, 0xad, 0 },
+ { 0xc346, 0x00, 0 },
+ { 0xc347, 0x66, 0 },
+ { 0xc348, 0x01, 0 },
+ { 0xc349, 0x00, 0 },
+ { 0x6700, 0x04, 0 },
+ { 0x6701, 0x7b, 0 },
+ { 0x6702, 0xfd, 0 },
+ { 0x6703, 0xf9, 0 },
+ { 0x6704, 0x3d, 0 },
+ { 0x6705, 0x71, 0 },
+ { 0x6706, 0x78, 0 },
+ { 0x6708, 0x05, 0 },
+ { 0x6f06, 0x6f, 0 },
+ { 0x6f07, 0x00, 0 },
+ { 0x6f0a, 0x6f, 0 },
+ { 0x6f0b, 0x00, 0 },
+ { 0x6f00, 0x03, 0 },
+ { 0xc34c, 0x01, 0 },
+ { 0xc34d, 0x00, 0 },
+ { 0xc34e, 0x46, 0 },
+ { 0xc34f, 0x55, 0 },
+ { 0xc350, 0x00, 0 },
+ { 0xc351, 0x40, 0 },
+ { 0xc352, 0x00, 0 },
+ { 0xc353, 0xff, 0 },
+ { 0xc354, 0x04, 0 },
+ { 0xc355, 0x08, 0 },
+ { 0xc356, 0x01, 0 },
+ { 0xc357, 0xef, 0 },
+ { 0xc358, 0x30, 0 },
+ { 0xc359, 0x01, 0 },
+ { 0xc35a, 0x64, 0 },
+ { 0xc35b, 0x46, 0 },
+ { 0xc35c, 0x00, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x3042, 0xf0, 0 },
+ { 0x301b, 0xf0, 0 },
+ { 0x301c, 0xf0, 0 },
+ { 0x301a, 0xf0, 0 },
+ { 0xceb0, 0x00, 0 },
+ { 0xceb1, 0x00, 0 },
+ { 0xceb2, 0x00, 0 },
+ { 0xceb3, 0x00, 0 },
+ { 0xceb4, 0x00, 0 },
+ { 0xceb5, 0x00, 0 },
+ { 0xceb6, 0x00, 0 },
+ { 0xceb7, 0x00, 0 },
+ { 0xc4bc, 0x01, 0 },
+ { 0xc4bd, 0x60, 0 },
+
+ { 0x4709, 0x10, 0 },/* dvp swap */
+ { 0x4300, 0x3a, 0 },/* YUV order UYVY */
+ { 0x3832, 0x01, 0 },/* fsin */
+ { 0x3833, 0x1A, 0 },
+ { 0x3834, 0x03, 0 },
+ { 0x3835, 0x48, 0 },
+ { 0x302E, 0x01, 0 },
+};
+
+struct ov10635_mode_info {
+ enum ov10635_mode mode;
+ u32 width;
+ u32 height;
+ struct reg_value *init_data_ptr;
+ u32 init_data_size;
+};
+
+static struct reg_value ov10635_setting_30fps_WXGA_1280_800[] = {
+ { 0x3024, 0x01, 0 },
+ { 0x3003, 0x20, 0 },
+ { 0x3004, 0x21, 0 },
+ { 0x3005, 0x20, 0 },
+ { 0x3006, 0x91, 0 },
+ /* 1280x800 */
+ { 0x3808, 0x05, 0 },
+ { 0x3809, 0x00, 0 },
+ { 0x380a, 0x03, 0 },
+ { 0x380b, 0x20, 0 },
+};
+
+static struct reg_value ov10635_setting_30fps_720P_1280_720[] = {
+ { 0x3024, 0x01, 0 },
+ { 0x3003, 0x20, 0 },
+ { 0x3004, 0x21, 0 },
+ { 0x3005, 0x20, 0 },
+ { 0x3006, 0x91, 0 },
+ /* 1280x720 */
+ { 0x3808, 0x05, 0 },
+ { 0x3809, 0x00, 0 },
+ { 0x380a, 0x02, 0 },
+ { 0x380b, 0xD0, 0 },
+};
+
+static struct reg_value ov10635_setting_30fps_WVGA_752_480[] = {
+ { 0x3024, 0x01, 0 },
+ { 0x3003, 0x20, 0 },
+ { 0x3004, 0x21, 0 },
+ { 0x3005, 0x20, 0 },
+ { 0x3006, 0x91, 0 },
+ /* 752x480 */
+ { 0x3808, 0x02, 0 },
+ { 0x3809, 0xF0, 0 },
+ { 0x380a, 0x01, 0 },
+ { 0x380b, 0xE0, 0 },
+};
+
+static struct reg_value ov10635_setting_30fps_VGA_640_480[] = {
+ { 0x3024, 0x01, 0 },
+ { 0x3003, 0x20, 0 },
+ { 0x3004, 0x21, 0 },
+ { 0x3005, 0x20, 0 },
+ { 0x3006, 0x91, 0 },
+ /* 640x480 */
+ { 0x3808, 0x02, 0 },
+ { 0x3809, 0x80, 0 },
+ { 0x380a, 0x01, 0 },
+ { 0x380b, 0xE0, 0 },
+};
+
+static struct reg_value ov10635_setting_30fps_CIF_352_288[] = {
+ { 0x3024, 0x01, 0 },
+ { 0x3003, 0x20, 0 },
+ { 0x3004, 0x21, 0 },
+ { 0x3005, 0x20, 0 },
+ { 0x3006, 0x91, 0 },
+ /* 352x288 */
+ { 0x3808, 0x01, 0 },
+ { 0x3809, 0x60, 0 },
+ { 0x380a, 0x01, 0 },
+ { 0x380b, 0x20, 0 },
+};
+
+static struct reg_value ov10635_setting_30fps_QVGA_320_240[] = {
+ { 0x3024, 0x01, 0 },
+ { 0x3003, 0x20, 0 },
+ { 0x3004, 0x21, 0 },
+ { 0x3005, 0x20, 0 },
+ { 0x3006, 0x91, 0 },
+ /* 320x240 */
+ { 0x3808, 0x01, 0 },
+ { 0x3809, 0x40, 0 },
+ { 0x380a, 0x00, 0 },
+ { 0x380b, 0xF0, 0 },
+};
+
+static struct ov10635_mode_info ov10635_mode_info_data[2][ov10635_mode_MAX + 1] = {
+ /* 15 fps not supported: placeholder row, all entries NULL */
+ {
+ { ov10635_mode_WXGA_1280_800, 0, 0, NULL, 0 },
+ { ov10635_mode_720P_1280_720, 0, 0, NULL, 0 },
+ { ov10635_mode_WVGA_752_480, 0, 0, NULL, 0 },
+ { ov10635_mode_VGA_640_480, 0, 0, NULL, 0 },
+ { ov10635_mode_CIF_352_288, 0, 0, NULL, 0 },
+ { ov10635_mode_QVGA_320_240, 0, 0, NULL, 0 },
+ },
+ /* 30fps */
+ {
+ { ov10635_mode_WXGA_1280_800, 1280, 800,
+ ov10635_setting_30fps_WXGA_1280_800,
+ ARRAY_SIZE(ov10635_setting_30fps_WXGA_1280_800)
+ },
+ { ov10635_mode_720P_1280_720, 1280, 720,
+ ov10635_setting_30fps_720P_1280_720,
+ ARRAY_SIZE(ov10635_setting_30fps_720P_1280_720)
+ },
+ { ov10635_mode_WVGA_752_480, 752, 480,
+ ov10635_setting_30fps_WVGA_752_480,
+ ARRAY_SIZE(ov10635_setting_30fps_WVGA_752_480)
+ },
+ { ov10635_mode_VGA_640_480, 640, 480,
+ ov10635_setting_30fps_VGA_640_480,
+ ARRAY_SIZE(ov10635_setting_30fps_VGA_640_480)
+ },
+ { ov10635_mode_CIF_352_288, 352, 288,
+ ov10635_setting_30fps_CIF_352_288,
+ ARRAY_SIZE(ov10635_setting_30fps_CIF_352_288)
+ },
+ { ov10635_mode_QVGA_320_240, 320, 240,
+ ov10635_setting_30fps_QVGA_320_240,
+ ARRAY_SIZE(ov10635_setting_30fps_QVGA_320_240)
+ },
+ }
+};
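+
+/*
+ * The table is indexed by [frame rate][mode]; the first row corresponds to
+ * OV10635_15_FPS and holds only NULL placeholders since no dedicated 15 fps
+ * settings are provided here.
+ */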
+
+static inline struct sensor_data *subdev_to_sensor_data(struct v4l2_subdev *sd)
+{
+ return container_of(sd, struct sensor_data, subdev);
+}
+
+static enum ov10635_frame_rate to_ov10635_frame_rate(struct v4l2_fract *timeperframe)
+{
+ enum ov10635_frame_rate rate;
+ u32 tgt_fps; /* target frames per second */
+
+ if (timeperframe->numerator == 0)
+ return -EINVAL;
+
+ tgt_fps = timeperframe->denominator / timeperframe->numerator;
+
+ if (tgt_fps == 30)
+ rate = OV10635_30_FPS;
+ else if (tgt_fps == 15)
+ rate = OV10635_15_FPS;
+ else
+ rate = -EINVAL;
+
+ return rate;
+}
+
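+/*
+ * Register access helpers for the remote OV10635 sensors. The sensor takes
+ * 16-bit register addresses: the address is sent big-endian, followed by one
+ * data byte (write) or a one-byte read-back. Transfers cross the GMSL
+ * control channel, so they are retried up to 10 times with a short sleep in
+ * between. Note that client->addr is rewritten on each access because all
+ * remote devices share the deserializer's I2C adapter.
+ */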
+static inline int ov10635_read_reg(struct sensor_data *max9286_data, int index,
+ unsigned short reg, unsigned char *val)
+{
+ struct i2c_client *client = max9286_data->i2c_client;
+ struct device *dev = &client->dev;
+ unsigned char u8_buf[2] = { 0 };
+ unsigned int buf_len = 2;
+ int retry, timeout = 10;
+ unsigned char u8_val = 0;
+
+ u8_buf[0] = (reg >> 8) & 0xFF;
+ u8_buf[1] = reg & 0xFF;
+
+ client->addr = ADDR_OV_SENSOR + index;
+
+ for (retry = 0; retry < timeout; retry++) {
+ if (i2c_master_send(client, u8_buf, buf_len) < 0) {
+ dev_dbg(dev, "%s:read reg error on send: reg=0x%x, retry = %d.\n", __func__, reg, retry);
+ msleep(5);
+ continue;
+ }
+ if (i2c_master_recv(client, &u8_val, 1) != 1) {
+ dev_dbg(dev, "%s:read reg error on recv: reg=0x%x, retry = %d.\n", __func__, reg, retry);
+ msleep(5);
+ continue;
+ }
+ break;
+ }
+
+ if (retry >= timeout) {
+ dev_info(dev, "%s:read reg error: reg=0x%x.\n", __func__, reg);
+ return -1;
+ }
+
+ *val = u8_val;
+
+ return u8_val;
+}
+
+static inline int ov10635_write_reg(struct sensor_data *max9286_data, int index,
+ unsigned short reg, unsigned char val)
+{
+ struct i2c_client *client = max9286_data->i2c_client;
+ struct device *dev = &client->dev;
+ unsigned char u8_buf[3] = { 0 };
+ unsigned int buf_len = 3;
+ int retry, timeout = 10;
+
+ u8_buf[0] = (reg >> 8) & 0xFF;
+ u8_buf[1] = reg & 0xFF;
+ u8_buf[2] = val;
+
+ client->addr = ADDR_OV_SENSOR + index;
+ for (retry = 0; retry < timeout; retry++) {
+ if (i2c_master_send(client, u8_buf, buf_len) < 0) {
+ dev_dbg(dev, "%s:write reg error: reg=0x%x, val=0x%x, retry = %d.\n", __func__, reg, val, retry);
+ msleep(5);
+ continue;
+ }
+ break;
+ }
+
+ if (retry >= timeout) {
+ dev_info(dev, "%s:write reg error: reg=0x%x, val=0x%x.\n",
+ __func__, reg, val);
+ return -1;
+ }
+
+ return 0;
+}
+
+static int ov10635_check_device(struct sensor_data *max9286_data, int index)
+{
+ struct i2c_client *client = max9286_data->i2c_client;
+ struct device *dev = &client->dev;
+ unsigned char reg = 0;
+
+ ov10635_read_reg(max9286_data, index, OV10635_REG_PID, &reg);
+ if (reg != 0xA6) {
+ dev_err(dev, "%s: OV10635 hasn't been found, reg[0x%x] = 0x%x., index=%d\n",
+ __func__, OV10635_REG_PID, reg, index);
+ return -1;
+ }
+ ov10635_read_reg(max9286_data, index, OV10635_REG_VER, &reg);
+ if (reg != 0x35) {
+ dev_err(dev, "%s: OV10635 hasn't been found, reg[0x%x] = 0x%x.\n",
+ __func__, OV10635_REG_VER, reg);
+ return -1;
+ }
+ dev_info(dev, "%s: OV10635 index=%d was found.\n", __func__, index);
+
+ return 0;
+}
+
+static int ov10635_download_firmware(struct sensor_data *max9286_data, int index,
+ struct reg_value *reg_setting, s32 arysize)
+{
+ u32 delay_ms = 0;
+ u16 reg_addr = 0;
+ u8 val = 0;
+ int i, retval = 0;
+
+ for (i = 0; i < arysize; ++i, ++reg_setting) {
+ delay_ms = reg_setting->delay_ms;
+ reg_addr = reg_setting->reg_addr;
+ val = reg_setting->val;
+
+ retval = ov10635_write_reg(max9286_data, index, reg_addr, val);
+ if (retval < 0)
+ goto err;
+
+ if (delay_ms)
+ msleep(delay_ms);
+ }
+err:
+ return retval;
+}
+
+static int ov10635_initialize(struct sensor_data *max9286_data, int index)
+{
+ struct device *dev = &max9286_data->i2c_client->dev; int i, array_size;
+ int retval;
+
+ dev_info(dev, "%s: index = %d.\n", __func__, index);
+ array_size = ARRAY_SIZE(ov10635_init_data);
+ for (i = 0; i < array_size; i++) {
+ retval = ov10635_write_reg(max9286_data, index,
+ ov10635_init_data[i].reg_addr,
+ ov10635_init_data[i].val);
+ if (retval < 0)
+ break;
+ if (ov10635_init_data[i].delay_ms != 0)
+ msleep(ov10635_init_data[i].delay_ms);
+ }
+
+ return retval;
+}
+
+static inline int max9271_read_reg(struct sensor_data *max9286_data, int index, u8 reg)
+{
+ struct device *dev = &max9286_data->i2c_client->dev;
+ int val;
+ int retry, timeout = 10;
+
+ max9286_data->i2c_client->addr = ADDR_MAX9271 + index;
+ for (retry = 0; retry < timeout; retry++) {
+ val = i2c_smbus_read_byte_data(max9286_data->i2c_client, reg);
+ if (val < 0)
+ msleep(5);
+ else
+ break;
+ }
+
+ if (retry >= timeout) {
+ dev_info(dev, "%s:read reg error: reg=%2x\n", __func__, reg);
+ return -1;
+ }
+
+ return val;
+}
+
+static int max9271_write_reg(struct sensor_data *max9286_data, int index, u8 reg, u8 val)
+{
+ struct i2c_client *client = max9286_data->i2c_client;
+ struct device *dev = &client->dev;
+ s32 ret;
+ int retry, timeout = 10;
+
+ max9286_data->i2c_client->addr = ADDR_MAX9271 + index;
+ for (retry = 0; retry < timeout; retry++) {
+ ret = i2c_smbus_write_byte_data(client, reg, val);
+ if (ret < 0)
+ msleep(5);
+ else
+ break;
+ }
+ dev_dbg(dev, "%s: addr %02x reg %02x val %02x\n", __func__, client->addr, reg, val);
+
+ if (retry >= timeout) {
+ dev_info(dev, "%s:write reg error:reg=%2x,val=%2x\n", __func__, reg, val);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Read one register from a MAX9286 i2c slave device.
+ *
+ * @param reg: register in the device we wish to access.
+ *
+ * @return the register value if success, -1 otherwise.
+ */
+static inline int max9286_read_reg(struct sensor_data *max9286_data, u8 reg)
+{
+ int val;
+
+ max9286_data->i2c_client->addr = ADDR_MAX9286;
+ val = i2c_smbus_read_byte_data(max9286_data->i2c_client, reg);
+ if (val < 0) {
+ dev_info(&max9286_data->i2c_client->dev,
+ "%s:read reg error: reg=%2x\n", __func__, reg);
+ return -1;
+ }
+ return val;
+}
+
+/* Write one register of a MAX9286 i2c slave device.
+ *
+ * @param reg: register in the device we wish to access.
+ * @param val: value to be written.
+ *
+ * @return 0 if success, -1 otherwise.
+ */
+static int max9286_write_reg(struct sensor_data *max9286_data, u8 reg, u8 val)
+{
+ struct i2c_client *client = max9286_data->i2c_client;
+ struct device *dev = &client->dev;
+ s32 ret;
+
+ client->addr = ADDR_MAX9286;
+ ret = i2c_smbus_write_byte_data(client, reg, val);
+
+ dev_dbg(dev, "addr %02x reg %02x val %02x\n", client->addr, reg, val);
+
+ if (ret < 0) {
+ dev_info(dev, "write reg error:reg=%2x,val=%2x\n", reg, val);
+ return -1;
+ }
+ return 0;
+}
+
+#ifdef DEBUG
+static void max9271_dump_registers(struct sensor_data *max9286_data, int index)
+{
+ unsigned char i;
+
+ pr_info("max9271_dump_registers: index = %d.\r\n", index);
+ for (i = 0; i < 0x20; i++)
+ pr_info("MAX9271 Reg 0x%02x = 0x%x.\r\n",
+ i, max9271_read_reg(max9286_data, index, i));
+}
+
+static void max9286_dump_registers(struct sensor_data *max9286_data)
+{
+ unsigned char i;
+
+ pr_info("Dump MAX9286 registers:\r\n");
+ for (i = 0; i < 0x72; i++)
+ pr_info("MAX9286 Reg 0x%02x = 0x%x.\r\n",
+ i, max9286_read_reg(max9286_data, i));
+}
+#else
+static void max9271_dump_registers(struct sensor_data *max9286_data, int index)
+{
+}
+#endif
+
+static void max9286_hw_reset(struct sensor_data *max9286_data)
+{
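+ /*
+ * Toggle the power-down GPIO to reset the chip: assert it for at
+ * least 200 us, then release it and give the part time to come back
+ * up. The effective polarity depends on how pwn_gpio is described in
+ * the device tree.
+ */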
+ gpiod_set_value_cansleep(max9286_data->pwn_gpio, 0);
+ udelay(200);
+ gpiod_set_value_cansleep(max9286_data->pwn_gpio, 1);
+ msleep(1);
+}
+
+static int max9286_hardware_preinit(struct sensor_data *max9286_data)
+{
+ u8 reg;
+
+ dev_info(&max9286_data->i2c_client->dev, "In %s()\n", __func__);
+
+ /* Disable CSI Output */
+ max9286_write_reg(max9286_data, 0x15, 0x03);
+
+ /* Enable PRBS test */
+ max9286_write_reg(max9286_data, 0x0E, 0x5F);
+ msleep(10);
+
+ /* Enable Custom Reverse Channel & First Pulse Length STEP 1 */
+ max9286_write_reg(max9286_data, 0x3F, 0x4F);
+ msleep(2); /* STEP 2 */
+
+ /* Reverse Channel Amplitude to mid level and transition time */
+ max9286_write_reg(max9286_data, 0x3B, 0x1E); /* STEP 3 */
+ msleep(2); /* STEP 4 */
+
+ /* Enable MAX9271 Configuration Link */
+ max9271_write_reg(max9286_data, 0, 0x04, 0x43); /* STEP 5 */
+ msleep(2); /* STEP 6 */
+
+ /* Increase serializer reverse channel input thresholds */
+ max9271_write_reg(max9286_data, 0, 0x08, 0x01); /* STEP 7 */
+ msleep(2); /* STEP 8 */
+
+ /* Reverse Channel Amplitude level */
+ max9286_write_reg(max9286_data, 0x3B, 0x19); /* STEP 9 */
+ msleep(5); /* STEP 10 */
+
+ /* Set YUV422 8 bits mode, Double Data Rate, 4 data lane */
+ max9286_write_reg(max9286_data, 0x12, 0xF3); /* STEP 12 */
+
+ max9286_write_reg(max9286_data, 0x01, 0x02); /* STEP 13 */
+ /* Enable All Link 0-3 */
+ max9286_write_reg(max9286_data, 0x00, 0xef); /* STEP 14 */
+
+ /* Frame Sync */
+ /* Automatic Mode */
+ max9286_write_reg(max9286_data, 0x01, 0x02);/* STEP 13 */
+ msleep(200);
+ /* Detect link */
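+ /*
+ * Register 0x49 appears to report per-link lock status in two
+ * nibbles; OR-ing them yields a bitmap (bit n = camera on link n)
+ * that is kept as sensor_is_there for the rest of the driver.
+ */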
+ max9286_data->sensor_num = 0;
+ reg = max9286_read_reg(max9286_data, 0x49);
+ max9286_data->sensor_is_there = ((reg >> 4) & 0xF) | (reg & 0xF);
+ if (max9286_data->sensor_is_there & (0x1 << 0))
+ max9286_data->sensor_num += 1;
+ if (max9286_data->sensor_is_there & (0x1 << 1))
+ max9286_data->sensor_num += 1;
+ if (max9286_data->sensor_is_there & (0x1 << 2))
+ max9286_data->sensor_num += 1;
+ if (max9286_data->sensor_is_there & (0x1 << 3))
+ max9286_data->sensor_num += 1;
+ pr_info("max9286_mipi: reg = 0x%02x.\n", reg);
+ pr_info("max9286_mipi: sensor number = %d.\n", max9286_data->sensor_num);
+
+ if (max9286_data->sensor_num == 0) {
+ pr_err("%s: no camera connected.\n", __func__);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void max9286_camera_reorder(struct sensor_data *max9286_data)
+{
+ u8 reg;
+
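+ /*
+ * Register 0x0B routes GMSL links to CSI-2 virtual channels: each
+ * 2-bit field selects the link output on one VC. 0xE4 = 0b11100100,
+ * i.e. the identity mapping VC0<-link0, VC1<-link1, VC2<-link2,
+ * VC3<-link3. The values below pack the detected links onto the
+ * lowest VCs; e.g. with only link 3 present, 0x27 = 0b00100111 puts
+ * link 3 on VC0. (Field layout inferred from these tables.)
+ */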
+ reg = 0xE4;
+ if (max9286_data->sensor_num == 1) {
+ switch (max9286_data->sensor_is_there) {
+ case 0x8:
+ reg = 0x27;
+ break;
+ case 0x4:
+ reg = 0xC6;
+ break;
+ case 0x2:
+ reg = 0xE1;
+ break;
+ case 0x1:
+ default:
+ reg = 0xE4;
+ break;
+ }
+ } else if (max9286_data->sensor_num == 2) {
+ switch (max9286_data->sensor_is_there) {
+ case 0xC:
+ reg = 0x4E;
+ break;
+ case 0xA:
+ reg = 0x72;
+ break;
+ case 0x9:
+ reg = 0x78;
+ break;
+ case 0x6:
+ reg = 0xD2;
+ break;
+ case 0x5:
+ reg = 0xD8;
+ break;
+ case 0x3:
+ default:
+ reg = 0xE4;
+ break;
+ }
+ } else if (max9286_data->sensor_num == 3) {
+ switch (max9286_data->sensor_is_there) {
+ case 0xE:
+ reg = 0x93;
+ break;
+ case 0xD:
+ reg = 0x9C;
+ break;
+ case 0xB:
+ reg = 0xB4;
+ break;
+ case 0x7:
+ default:
+ reg = 0xE4;
+ break;
+ }
+ }
+ max9286_write_reg(max9286_data, 0x0B, reg);
+}
+
+static int max9286_hardware_init(struct sensor_data *max9286_data)
+{
+ int retval = 0;
+ int i;
+ u8 reg, sensor_addr = 0;
+
+ dev_info(&max9286_data->i2c_client->dev, "In %s()\n", __func__);
+
+ /* Disable PRBS test */
+ max9286_write_reg(max9286_data, 0x0E, 0x50);
+
+ /* reorder camera */
+ max9286_camera_reorder(max9286_data);
+
+ /* Enable all links */
+ reg = 0xE0 | max9286_data->sensor_is_there;
+ max9286_write_reg(max9286_data, 0x00, reg);
+
+ /* Set up links */
+ sensor_addr = ADDR_OV_SENSOR;
+ max9271_write_reg(max9286_data, 0, 0x07, 0x84);
+ /* STEP 15-46 */
+ reg = 0;
+ for (i = 1; i <= MAX9271_MAX_SENSOR_NUM; i++) {
+ if (((0x1 << (i - 1)) & max9286_data->sensor_is_there) == 0)
+ continue;
+
+ /* Enable Link control channel */
+ reg |= (0x11 << (i - 1));
+ max9286_write_reg(max9286_data, 0x0A, reg);/* STEP 15 */
+
+ /* Set MAX9271 new address for link 0 */
+ max9271_write_reg(max9286_data, 0, 0x00, (ADDR_MAX9271 + i) << 1);
+ msleep(2);
+
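+ /*
+ * Per the MAX9271 register map as used here: reg 0x01 holds the
+ * deserializer address, regs 0x09/0x0A set up I2C address
+ * translation for the sensor (unique alias -> real sensor address),
+ * and regs 0x0B/0x0C do the same for the broadcast alias. This lets
+ * each remote camera be reached at its own address on the shared bus.
+ */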
+ max9271_write_reg(max9286_data, i, 0x01, ADDR_MAX9286 << 1);
+ max9271_write_reg(max9286_data, i, 0x09, (sensor_addr + i) << 1);
+ max9271_write_reg(max9286_data, i, 0x0A, sensor_addr << 1);
+ max9271_write_reg(max9286_data, i, 0x0B, ADDR_MAX9271_ALL << 1);
+ max9271_write_reg(max9286_data, i, 0x0C, (ADDR_MAX9271 + i) << 1);
+
+ msleep(1);
+ pr_info("max9286_mipi: initialized sensor = 0x%02x.\n", i);
+ max9271_dump_registers(max9286_data, i);
+ }
+ max9286_write_reg(max9286_data, 0x0A, reg);
+ max9286_write_reg(max9286_data, 0x0A, reg);
+
+ /* Disable Local Auto I2C ACK */
+ max9286_write_reg(max9286_data, 0x34, 0x36); /* STEP 48 */
+
+ /* Initialize Camera Sensor */
+ /* STEP 49 */
+ if (max9286_data->sensor_is_there & (0x1 << 0)) {
+ retval = ov10635_check_device(max9286_data, 1);
+ if (retval < 0)
+ return retval;
+ ov10635_initialize(max9286_data, 0);
+ }
+
+ if (max9286_data->sensor_is_there & (0x1 << 1)) {
+ retval = ov10635_check_device(max9286_data, 2);
+ if (retval < 0)
+ return retval;
+ ov10635_initialize(max9286_data, 1);
+ }
+
+ if (max9286_data->sensor_is_there & (0x1 << 2)) {
+ retval = ov10635_check_device(max9286_data, 3);
+ if (retval < 0)
+ return retval;
+ ov10635_initialize(max9286_data, 2);
+ }
+
+ if (max9286_data->sensor_is_there & (0x1 << 3)) {
+ retval = ov10635_check_device(max9286_data, 4);
+ if (retval < 0)
+ return retval;
+ ov10635_initialize(max9286_data, 3);
+ }
+
+ /* Enable Local Auto I2C ACK */
+ max9286_write_reg(max9286_data, 0x34, 0xB6); /* STEP 50 */
+
+ /* MAX9271: Enable Serial Links and Disable Configuration Link */
+ max9271_write_reg(max9286_data, ADDR_MAX9271_ALL - ADDR_MAX9271, 0x04, 0x83); /* STEP 51 */
+ /* Wait for more than 2 frame time */
+ msleep(1000); /* STEP 52 */
+
+ /* Enable CSI output, set virtual channel according to the link number */
+ max9286_write_reg(max9286_data, 0x15, 0x9B); /* STEP 53 */
+ msleep(10);
+ return retval;
+}
+
+static int ov10635_change_mode(struct sensor_data *max9286_data)
+{
+ struct reg_value *reg_setting = NULL;
+ enum ov10635_mode mode = max9286_data->current_mode;
+ enum ov10635_frame_rate rate =
+ to_ov10635_frame_rate(&max9286_data->frame_interval);
+ int arysize = 0, retval = 0;
+
+ if (mode > ov10635_mode_MAX || mode < ov10635_mode_MIN) {
+ pr_err("Wrong ov10635 mode detected!\n");
+ return -EINVAL;
+ }
+
+ reg_setting = ov10635_mode_info_data[rate][mode].init_data_ptr;
+ arysize = ov10635_mode_info_data[rate][mode].init_data_size;
+
+ max9286_data->format.width = ov10635_mode_info_data[rate][mode].width;
+ max9286_data->format.height = ov10635_mode_info_data[rate][mode].height;
+
+ if (max9286_data->format.width == 0 ||
+ max9286_data->format.height == 0 ||
+ !reg_setting || arysize == 0) {
+ pr_err("Not support mode=%d %s\n", mode,
+ (rate == 0) ? "15(fps)" : "30(fps)");
+ return -EINVAL;
+ }
+
+ retval = ov10635_download_firmware(max9286_data, 0, reg_setting, arysize);
+
+ return retval;
+}
+
+static int max9286_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ struct sensor_data *max9286_data = subdev_to_sensor_data(sd);
+
+ code->code = max9286_data->format.code;
+ return 0;
+}
+
+/*
+ * max9286_enum_framesizes - V4L2 sensor interface handler for
+ * VIDIOC_ENUM_FRAMESIZES ioctl
+ * @s: pointer to standard V4L2 device structure
+ * @fsize: standard V4L2 VIDIOC_ENUM_FRAMESIZES ioctl structure
+ *
+ * Return 0 if successful, otherwise -EINVAL.
+ */
+static int max9286_enum_framesizes(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ if (fse->index > ov10635_mode_MAX)
+ return -EINVAL;
+
+ fse->max_width = max(ov10635_mode_info_data[0][fse->index].width,
+ ov10635_mode_info_data[1][fse->index].width);
+ fse->min_width = fse->max_width;
+
+ fse->max_height = max(ov10635_mode_info_data[0][fse->index].height,
+ ov10635_mode_info_data[1][fse->index].height);
+ fse->min_height = fse->max_height;
+
+ return 0;
+}
+
+static int max9286_enum_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ int i, j, count;
+
+ if (fie->index > ov10635_mode_MAX)
+ return -EINVAL;
+
+ if (fie->width == 0 || fie->height == 0 || fie->code == 0) {
+ pr_warn("Please assign pixel format, width and height.\n");
+ return -EINVAL;
+ }
+
+ fie->interval.numerator = 1;
+
+ /* TODO Reserved to extension */
+ count = 0;
+ for (i = 0; i < ARRAY_SIZE(ov10635_framerates); i++) {
+ for (j = 0; j < (ov10635_mode_MAX + 1); j++) {
+ if (fie->width == ov10635_mode_info_data[i][j].width &&
+ fie->height == ov10635_mode_info_data[i][j].height &&
+ ov10635_mode_info_data[i][j].init_data_ptr)
+ count++;
+
+ if (fie->index == (count - 1)) {
+ fie->interval.denominator = ov10635_framerates[i];
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int max9286_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct sensor_data *max9286_data = subdev_to_sensor_data(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+
+ if (fmt->pad)
+ return -EINVAL;
+
+ mf->code = max9286_data->format.code;
+ mf->width = max9286_data->format.width;
+ mf->height = max9286_data->format.height;
+ mf->colorspace = max9286_data->format.colorspace;
+ mf->field = max9286_data->format.field;
+ mf->reserved[0] = max9286_data->format.reserved[0];
+
+ return 0;
+}
+
+static struct ov10635_mode_info *get_max_resolution(enum ov10635_frame_rate rate)
+{
+ u32 max_width;
+ enum ov10635_mode mode;
+ int i;
+
+ mode = 0;
+ max_width = ov10635_mode_info_data[rate][0].width;
+
+ for (i = 0; i < (ov10635_mode_MAX + 1); i++) {
+ if (ov10635_mode_info_data[rate][i].width > max_width) {
+ max_width = ov10635_mode_info_data[rate][i].width;
+ mode = i;
+ }
+ }
+ return &ov10635_mode_info_data[rate][mode];
+}
+
+static struct ov10635_mode_info *match(struct v4l2_mbus_framefmt *fmt,
+ enum ov10635_frame_rate rate)
+{
+ struct ov10635_mode_info *info = NULL;
+ int i;
+
+ for (i = 0; i < (ov10635_mode_MAX + 1); i++) {
+ if (fmt->width == ov10635_mode_info_data[rate][i].width &&
+ fmt->height == ov10635_mode_info_data[rate][i].height) {
+ info = &ov10635_mode_info_data[rate][i];
+ break;
+ }
+ }
+
+ return info;
+}
+
+static bool try_to_find_resolution(struct sensor_data *sensor,
+ const enum ov10635_frame_rate fr,
+ struct v4l2_mbus_framefmt *mf)
+{
+ enum ov10635_mode mode = sensor->current_mode;
+ enum ov10635_frame_rate frame_rate = fr;
+ struct device *dev = &sensor->i2c_client->dev;
+ struct ov10635_mode_info *info;
+ bool found = false;
+
+ if ((mf->width == ov10635_mode_info_data[frame_rate][mode].width) &&
+ (mf->height == ov10635_mode_info_data[frame_rate][mode].height)) {
+ info = &ov10635_mode_info_data[frame_rate][mode];
+ found = true;
+ } else {
+ /* look up the mode matching the user's width and height */
+ info = match(mf, frame_rate);
+ if (!info) {
+ frame_rate ^= 0x1;
+ info = match(mf, frame_rate);
+ if (info) {
+ sensor->current_mode = -1;
+ dev_err(dev, "%s %dx%d only support %s(fps)\n",
+ __func__,
+ info->width, info->height,
+ (frame_rate == 0) ? "15fps" : "30fps");
+ return false;
+ }
+ goto max_resolution;
+ }
+ found = true;
+ }
+
+ /* get max resolution to resize */
+max_resolution:
+ if (!found) {
+ frame_rate ^= 0x1;
+ info = get_max_resolution(frame_rate);
+ }
+
+ sensor->current_mode = info->mode;
+ sensor->frame_interval.denominator = (frame_rate) ? 30 : 15;
+ sensor->format.width = info->width;
+ sensor->format.height = info->height;
+
+ return found;
+}
+
+static int max9286_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct sensor_data *max9286_data = subdev_to_sensor_data(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ enum ov10635_frame_rate frame_rate = max9286_data->current_fr;
+ int ret;
+
+ if (fmt->pad)
+ return -EINVAL;
+
+ mf->code = max9286_data->format.code;
+ mf->colorspace = max9286_data->format.colorspace;
+ mf->field = V4L2_FIELD_NONE;
+
+ try_to_find_resolution(max9286_data, frame_rate, mf);
+
+ if (fmt->which == V4L2_SUBDEV_FORMAT_TRY)
+ return 0;
+
+ ret = ov10635_change_mode(max9286_data);
+
+ return ret;
+}
+
+static int max9286_get_frame_desc(struct v4l2_subdev *sd, unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd)
+{
+ return 0;
+}
+
+static int max9286_set_frame_desc(struct v4l2_subdev *sd,
+ unsigned int pad,
+ struct v4l2_mbus_frame_desc *fd)
+{
+ return 0;
+}
+
+static int max9286_set_power(struct v4l2_subdev *sd, int on)
+{
+ return 0;
+}
+
+static int ov10635_try_frame_interval(struct sensor_data *sensor,
+ struct v4l2_fract *fi,
+ u32 width, u32 height)
+{
+ enum ov10635_frame_rate rate = OV10635_15_FPS;
+ int minfps, maxfps, best_fps, fps;
+ int i;
+
+ minfps = ov10635_framerates[OV10635_15_FPS];
+ maxfps = ov10635_framerates[OV10635_30_FPS];
+
+ if (fi->numerator == 0) {
+ fi->denominator = ov10635_framerates[OV10635_30_FPS];
+ fi->numerator = 1;
+ rate = OV10635_30_FPS;
+ goto out;
+ }
+
+ fps = clamp_val(DIV_ROUND_CLOSEST(fi->denominator, fi->numerator),
+ minfps, maxfps);
+
+ best_fps = minfps;
+ for (i = 0; i < ARRAY_SIZE(ov10635_framerates); i++) {
+ int curr_fps = ov10635_framerates[i];
+
+ if (abs(curr_fps - fps) < abs(best_fps - fps)) {
+ best_fps = curr_fps;
+ rate = i;
+ }
+ }
+
+ fi->numerator = 1;
+ fi->denominator = best_fps;
+
+out:
+ return rate;
+}
+
+static int max9286_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct sensor_data *max9286_data = subdev_to_sensor_data(sd);
+
+ mutex_lock(&max9286_data->lock);
+ fi->interval = max9286_data->frame_interval;
+ mutex_unlock(&max9286_data->lock);
+
+ return 0;
+}
+
+static int max9286_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *fi)
+{
+ struct sensor_data *max9286_data = subdev_to_sensor_data(sd);
+ enum ov10635_mode mode = max9286_data->current_mode;
+ enum ov10635_frame_rate fr = max9286_data->current_fr;
+ struct v4l2_mbus_framefmt mf;
+ bool found = false;
+ int frame_rate, ret = 0;
+
+ if (fi->pad != 0)
+ return -EINVAL;
+
+ mutex_lock(&max9286_data->lock);
+
+ memset(&mf, 0, sizeof(mf));
+ mf.width = ov10635_mode_info_data[fr][mode].width;
+ mf.height = ov10635_mode_info_data[fr][mode].height;
+ frame_rate = ov10635_try_frame_interval(max9286_data, &fi->interval,
+ mf.width, mf.height);
+ if (frame_rate < 0) {
+ fi->interval = max9286_data->frame_interval;
+ goto out;
+ }
+
+ mf.width = ov10635_mode_info_data[frame_rate][mode].width;
+ mf.height = ov10635_mode_info_data[frame_rate][mode].height;
+ found = try_to_find_resolution(max9286_data, frame_rate, &mf);
+ if (!found) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ max9286_data->current_fr = frame_rate;
+ max9286_data->frame_interval = fi->interval;
+
+out:
+ mutex_unlock(&max9286_data->lock);
+ return ret;
+}
+
+static int max9286_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct sensor_data *max9286_data = subdev_to_sensor_data(sd);
+
+ dev_dbg(sd->dev, "%s\n", __func__);
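+ /*
+ * The running counter appears to refcount s_stream calls (one per
+ * consumer of the shared CSI output), so the output is enabled on
+ * the first start and disabled only when the last user stops.
+ */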
+ if (enable) {
+ if (!max9286_data->running++) {
+ /*
+ * Enable CSI output, set virtual channel
+ * according to the link number
+ */
+ max9286_write_reg(max9286_data, 0x15, 0x9B);
+ }
+
+ } else {
+
+ if (!--max9286_data->running) {
+ /* Disable CSI Output */
+ max9286_write_reg(max9286_data, 0x15, 0x03);
+ }
+ }
+
+ return 0;
+}
+
+static int max9286_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote,
+ u32 flags)
+{
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops max9286_pad_ops = {
+ .enum_mbus_code = max9286_enum_mbus_code,
+ .enum_frame_size = max9286_enum_framesizes,
+ .enum_frame_interval = max9286_enum_frame_interval,
+ .get_fmt = max9286_get_fmt,
+ .set_fmt = max9286_set_fmt,
+ .get_frame_desc = max9286_get_frame_desc,
+ .set_frame_desc = max9286_set_frame_desc,
+};
+
+static const struct v4l2_subdev_core_ops max9286_core_ops = {
+ .s_power = max9286_set_power,
+};
+
+static const struct v4l2_subdev_video_ops max9286_video_ops = {
+ .g_frame_interval = max9286_g_frame_interval,
+ .s_frame_interval = max9286_s_frame_interval,
+ .s_stream = max9286_s_stream,
+};
+
+static const struct v4l2_subdev_ops max9286_subdev_ops = {
+ .core = &max9286_core_ops,
+ .pad = &max9286_pad_ops,
+ .video = &max9286_video_ops,
+};
+
+static const struct media_entity_operations max9286_sd_media_ops = {
+ .link_setup = max9286_link_setup,
+};
+
+static ssize_t analog_test_pattern_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct sensor_data *max9286_data = subdev_to_sensor_data(sd);
+ u8 val = 0;
+
+ ov10635_read_reg(max9286_data, 0, 0x370A, &val);
+ return sprintf(buf, "%s\n", (val & 0x4) ? "enabled" : "disabled");
+}
+
+static ssize_t analog_test_pattern_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct sensor_data *max9286_data = subdev_to_sensor_data(sd);
+ char enabled[32];
+
+ if (sscanf(buf, "%31s", enabled) > 0) {
+ if (strcmp(enabled, "enable") == 0)
+ ov10635_write_reg(max9286_data, 0, 0x370A, 0x4);
+ else
+ ov10635_write_reg(max9286_data, 0, 0x370A, 0x0);
+ return count;
+ }
+ return -EINVAL;
+}
+
+static DEVICE_ATTR_RW(analog_test_pattern);
+
+/*!
+ * max9286 I2C probe function
+ *
+ * @param client struct i2c_client *
+ * @param id struct i2c_device_id *
+ * @return Error code indicating success or failure
+ */
+static int max9286_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct sensor_data *max9286_data;
+ struct v4l2_subdev *sd;
+ int retval;
+
+ max9286_data = devm_kzalloc(dev, sizeof(*max9286_data), GFP_KERNEL);
+ if (!max9286_data)
+ return -ENOMEM;
+
+ /* Set initial values for the sensor struct. */
+ max9286_data->sensor_clk = devm_clk_get(dev, "capture_mclk");
+ if (IS_ERR(max9286_data->sensor_clk)) {
+ dev_err(dev, "capture_mclk clock missing or invalid\n");
+ return PTR_ERR(max9286_data->sensor_clk);
+ }
+
+ retval = of_property_read_u32(dev->of_node, "mclk", &max9286_data->mclk);
+ if (retval) {
+ dev_err(dev, "mclk missing or invalid\n");
+ return retval;
+ }
+
+ retval = of_property_read_u32(dev->of_node, "mclk_source", (u32 *)&max9286_data->mclk_source);
+ if (retval) {
+ dev_err(dev, "mclk_source missing or invalid\n");
+ return retval;
+ }
+
+ /* request power down pin */
+ max9286_data->pwn_gpio = devm_gpiod_get_optional(dev, "pwn-gpios",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(max9286_data->pwn_gpio))
+ return PTR_ERR(max9286_data->pwn_gpio);
+
+ max9286_hw_reset(max9286_data);
+
+ clk_prepare_enable(max9286_data->sensor_clk);
+
+ mutex_init(&max9286_data->lock);
+
+ max9286_data->i2c_client = client;
+ max9286_data->format.code = MEDIA_BUS_FMT_YUYV8_1X16;
+ max9286_data->format.width = ov10635_mode_info_data[1][0].width;
+ max9286_data->format.height = ov10635_mode_info_data[1][0].height;
+ max9286_data->format.colorspace = V4L2_COLORSPACE_JPEG;
+
+ /*
+ * Pass the MIPI PHY clock rate in Mbps:
+ * fcsi2 = PCLK * WIDTH * CHANNELS / LANES
+ * = 72 MHz PCLK * 8 bit * 4 channels / 4 lanes = 576 Mbps
+ */
+ max9286_data->format.reserved[0] = 72 * 8;
+ max9286_data->format.field = V4L2_FIELD_NONE;
+ max9286_data->current_mode = 0;
+ max9286_data->frame_interval.denominator = 30;
+ max9286_data->frame_interval.numerator = 1;
+ max9286_data->is_mipi = 1;
+
+ retval = max9286_read_reg(max9286_data, 0x1e);
+ if (retval != 0x40) {
+ pr_warn("max9286 is not found, chip id reg 0x1e = 0x(%x)\n", retval);
+ clk_disable_unprepare(max9286_data->sensor_clk);
+ return -ENODEV;
+ }
+
+ max9286_hardware_preinit(max9286_data);
+
+ if (max9286_data->sensor_num == 0) {
+ pr_warn("cameras are not found,\n");
+ clk_disable_unprepare(max9286_data->sensor_clk);
+ return -ENODEV;
+ }
+
+ max9286_data->frame_interval.denominator = 30;
+ max9286_data->frame_interval.numerator = 1;
+ max9286_data->v_channel = 0;
+ max9286_data->cap_mode.clip_top = 0;
+ max9286_data->cap_mode.clip_left = 0;
+
+ max9286_data->cap_mode.clip_height = 800;
+ max9286_data->cap_mode.clip_width = 1280;
+
+ max9286_data->cap_mode.hlen = max9286_data->cap_mode.clip_width;
+
+ max9286_data->cap_mode.hfp = 0;
+ max9286_data->cap_mode.hbp = 0;
+ max9286_data->cap_mode.hsync = 625;
+ max9286_data->cap_mode.vlen = 800;
+ max9286_data->cap_mode.vfp = 0;
+ max9286_data->cap_mode.vbp = 0;
+ max9286_data->cap_mode.vsync = 40;
+ max9286_data->cap_mode.vlen1 = 0;
+ max9286_data->cap_mode.vfp1 = 0;
+ max9286_data->cap_mode.vbp1 = 0;
+ max9286_data->cap_mode.vsync1 = 0;
+ max9286_data->cap_mode.pixelclock = 27000000;
+
+ sd = &max9286_data->subdev;
+ v4l2_i2c_subdev_init(sd, client, &max9286_subdev_ops);
+ sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+
+ sd->entity.function = MEDIA_ENT_F_CAM_SENSOR;
+ max9286_data->pads[MIPI_CSI2_SENS_VC0_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ max9286_data->pads[MIPI_CSI2_SENS_VC1_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ max9286_data->pads[MIPI_CSI2_SENS_VC2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ max9286_data->pads[MIPI_CSI2_SENS_VC3_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ retval = media_entity_pads_init(&sd->entity, MIPI_CSI2_SENS_VCX_PADS_NUM,
+ max9286_data->pads);
+ if (retval < 0)
+ return retval;
+
+ max9286_data->subdev.entity.ops = &max9286_sd_media_ops;
+ retval = v4l2_async_register_subdev(&max9286_data->subdev);
+ if (retval < 0) {
+ dev_err(dev, "Async register failed, ret=(%d)\n", retval);
+ media_entity_cleanup(&sd->entity);
+ return retval;
+ }
+
+ retval = max9286_hardware_init(max9286_data);
+ if (retval < 0) {
+ dev_err(dev, "camera init failed\n");
+ clk_disable_unprepare(max9286_data->sensor_clk);
+ media_entity_cleanup(&sd->entity);
+ v4l2_async_unregister_subdev(sd);
+ return retval;
+ }
+
+ max9286_data->running = 0;
+
+ /* Disable CSI Output */
+ max9286_write_reg(max9286_data, 0x15, 0x03);
+
+ /* Create device attribute in sysfs */
+ retval = device_create_file(&client->dev, &dev_attr_analog_test_pattern);
+ if (retval < 0) {
+ dev_err(dev, "%s: create device file fail\n", __func__);
+ return retval;
+ }
+
+ dev_info(dev, "max9286_mipi is found, name %s\n", sd->name);
+ return retval;
+}
+
+/*!
+ * max9286 I2C detach function
+ *
+ * @param client struct i2c_client *
+ * @return Error code indicating success or failure
+ */
+static int max9286_remove(struct i2c_client *client)
+{
+ struct v4l2_subdev *sd = i2c_get_clientdata(client);
+ struct sensor_data *max9286_data = subdev_to_sensor_data(sd);
+
+ clk_disable_unprepare(max9286_data->sensor_clk);
+ device_remove_file(&client->dev, &dev_attr_analog_test_pattern);
+ media_entity_cleanup(&sd->entity);
+ v4l2_async_unregister_subdev(sd);
+
+ return 0;
+}
+
+static const struct i2c_device_id max9286_id[] = {
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, max9286_id);
+
+static const struct of_device_id max9286_of_match[] = {
+ { .compatible = "maxim,max9286_mipi" },
+ { /* sentinel */ }
+};
+
+static struct i2c_driver max9286_i2c_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "max9286_mipi",
+ .of_match_table = of_match_ptr(max9286_of_match),
+ },
+ .probe = max9286_probe,
+ .remove = max9286_remove,
+ .id_table = max9286_id,
+};
+
+module_i2c_driver(max9286_i2c_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("MAX9286 GMSL Deserializer Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+MODULE_ALIAS("CSI");
diff --git a/drivers/staging/media/imx/imx8-common.h b/drivers/staging/media/imx/imx8-common.h
new file mode 100644
index 000000000000..ddfbcc0fd7bf
--- /dev/null
+++ b/drivers/staging/media/imx/imx8-common.h
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * V4L2 Capture ISI subdev for i.MX8QXP/QM platform
+ *
+ * ISI is an Image Sensor Interface on the i.MX8QXP/QM platforms, used
+ * to process images from a camera sensor to memory or DC
+ *
+ * Copyright (c) 2019 NXP Semiconductor
+ *
+ */
+
+#ifndef __MXC_COMMON_H__
+#define __MXC_COMMON_H__
+
+#define ISI_OF_NODE_NAME "isi"
+#define MIPI_CSI2_OF_NODE_NAME "csi"
+#define PARALLEL_OF_NODE_NAME "pcsi"
+
+#define MXC_ISI_MAX_DEVS 8
+#define MXC_MIPI_CSI2_MAX_DEVS 2
+#define MXC_MAX_SENSORS 3
+
+/* ISI PADS */
+#define MXC_ISI_SD_PAD_SINK_MIPI0_VC0 0
+#define MXC_ISI_SD_PAD_SINK_MIPI0_VC1 1
+#define MXC_ISI_SD_PAD_SINK_MIPI0_VC2 2
+#define MXC_ISI_SD_PAD_SINK_MIPI0_VC3 3
+#define MXC_ISI_SD_PAD_SINK_MIPI1_VC0 4
+#define MXC_ISI_SD_PAD_SINK_MIPI1_VC1 5
+#define MXC_ISI_SD_PAD_SINK_MIPI1_VC2 6
+#define MXC_ISI_SD_PAD_SINK_MIPI1_VC3 7
+
+#define MXC_ISI_SD_PAD_SINK_DC0 8
+#define MXC_ISI_SD_PAD_SINK_DC1 9
+#define MXC_ISI_SD_PAD_SINK_HDMI 10
+#define MXC_ISI_SD_PAD_SINK_MEM 11
+#define MXC_ISI_SD_PAD_SOURCE_MEM 12
+#define MXC_ISI_SD_PAD_SOURCE_DC0 13
+#define MXC_ISI_SD_PAD_SOURCE_DC1 14
+#define MXC_ISI_SD_PAD_SINK_PARALLEL_CSI 15
+#define MXC_ISI_SD_PADS_NUM 16
+
+/* MIPI CSI PADS */
+#define MXC_MIPI_CSI2_VC0_PAD_SINK 0
+#define MXC_MIPI_CSI2_VC1_PAD_SINK 1
+#define MXC_MIPI_CSI2_VC2_PAD_SINK 2
+#define MXC_MIPI_CSI2_VC3_PAD_SINK 3
+
+#define MXC_MIPI_CSI2_VC0_PAD_SOURCE 4
+#define MXC_MIPI_CSI2_VC1_PAD_SOURCE 5
+#define MXC_MIPI_CSI2_VC2_PAD_SOURCE 6
+#define MXC_MIPI_CSI2_VC3_PAD_SOURCE 7
+#define MXC_MIPI_CSI2_VCX_PADS_NUM 8
+
+/* Parallel CSI PADS */
+#define MXC_PARALLEL_CSI_PAD_SOURCE 0
+#define MXC_PARALLEL_CSI_PAD_SINK 1
+#define MXC_PARALLEL_CSI_PADS_NUM 2
+
+#define ISI_2K 2048
+
+enum {
+ IN_PORT,
+ SUB_IN_PORT,
+ OUT_PORT,
+ MAX_PORTS,
+};
+
+enum isi_input_interface {
+ ISI_INPUT_INTERFACE_DC0 = 0,
+ ISI_INPUT_INTERFACE_DC1,
+ ISI_INPUT_INTERFACE_MIPI0_CSI2,
+ ISI_INPUT_INTERFACE_MIPI1_CSI2,
+ ISI_INPUT_INTERFACE_HDMI,
+ ISI_INPUT_INTERFACE_MEM,
+ ISI_INPUT_INTERFACE_PARALLEL_CSI,
+ ISI_INPUT_INTERFACE_MAX,
+};
+
+enum isi_input_sub_interface {
+ ISI_INPUT_SUB_INTERFACE_VC0 = 0,
+ ISI_INPUT_SUB_INTERFACE_VC1,
+ ISI_INPUT_SUB_INTERFACE_VC2,
+ ISI_INPUT_SUB_INTERFACE_VC3,
+};
+
+enum isi_output_interface {
+ ISI_OUTPUT_INTERFACE_DC0 = 0,
+ ISI_OUTPUT_INTERFACE_DC1,
+ ISI_OUTPUT_INTERFACE_MEM,
+ ISI_OUTPUT_INTERFACE_MAX,
+};
+
+enum mxc_isi_buf_id {
+ MXC_ISI_BUF1 = 0x0,
+ MXC_ISI_BUF2,
+};
+
+#endif /* __MXC_COMMON_H__ */
diff --git a/drivers/staging/media/imx/imx8-isi-cap.c b/drivers/staging/media/imx/imx8-isi-cap.c
new file mode 100644
index 000000000000..6ba5b3a86f49
--- /dev/null
+++ b/drivers/staging/media/imx/imx8-isi-cap.c
@@ -0,0 +1,1795 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * V4L2 Capture ISI subdev driver for i.MX8QXP/QM platform
+ *
+ * ISI is an Image Sensor Interface on the i.MX8QXP/QM platforms, used
+ * to process images from a camera sensor to memory or DC
+ *
+ * Copyright (c) 2019 NXP Semiconductor
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/pm_runtime.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/of_graph.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "imx8-isi-hw.h"
+#include "imx8-common.h"
+
+#define sd_to_cap_dev(ptr) container_of(ptr, struct mxc_isi_cap_dev, sd)
+
+struct mxc_isi_fmt mxc_isi_out_formats[] = {
+ {
+ .name = "RGB565",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .depth = { 16 },
+ .color = MXC_ISI_OUT_FMT_RGB565,
+ .memplanes = 1,
+ .colplanes = 1,
+ .mbus_code = MEDIA_BUS_FMT_RGB565_1X16,
+ }, {
+ .name = "RGB24",
+ .fourcc = V4L2_PIX_FMT_RGB24,
+ .depth = { 24 },
+ .color = MXC_ISI_OUT_FMT_BGR32P,
+ .memplanes = 1,
+ .colplanes = 1,
+ .mbus_code = MEDIA_BUS_FMT_RGB888_1X24,
+ }, {
+ .name = "BGR24",
+ .fourcc = V4L2_PIX_FMT_BGR24,
+ .depth = { 24 },
+ .color = MXC_ISI_OUT_FMT_RGB32P,
+ .memplanes = 1,
+ .colplanes = 1,
+ .mbus_code = MEDIA_BUS_FMT_BGR888_1X24,
+ }, {
+ .name = "YUYV-16",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = { 16 },
+ .color = MXC_ISI_OUT_FMT_YUV422_1P8P,
+ .memplanes = 1,
+ .colplanes = 1,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_1X16,
+ }, {
+ .name = "YUV32 (X-Y-U-V)",
+ .fourcc = V4L2_PIX_FMT_YUV32,
+ .depth = { 32 },
+ .color = MXC_ISI_OUT_FMT_YUV444_1P8,
+ .memplanes = 1,
+ .colplanes = 1,
+ .mbus_code = MEDIA_BUS_FMT_AYUV8_1X32,
+ }, {
+ .name = "NV12 (YUYV)",
+ .fourcc = V4L2_PIX_FMT_NV12,
+ .depth = { 8, 8 },
+ .color = MXC_ISI_OUT_FMT_YUV420_2P8P,
+ .memplanes = 2,
+ .colplanes = 2,
+ .mbus_code = MEDIA_BUS_FMT_YUYV8_1X16,
+ }, {
+ .name = "YUV444M (Y-U-V)",
+ .fourcc = V4L2_PIX_FMT_YUV444M,
+ .depth = { 8, 8, 8 },
+ .color = MXC_ISI_OUT_FMT_YUV444_3P8P,
+ .memplanes = 3,
+ .colplanes = 3,
+ .mbus_code = MEDIA_BUS_FMT_YUV8_1X24,
+ }, {
+ .name = "xBGR32",
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .depth = { 32 },
+ .color = MXC_ISI_OUT_FMT_XRGB32,
+ .memplanes = 1,
+ .colplanes = 1,
+ .mbus_code = MEDIA_BUS_FMT_RGB888_1X24,
+ }, {
+ .name = "ABGR32",
+ .fourcc = V4L2_PIX_FMT_ABGR32,
+ .depth = { 32 },
+ .color = MXC_ISI_OUT_FMT_ARGB32,
+ .memplanes = 1,
+ .colplanes = 1,
+ .mbus_code = MEDIA_BUS_FMT_RGB888_1X24,
+ }
+};
+
+/*
+ * Pixel link input format
+ */
+struct mxc_isi_fmt mxc_isi_src_formats[] = {
+ {
+ .name = "RGB32",
+ .fourcc = V4L2_PIX_FMT_RGB32,
+ .depth = { 32 },
+ .memplanes = 1,
+ .colplanes = 1,
+ }, {
+ .name = "YUV32 (X-Y-U-V)",
+ .fourcc = V4L2_PIX_FMT_YUV32,
+ .depth = { 32 },
+ .memplanes = 1,
+ .colplanes = 1,
+ }
+};
+
+struct mxc_isi_fmt *mxc_isi_get_format(unsigned int index)
+{
+ return &mxc_isi_out_formats[index];
+}
+
+/*
+ * lookup mxc_isi color format by fourcc or media bus format
+ */
+struct mxc_isi_fmt *mxc_isi_find_format(const u32 *pixelformat,
+ const u32 *mbus_code, int index)
+{
+ struct mxc_isi_fmt *fmt, *def_fmt = NULL;
+ unsigned int i;
+ int id = 0;
+
+ if (index >= (int)ARRAY_SIZE(mxc_isi_out_formats))
+ return NULL;
+
+ for (i = 0; i < ARRAY_SIZE(mxc_isi_out_formats); i++) {
+ fmt = &mxc_isi_out_formats[i];
+ if (pixelformat && fmt->fourcc == *pixelformat)
+ return fmt;
+ if (mbus_code && fmt->mbus_code == *mbus_code)
+ return fmt;
+ if (index == id)
+ def_fmt = fmt;
+ id++;
+ }
+ return def_fmt;
+}
+
+struct mxc_isi_fmt *mxc_isi_get_src_fmt(struct v4l2_subdev_format *sd_fmt)
+{
+ u32 index;
+
+ /* the pixel link master delivers either RGB32 or YUV32 */
+ if (sd_fmt->format.code == MEDIA_BUS_FMT_YUYV8_1X16 ||
+ sd_fmt->format.code == MEDIA_BUS_FMT_YVYU8_2X8 ||
+ sd_fmt->format.code == MEDIA_BUS_FMT_AYUV8_1X32 ||
+ sd_fmt->format.code == MEDIA_BUS_FMT_UYVY8_2X8 ||
+ sd_fmt->format.code == MEDIA_BUS_FMT_YUYV8_2X8)
+ index = 1;
+ else
+ index = 0;
+ return &mxc_isi_src_formats[index];
+}
+
+static inline struct mxc_isi_buffer *to_isi_buffer(struct vb2_v4l2_buffer *v4l2_buf)
+{
+ return container_of(v4l2_buf, struct mxc_isi_buffer, v4l2_buf);
+}
+
+/*
+ * mxc_isi_pipeline_enable() - Enable streaming on a pipeline
+ */
+static int mxc_isi_pipeline_enable(struct mxc_isi_cap_dev *isi_cap, bool enable)
+{
+ struct device *dev = &isi_cap->pdev->dev;
+ struct media_entity *entity = &isi_cap->vdev.entity;
+ struct media_device *mdev = entity->graph_obj.mdev;
+ struct media_graph graph;
+ struct v4l2_subdev *subdev;
+ int ret = 0;
+
+ mutex_lock(&mdev->graph_mutex);
+
+ ret = media_graph_walk_init(&graph, entity->graph_obj.mdev);
+ if (ret) {
+ mutex_unlock(&mdev->graph_mutex);
+ return ret;
+ }
+ media_graph_walk_start(&graph, entity);
+
+ while ((entity = media_graph_walk_next(&graph))) {
+ if (!entity) {
+ dev_dbg(dev, "entity is NULL\n");
+ continue;
+ }
+
+ if (!is_media_entity_v4l2_subdev(entity)) {
+ dev_dbg(dev, "%s is no v4l2 subdev\n", entity->name);
+ continue;
+ }
+
+ subdev = media_entity_to_v4l2_subdev(entity);
+ if (!subdev) {
+ dev_dbg(dev, "%s subdev is NULL\n", entity->name);
+ continue;
+ }
+
+ ret = v4l2_subdev_call(subdev, video, s_stream, enable);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_err(dev, "subdev %s s_stream failed\n", subdev->name);
+ break;
+ }
+ }
+ mutex_unlock(&mdev->graph_mutex);
+ media_graph_walk_cleanup(&graph);
+
+ return ret;
+}
+
+static int mxc_isi_update_buf_paddr(struct mxc_isi_buffer *buf, int memplanes)
+{
+ struct frame_addr *paddr = &buf->paddr;
+ struct vb2_buffer *vb2 = &buf->v4l2_buf.vb2_buf;
+
+ paddr->cb = 0;
+ paddr->cr = 0;
+
+ switch (memplanes) {
+ case 3:
+ paddr->cr = vb2_dma_contig_plane_dma_addr(vb2, 2);
+ /* fall through */
+ case 2:
+ paddr->cb = vb2_dma_contig_plane_dma_addr(vb2, 1);
+ /* fall through */
+ case 1:
+ paddr->y = vb2_dma_contig_plane_dma_addr(vb2, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void mxc_isi_cap_frame_write_done(struct mxc_isi_dev *mxc_isi)
+{
+ struct mxc_isi_cap_dev *isi_cap = mxc_isi->isi_cap;
+ struct device *dev = &isi_cap->pdev->dev;
+ struct mxc_isi_buffer *buf;
+ struct vb2_buffer *vb2;
+
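+ /*
+ * Buffer rotation on each frame-done event: the completed buffer at
+ * the head of out_active is either handed back to userspace or, if
+ * it is a discard buffer, parked on out_discard; then the next
+ * pending buffer (or a discard buffer when none is pending) is
+ * programmed into the ISI so capture never stalls.
+ */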
+ if (list_empty(&isi_cap->out_active)) {
+ dev_warn(dev, "trying to access empty active list\n");
+ return;
+ }
+
+ buf = list_first_entry(&isi_cap->out_active, struct mxc_isi_buffer, list);
+
+ /*
+ * Skip the frame when the buffer number does not match the ISI
+ * trigger buffer
+ */
+ if ((is_buf_active(mxc_isi, 1) && buf->id == MXC_ISI_BUF1) ||
+ (is_buf_active(mxc_isi, 2) && buf->id == MXC_ISI_BUF2)) {
+ dev_dbg(dev, "status=0x%x id=%d\n", mxc_isi->status, buf->id);
+ return;
+ }
+
+ if (buf->discard) {
+ list_move_tail(isi_cap->out_active.next, &isi_cap->out_discard);
+ } else {
+ vb2 = &buf->v4l2_buf.vb2_buf;
+ list_del_init(&buf->list);
+ buf->v4l2_buf.vb2_buf.timestamp = ktime_get_ns();
+ vb2_buffer_done(&buf->v4l2_buf.vb2_buf, VB2_BUF_STATE_DONE);
+ }
+
+ isi_cap->frame_count++;
+
+ if (list_empty(&isi_cap->out_pending)) {
+ if (list_empty(&isi_cap->out_discard)) {
+ dev_warn(dev, "trying to access empty discard list\n");
+ return;
+ }
+
+ buf = list_first_entry(&isi_cap->out_discard,
+ struct mxc_isi_buffer, list);
+ buf->v4l2_buf.sequence = isi_cap->frame_count;
+ mxc_isi_channel_set_outbuf(mxc_isi, buf);
+ list_move_tail(isi_cap->out_discard.next, &isi_cap->out_active);
+ return;
+ }
+
+ /* ISI channel output buffer */
+ buf = list_first_entry(&isi_cap->out_pending, struct mxc_isi_buffer, list);
+ buf->v4l2_buf.sequence = isi_cap->frame_count;
+ mxc_isi_channel_set_outbuf(mxc_isi, buf);
+ vb2 = &buf->v4l2_buf.vb2_buf;
+ vb2->state = VB2_BUF_STATE_ACTIVE;
+ list_move_tail(isi_cap->out_pending.next, &isi_cap->out_active);
+}
+EXPORT_SYMBOL_GPL(mxc_isi_cap_frame_write_done);
+
+static int cap_vb2_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers,
+ unsigned int *num_planes,
+ unsigned int sizes[],
+ struct device *alloc_devs[])
+{
+ struct mxc_isi_cap_dev *isi_cap = vb2_get_drv_priv(q);
+ struct mxc_isi_frame *dst_f = &isi_cap->dst_f;
+ struct mxc_isi_fmt *fmt = dst_f->fmt;
+ unsigned long wh;
+ int i;
+
+ if (!fmt)
+ return -EINVAL;
+
+ for (i = 0; i < fmt->memplanes; i++)
+ alloc_devs[i] = &isi_cap->pdev->dev;
+
+ wh = dst_f->width * dst_f->height;
+
+ *num_planes = fmt->memplanes;
+
+ for (i = 0; i < fmt->memplanes; i++) {
+ unsigned int size = (wh * fmt->depth[i]) / 8;
+
+ if (i == 1 && fmt->fourcc == V4L2_PIX_FMT_NV12)
+ size >>= 1;
+ sizes[i] = max_t(u32, size, dst_f->sizeimage[i]);
+ }
+ dev_dbg(&isi_cap->pdev->dev, "%s, buf_n=%d, size=%d\n",
+ __func__, *num_buffers, sizes[0]);
+
+ return 0;
+}
+
+static int cap_vb2_buffer_prepare(struct vb2_buffer *vb2)
+{
+ struct vb2_queue *q = vb2->vb2_queue;
+ struct mxc_isi_cap_dev *isi_cap = vb2_get_drv_priv(q);
+ struct mxc_isi_frame *dst_f = &isi_cap->dst_f;
+ int i;
+
+ dev_dbg(&isi_cap->pdev->dev, "%s\n", __func__);
+
+ if (!isi_cap->dst_f.fmt)
+ return -EINVAL;
+
+ for (i = 0; i < dst_f->fmt->memplanes; i++) {
+ unsigned long size = dst_f->sizeimage[i];
+
+ if (vb2_plane_size(vb2, i) < size) {
+ v4l2_err(&isi_cap->vdev,
+ "User buffer too small (%ld < %ld)\n",
+ vb2_plane_size(vb2, i), size);
+ return -EINVAL;
+ }
+
+ vb2_set_plane_payload(vb2, i, size);
+ }
+
+ return 0;
+}
+
+static void cap_vb2_buffer_queue(struct vb2_buffer *vb2)
+{
+ struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb2);
+ struct mxc_isi_buffer *buf = to_isi_buffer(v4l2_buf);
+ struct mxc_isi_cap_dev *isi_cap = vb2_get_drv_priv(vb2->vb2_queue);
+ unsigned long flags;
+
+ spin_lock_irqsave(&isi_cap->slock, flags);
+
+ mxc_isi_update_buf_paddr(buf, isi_cap->dst_f.fmt->mdataplanes);
+ list_add_tail(&buf->list, &isi_cap->out_pending);
+
+ spin_unlock_irqrestore(&isi_cap->slock, flags);
+}
+
+static int cap_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct mxc_isi_cap_dev *isi_cap = vb2_get_drv_priv(q);
+ struct mxc_isi_dev *mxc_isi = mxc_isi_get_hostdata(isi_cap->pdev);
+ struct mxc_isi_buffer *buf;
+ struct vb2_buffer *vb2;
+ unsigned long flags;
+ int i, j;
+
+ dev_dbg(&isi_cap->pdev->dev, "%s\n", __func__);
+
+ if (count < 2)
+ return -ENOBUFS;
+
+ if (!mxc_isi)
+ return -EINVAL;
+
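+ /*
+ * The ISI engine ping-pongs between two output buffers (BUF1/BUF2),
+ * which is why at least two buffers must be queued before streaming
+ * starts and why per-plane discard buffers are kept around: when
+ * userspace is slow to queue, frames are steered into the discard
+ * buffers instead of overwriting a buffer already handed back.
+ */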
+ /* Allocate a discard buffer for each plane */
+ for (i = 0; i < isi_cap->pix.num_planes; i++) {
+ isi_cap->discard_size[i] = isi_cap->dst_f.sizeimage[i];
+ isi_cap->discard_buffer[i] =
+ dma_alloc_coherent(&isi_cap->pdev->dev,
+ PAGE_ALIGN(isi_cap->discard_size[i]),
+ &isi_cap->discard_buffer_dma[i],
+ GFP_DMA | GFP_KERNEL);
+ if (!isi_cap->discard_buffer[i]) {
+ for (j = 0; j < i; j++) {
+ dma_free_coherent(&isi_cap->pdev->dev,
+ PAGE_ALIGN(isi_cap->discard_size[j]),
+ isi_cap->discard_buffer[j],
+ isi_cap->discard_buffer_dma[j]);
+ dev_err(&isi_cap->pdev->dev,
+ "alloc dma buffer(%d) fail\n", j);
+ }
+ return -ENOMEM;
+ }
+ dev_dbg(&isi_cap->pdev->dev,
+ "%s: num_plane=%d discard_size=%d discard_buffer=%p\n"
+ , __func__, i,
+ PAGE_ALIGN((int)isi_cap->discard_size[i]),
+ isi_cap->discard_buffer[i]);
+ }
+
+ spin_lock_irqsave(&isi_cap->slock, flags);
+
+ /* add two list members to the out_discard list */
+ isi_cap->buf_discard[0].discard = true;
+ list_add_tail(&isi_cap->buf_discard[0].list, &isi_cap->out_discard);
+
+ isi_cap->buf_discard[1].discard = true;
+ list_add_tail(&isi_cap->buf_discard[1].list, &isi_cap->out_discard);
+
+ /* ISI channel output buffer 1 */
+ buf = list_first_entry(&isi_cap->out_discard, struct mxc_isi_buffer, list);
+ buf->v4l2_buf.sequence = 0;
+ vb2 = &buf->v4l2_buf.vb2_buf;
+ vb2->state = VB2_BUF_STATE_ACTIVE;
+ mxc_isi_channel_set_outbuf(mxc_isi, buf);
+ list_move_tail(isi_cap->out_discard.next, &isi_cap->out_active);
+
+ /* ISI channel output buffer 2 */
+ buf = list_first_entry(&isi_cap->out_pending, struct mxc_isi_buffer, list);
+ buf->v4l2_buf.sequence = 1;
+ vb2 = &buf->v4l2_buf.vb2_buf;
+ vb2->state = VB2_BUF_STATE_ACTIVE;
+ mxc_isi_channel_set_outbuf(mxc_isi, buf);
+ list_move_tail(isi_cap->out_pending.next, &isi_cap->out_active);
+
+ /* Clear frame count */
+ isi_cap->frame_count = 1;
+ spin_unlock_irqrestore(&isi_cap->slock, flags);
+
+ return 0;
+}
+
+static void cap_vb2_stop_streaming(struct vb2_queue *q)
+{
+ struct mxc_isi_cap_dev *isi_cap = vb2_get_drv_priv(q);
+ struct mxc_isi_dev *mxc_isi = mxc_isi_get_hostdata(isi_cap->pdev);
+ struct mxc_isi_buffer *buf;
+ unsigned long flags;
+ int i;
+
+ dev_dbg(&isi_cap->pdev->dev, "%s\n", __func__);
+
+ mxc_isi_channel_disable(mxc_isi);
+
+ spin_lock_irqsave(&isi_cap->slock, flags);
+
+ while (!list_empty(&isi_cap->out_active)) {
+ buf = list_entry(isi_cap->out_active.next,
+ struct mxc_isi_buffer, list);
+ list_del_init(&buf->list);
+ if (buf->discard)
+ continue;
+
+ vb2_buffer_done(&buf->v4l2_buf.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+
+ while (!list_empty(&isi_cap->out_pending)) {
+ buf = list_entry(isi_cap->out_pending.next,
+ struct mxc_isi_buffer, list);
+ list_del_init(&buf->list);
+ vb2_buffer_done(&buf->v4l2_buf.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+
+ while (!list_empty(&isi_cap->out_discard)) {
+ buf = list_entry(isi_cap->out_discard.next,
+ struct mxc_isi_buffer, list);
+ list_del_init(&buf->list);
+ }
+
+ INIT_LIST_HEAD(&isi_cap->out_active);
+ INIT_LIST_HEAD(&isi_cap->out_pending);
+ INIT_LIST_HEAD(&isi_cap->out_discard);
+
+ spin_unlock_irqrestore(&isi_cap->slock, flags);
+
+ for (i = 0; i < isi_cap->pix.num_planes; i++)
+ dma_free_coherent(&isi_cap->pdev->dev,
+ PAGE_ALIGN(isi_cap->discard_size[i]),
+ isi_cap->discard_buffer[i],
+ isi_cap->discard_buffer_dma[i]);
+}
+
+static const struct vb2_ops mxc_cap_vb2_qops = {
+ .queue_setup = cap_vb2_queue_setup,
+ .buf_prepare = cap_vb2_buffer_prepare,
+ .buf_queue = cap_vb2_buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = cap_vb2_start_streaming,
+ .stop_streaming = cap_vb2_stop_streaming,
+};
+
+/*
+ * V4L2 controls handling
+ */
+static inline struct mxc_isi_cap_dev *ctrl_to_isi_cap(struct v4l2_ctrl *ctrl)
+{
+ return container_of(ctrl->handler, struct mxc_isi_cap_dev, ctrls.handler);
+}
+
+static int mxc_isi_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mxc_isi_cap_dev *isi_cap = ctrl_to_isi_cap(ctrl);
+ struct mxc_isi_dev *mxc_isi = mxc_isi_get_hostdata(isi_cap->pdev);
+ unsigned long flags;
+ int ret = 0;
+
+ dev_dbg(&isi_cap->pdev->dev, "%s\n", __func__);
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
+ return 0;
+
+ spin_lock_irqsave(&mxc_isi->slock, flags);
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ if (ctrl->val < 0) {
+ ret = -EINVAL;
+ break;
+ }
+ mxc_isi->hflip = (ctrl->val > 0) ? 1 : 0;
+ break;
+
+ case V4L2_CID_VFLIP:
+ if (ctrl->val < 0) {
+ ret = -EINVAL;
+ break;
+ }
+ mxc_isi->vflip = (ctrl->val > 0) ? 1 : 0;
+ break;
+
+ case V4L2_CID_ALPHA_COMPONENT:
+ if (ctrl->val < 0 || ctrl->val > 255) {
+ ret = -EINVAL;
+ break;
+ }
+ mxc_isi->alpha = ctrl->val;
+ mxc_isi->alphaen = 1;
+ break;
+
+ default:
+ dev_err(&isi_cap->pdev->dev,
+ "%s: unsupported CID %d\n", __func__, ctrl->id);
+ ret = -EINVAL;
+ break;
+ }
+
+ /* never return with the spinlock held, even on invalid values */
+ spin_unlock_irqrestore(&mxc_isi->slock, flags);
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops mxc_isi_ctrl_ops = {
+ .s_ctrl = mxc_isi_s_ctrl,
+};
+
+int mxc_isi_ctrls_create(struct mxc_isi_cap_dev *isi_cap)
+{
+ struct mxc_isi_ctrls *ctrls = &isi_cap->ctrls;
+ struct v4l2_ctrl_handler *handler = &ctrls->handler;
+
+ if (isi_cap->ctrls.ready)
+ return 0;
+
+ v4l2_ctrl_handler_init(handler, 4);
+
+ ctrls->hflip = v4l2_ctrl_new_std(handler, &mxc_isi_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ ctrls->vflip = v4l2_ctrl_new_std(handler, &mxc_isi_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ ctrls->alpha = v4l2_ctrl_new_std(handler, &mxc_isi_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT,
+ 0, 0xff, 1, 0);
+
+ if (!handler->error)
+ ctrls->ready = true;
+
+ return handler->error;
+}
+
+void mxc_isi_ctrls_delete(struct mxc_isi_cap_dev *isi_cap)
+{
+ struct mxc_isi_ctrls *ctrls = &isi_cap->ctrls;
+
+ if (ctrls->ready) {
+ v4l2_ctrl_handler_free(&ctrls->handler);
+ ctrls->ready = false;
+ ctrls->alpha = NULL;
+ }
+}
+
+static struct media_pad
+*mxc_isi_get_remote_source_pad(struct v4l2_subdev *subdev)
+{
+ struct media_pad *sink_pad, *source_pad;
+ int i;
+
+ source_pad = NULL;
+ for (i = 0; i < subdev->entity.num_pads; i++) {
+ sink_pad = &subdev->entity.pads[i];
+
+ if (sink_pad->flags & MEDIA_PAD_FL_SINK) {
+ source_pad = media_entity_remote_pad(sink_pad);
+ if (source_pad)
+ break;
+ }
+ }
+
+ /* return the first remote source pad found, or NULL if none */
+ if (!source_pad)
+ v4l2_err(subdev, "(%d): No remote pad found!\n", __LINE__);
+
+ return source_pad;
+}
+
+static struct v4l2_subdev *mxc_get_remote_subdev(struct v4l2_subdev *subdev,
+ const char * const label)
+{
+ struct media_pad *source_pad;
+ struct v4l2_subdev *sen_sd;
+
+ /* Get remote source pad */
+ source_pad = mxc_isi_get_remote_source_pad(subdev);
+ if (!source_pad) {
+ v4l2_err(subdev, "%s, No remote pad found!\n", label);
+ return NULL;
+ }
+
+ /* Get remote source pad subdev */
+ sen_sd = media_entity_to_v4l2_subdev(source_pad->entity);
+ if (!sen_sd) {
+ v4l2_err(subdev, "%s, No remote subdev found!\n", label);
+ return NULL;
+ }
+
+ return sen_sd;
+}
+
+static bool is_entity_link_setup(struct mxc_isi_cap_dev *isi_cap)
+{
+ struct video_device *vdev = &isi_cap->vdev;
+ struct v4l2_subdev *csi_sd, *sen_sd;
+
+ if (!vdev->entity.num_links || !isi_cap->sd.entity.num_links)
+ return false;
+
+ csi_sd = mxc_get_remote_subdev(&isi_cap->sd, __func__);
+ if (!csi_sd || !csi_sd->entity.num_links)
+ return false;
+
+ sen_sd = mxc_get_remote_subdev(csi_sd, __func__);
+ if (!sen_sd || !sen_sd->entity.num_links)
+ return false;
+
+ return true;
+}
+
+static int mxc_isi_capture_open(struct file *file)
+{
+ struct mxc_isi_cap_dev *isi_cap = video_drvdata(file);
+ struct mxc_isi_dev *mxc_isi = mxc_isi_get_hostdata(isi_cap->pdev);
+ struct device *dev = &isi_cap->pdev->dev;
+ struct v4l2_subdev *sd;
+ int ret = -EBUSY;
+
+ mutex_lock(&isi_cap->lock);
+ isi_cap->is_link_setup = is_entity_link_setup(isi_cap);
+ if (!isi_cap->is_link_setup) {
+ mutex_unlock(&isi_cap->lock);
+ return 0;
+ }
+ mutex_unlock(&isi_cap->lock);
+
+ if (mxc_isi->m2m_enabled) {
+ dev_err(dev, "ISI channel[%d] is busy\n", isi_cap->id);
+ return ret;
+ }
+
+ sd = mxc_get_remote_subdev(&isi_cap->sd, __func__);
+ if (!sd)
+ return -ENODEV;
+
+ mutex_lock(&isi_cap->lock);
+ ret = v4l2_fh_open(file);
+ if (ret) {
+ mutex_unlock(&isi_cap->lock);
+ return ret;
+ }
+ mutex_unlock(&isi_cap->lock);
+
+ pm_runtime_get_sync(dev);
+
+ ret = v4l2_subdev_call(sd, core, s_power, 1);
+ if (ret) {
+ dev_err(dev, "Call subdev s_power fail!\n");
+ pm_runtime_put(dev);
+ return ret;
+ }
+
+ /* increase usage count for ISI channel */
+ mutex_lock(&mxc_isi->lock);
+ atomic_inc(&mxc_isi->usage_count);
+ mxc_isi->m2m_enabled = false;
+ mutex_unlock(&mxc_isi->lock);
+
+ return 0;
+}
+
+static int mxc_isi_capture_release(struct file *file)
+{
+ struct mxc_isi_cap_dev *isi_cap = video_drvdata(file);
+ struct mxc_isi_dev *mxc_isi = mxc_isi_get_hostdata(isi_cap->pdev);
+ struct device *dev = &isi_cap->pdev->dev;
+ struct v4l2_subdev *sd;
+ int ret = -ENODEV;
+
+ if (!isi_cap->is_link_setup)
+ return 0;
+
+ sd = mxc_get_remote_subdev(&isi_cap->sd, __func__);
+ if (!sd)
+ goto label;
+
+ mutex_lock(&isi_cap->lock);
+ ret = _vb2_fop_release(file, NULL);
+ if (ret) {
+ dev_err(dev, "%s fail\n", __func__);
+ mutex_unlock(&isi_cap->lock);
+ goto label;
+ }
+ mutex_unlock(&isi_cap->lock);
+
+ if (atomic_read(&mxc_isi->usage_count) > 0 &&
+ atomic_dec_and_test(&mxc_isi->usage_count))
+ mxc_isi_channel_deinit(mxc_isi);
+
+ ret = v4l2_subdev_call(sd, core, s_power, 0);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ dev_err(dev, "%s s_power fail\n", __func__);
+ goto label;
+ }
+
+label:
+ pm_runtime_put(dev);
+ return ret;
+}
+
+static const struct v4l2_file_operations mxc_isi_capture_fops = {
+ .owner = THIS_MODULE,
+ .open = mxc_isi_capture_open,
+ .release = mxc_isi_capture_release,
+ .poll = vb2_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = vb2_fop_mmap,
+};
+
+/*
+ * The video node ioctl operations
+ */
+static int mxc_isi_cap_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct mxc_isi_cap_dev *isi_cap = video_drvdata(file);
+
+ strlcpy(cap->driver, MXC_ISI_CAPTURE, sizeof(cap->driver));
+ strlcpy(cap->card, MXC_ISI_CAPTURE, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s.%d",
+ dev_name(&isi_cap->pdev->dev), isi_cap->id);
+
+ cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE_MPLANE;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+static int mxc_isi_cap_enum_fmt(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct mxc_isi_cap_dev *isi_cap = video_drvdata(file);
+ struct mxc_isi_fmt *fmt;
+
+ dev_dbg(&isi_cap->pdev->dev, "%s\n", __func__);
+ if (f->index >= (int)ARRAY_SIZE(mxc_isi_out_formats))
+ return -EINVAL;
+
+ fmt = &mxc_isi_out_formats[f->index];
+
+ strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int mxc_isi_cap_g_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct mxc_isi_cap_dev *isi_cap = video_drvdata(file);
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct mxc_isi_frame *dst_f = &isi_cap->dst_f;
+ int i;
+
+ dev_dbg(&isi_cap->pdev->dev, "%s\n", __func__);
+
+ pix->width = dst_f->o_width;
+ pix->height = dst_f->o_height;
+ pix->field = V4L2_FIELD_NONE;
+ pix->pixelformat = dst_f->fmt->fourcc;
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+ pix->num_planes = dst_f->fmt->memplanes;
+
+ for (i = 0; i < pix->num_planes; ++i) {
+ pix->plane_fmt[i].bytesperline = dst_f->bytesperline[i];
+ pix->plane_fmt[i].sizeimage = dst_f->sizeimage[i];
+ }
+
+ return 0;
+}
+
+static int mxc_isi_cap_try_fmt_mplane(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct mxc_isi_cap_dev *isi_cap = video_drvdata(file);
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct mxc_isi_fmt *fmt;
+ int i;
+
+ dev_dbg(&isi_cap->pdev->dev, "%s\n", __func__);
+
+ for (i = 0; i < ARRAY_SIZE(mxc_isi_out_formats); i++) {
+ fmt = &mxc_isi_out_formats[i];
+ if (fmt->fourcc == pix->pixelformat)
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(mxc_isi_out_formats)) {
+ v4l2_err(&isi_cap->sd, "format(%.4s) is not support!\n",
+ (char *)&pix->pixelformat);
+ return -EINVAL;
+ }
+
+ if (pix->width <= 0 || pix->height <= 0) {
+ v4l2_err(&isi_cap->sd, "%s, W/H=(%d, %d) is not valid\n"
+ , __func__, pix->width, pix->height);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/* Update input frame size and format */
+static int mxc_isi_source_fmt_init(struct mxc_isi_cap_dev *isi_cap)
+{
+ struct mxc_isi_frame *src_f = &isi_cap->src_f;
+ struct mxc_isi_frame *dst_f = &isi_cap->dst_f;
+ struct v4l2_subdev_format src_fmt;
+ struct media_pad *source_pad;
+ struct v4l2_subdev *src_sd;
+ int ret;
+
+ source_pad = mxc_isi_get_remote_source_pad(&isi_cap->sd);
+ if (!source_pad) {
+ v4l2_err(&isi_cap->sd,
+ "%s, No remote pad found!\n", __func__);
+ return -EINVAL;
+ }
+
+ src_sd = mxc_get_remote_subdev(&isi_cap->sd, __func__);
+ if (!src_sd)
+ return -EINVAL;
+
+ src_fmt.pad = source_pad->index;
+ src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ src_fmt.format.code = MEDIA_BUS_FMT_UYVY8_2X8;
+ src_fmt.format.width = dst_f->width;
+ src_fmt.format.height = dst_f->height;
+ ret = v4l2_subdev_call(src_sd, pad, set_fmt, NULL, &src_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ v4l2_err(&isi_cap->sd, "set remote fmt fail!\n");
+ return ret;
+ }
+
+ memset(&src_fmt, 0, sizeof(src_fmt));
+ src_fmt.pad = source_pad->index;
+ src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(src_sd, pad, get_fmt, NULL, &src_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ v4l2_err(&isi_cap->sd, "get remote fmt fail!\n");
+ return ret;
+ }
+
+ /* Pixel link master will transfer format to RGB32 or YUV32 */
+ src_f->fmt = mxc_isi_get_src_fmt(&src_fmt);
+
+ set_frame_bounds(src_f, src_fmt.format.width, src_fmt.format.height);
+
+ if (dst_f->width > src_f->width || dst_f->height > src_f->height) {
+ dev_err(&isi_cap->pdev->dev,
+ "%s: src:(%d,%d), dst:(%d,%d) Not support upscale\n",
+ __func__,
+ src_f->width, src_f->height,
+ dst_f->width, dst_f->height);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mxc_isi_cap_s_fmt_mplane(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mxc_isi_cap_dev *isi_cap = video_drvdata(file);
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct mxc_isi_frame *dst_f = &isi_cap->dst_f;
+ struct mxc_isi_fmt *fmt;
+ int bpl;
+ int i;
+
+ /* Step1: Check the format against the supported output format list.
+ * Step2: Update the output frame information.
+ * Step3: Check whether the format is supported by the remote subdev:
+ * Step3.1: if yes, call the remote subdev set_fmt.
+ * Step3.2: if no, call the remote subdev get_fmt.
+ * Step4: Update the input frame information.
+ * Step5: Update the mxc isi channel configuration.
+ */
+
+ dev_dbg(&isi_cap->pdev->dev, "%s, fmt=0x%X\n", __func__, pix->pixelformat);
+ if (vb2_is_busy(&isi_cap->vb2_q))
+ return -EBUSY;
+
+ /* Check the output format */
+ for (i = 0; i < ARRAY_SIZE(mxc_isi_out_formats); i++) {
+ fmt = &mxc_isi_out_formats[i];
+ if (pix && fmt->fourcc == pix->pixelformat)
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(mxc_isi_out_formats)) {
+ dev_dbg(&isi_cap->pdev->dev,
+ "format(%.4s) is not supported!\n", (char *)&pix->pixelformat);
+ return -EINVAL;
+ }
+
+ /* Update the output frame size and format */
+ if (pix->height <= 0 || pix->width <= 0)
+ return -EINVAL;
+
+ dst_f->fmt = fmt;
+ dst_f->height = pix->height;
+ dst_f->width = pix->width;
+
+ pix->num_planes = fmt->memplanes;
+
+ for (i = 0; i < pix->num_planes; i++) {
+ bpl = pix->plane_fmt[i].bytesperline;
+
+ if ((bpl == 0) || (bpl / (fmt->depth[i] >> 3)) < pix->width)
+ pix->plane_fmt[i].bytesperline =
+ (pix->width * fmt->depth[i]) >> 3;
+
+ if (pix->plane_fmt[i].sizeimage == 0) {
+ if ((i == 1) && (pix->pixelformat == V4L2_PIX_FMT_NV12))
+ pix->plane_fmt[i].sizeimage =
+ (pix->width * (pix->height >> 1) * fmt->depth[i] >> 3);
+ else
+ pix->plane_fmt[i].sizeimage =
+ (pix->width * pix->height * fmt->depth[i] >> 3);
+ }
+ }
+
+ if (pix->num_planes > 1) {
+ for (i = 0; i < pix->num_planes; i++) {
+ dst_f->bytesperline[i] = pix->plane_fmt[i].bytesperline;
+ dst_f->sizeimage[i] = pix->plane_fmt[i].sizeimage;
+ }
+ } else {
+ dst_f->bytesperline[0] = dst_f->width * dst_f->fmt->depth[0] / 8;
+ dst_f->sizeimage[0] = dst_f->height * dst_f->bytesperline[0];
+ }
+
+ memcpy(&isi_cap->pix, pix, sizeof(*pix));
+ set_frame_bounds(dst_f, pix->width, pix->height);
+
+ return 0;
+}
+
+static int mxc_isi_config_parm(struct mxc_isi_cap_dev *isi_cap)
+{
+ struct mxc_isi_dev *mxc_isi = mxc_isi_get_hostdata(isi_cap->pdev);
+ int ret;
+
+ ret = mxc_isi_source_fmt_init(isi_cap);
+ if (ret < 0)
+ return -EINVAL;
+
+ mxc_isi_channel_init(mxc_isi);
+ mxc_isi_channel_config(mxc_isi, &isi_cap->src_f, &isi_cap->dst_f);
+
+ return 0;
+}
+
+static int mxc_isi_cap_g_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct mxc_isi_cap_dev *isi_cap = video_drvdata(file);
+ struct v4l2_subdev *sd;
+
+ sd = mxc_get_remote_subdev(&isi_cap->sd, __func__);
+ if (!sd)
+ return -ENODEV;
+
+ return v4l2_g_parm_cap(video_devdata(file), sd, a);
+}
+
+static int mxc_isi_cap_s_parm(struct file *file, void *fh,
+ struct v4l2_streamparm *a)
+{
+ struct mxc_isi_cap_dev *isi_cap = video_drvdata(file);
+ struct v4l2_subdev *sd;
+
+ sd = mxc_get_remote_subdev(&isi_cap->sd, __func__);
+ if (!sd)
+ return -ENODEV;
+
+ return v4l2_s_parm_cap(video_devdata(file), sd, a);
+}
+
+
+static int mxc_isi_cap_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct mxc_isi_cap_dev *isi_cap = video_drvdata(file);
+ struct mxc_isi_dev *mxc_isi = mxc_isi_get_hostdata(isi_cap->pdev);
+ int ret;
+
+ dev_dbg(&isi_cap->pdev->dev, "%s\n", __func__);
+
+ ret = mxc_isi_config_parm(isi_cap);
+ if (ret < 0)
+ return ret;
+
+ ret = vb2_ioctl_streamon(file, priv, type);
+ mxc_isi_channel_enable(mxc_isi, mxc_isi->m2m_enabled);
+ ret = mxc_isi_pipeline_enable(isi_cap, 1);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ mxc_isi->is_streaming = 1;
+
+ return 0;
+}
+
+static int mxc_isi_cap_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct mxc_isi_cap_dev *isi_cap = video_drvdata(file);
+ struct mxc_isi_dev *mxc_isi = mxc_isi_get_hostdata(isi_cap->pdev);
+ int ret;
+
+ dev_dbg(&isi_cap->pdev->dev, "%s\n", __func__);
+
+ mxc_isi_pipeline_enable(isi_cap, 0);
+ mxc_isi_channel_disable(mxc_isi);
+ ret = vb2_ioctl_streamoff(file, priv, type);
+
+ mxc_isi->is_streaming = 0;
+
+ return ret;
+}
+
+static int mxc_isi_cap_g_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct mxc_isi_cap_dev *isi_cap = video_drvdata(file);
+ struct mxc_isi_frame *f = &isi_cap->src_f;
+
+ dev_dbg(&isi_cap->pdev->dev, "%s\n", __func__);
+
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ switch (s->target) {
+ case V4L2_SEL_TGT_COMPOSE_DEFAULT:
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ f = &isi_cap->dst_f;
+ /* fall through */
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ case V4L2_SEL_TGT_CROP_DEFAULT:
+ s->r.left = 0;
+ s->r.top = 0;
+ s->r.width = f->o_width;
+ s->r.height = f->o_height;
+ return 0;
+
+ case V4L2_SEL_TGT_COMPOSE:
+ f = &isi_cap->dst_f;
+ /* fall through */
+ case V4L2_SEL_TGT_CROP:
+ s->r.left = f->h_off;
+ s->r.top = f->v_off;
+ s->r.width = f->width;
+ s->r.height = f->height;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int enclosed_rectangle(struct v4l2_rect *a, struct v4l2_rect *b)
+{
+ if (a->left < b->left || a->top < b->top)
+ return 0;
+
+ if (a->left + a->width > b->left + b->width)
+ return 0;
+
+ if (a->top + a->height > b->top + b->height)
+ return 0;
+
+ return 1;
+}
+
+static int mxc_isi_cap_s_selection(struct file *file, void *fh,
+ struct v4l2_selection *s)
+{
+ struct mxc_isi_cap_dev *isi_cap = video_drvdata(file);
+ struct mxc_isi_frame *f;
+ struct v4l2_rect rect = s->r;
+ unsigned long flags;
+
+ dev_dbg(&isi_cap->pdev->dev, "%s\n", __func__);
+ if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ if (s->target == V4L2_SEL_TGT_COMPOSE)
+ f = &isi_cap->dst_f;
+ else if (s->target == V4L2_SEL_TGT_CROP)
+ f = &isi_cap->src_f;
+ else
+ return -EINVAL;
+
+ if (s->flags & V4L2_SEL_FLAG_LE &&
+ !enclosed_rectangle(&rect, &s->r))
+ return -ERANGE;
+
+ if (s->flags & V4L2_SEL_FLAG_GE &&
+ !enclosed_rectangle(&s->r, &rect))
+ return -ERANGE;
+
+ s->r = rect;
+ spin_lock_irqsave(&isi_cap->slock, flags);
+ set_frame_crop(f, s->r.left, s->r.top, s->r.width,
+ s->r.height);
+ spin_unlock_irqrestore(&isi_cap->slock, flags);
+
+ return 0;
+}
+
+static int mxc_isi_cap_enum_framesizes(struct file *file, void *priv,
+ struct v4l2_frmsizeenum *fsize)
+{
+ struct mxc_isi_cap_dev *isi_cap = video_drvdata(file);
+ struct device_node *parent;
+ struct v4l2_subdev *sd;
+ struct mxc_isi_fmt *fmt;
+ struct v4l2_subdev_frame_size_enum fse = {
+ .index = fsize->index,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ fmt = mxc_isi_find_format(&fsize->pixel_format, NULL, 0);
+ if (!fmt || fmt->fourcc != fsize->pixel_format)
+ return -EINVAL;
+ fse.code = fmt->mbus_code;
+
+ sd = mxc_get_remote_subdev(&isi_cap->sd, __func__);
+ if (!sd) {
+ v4l2_err(&isi_cap->sd, "Can't find subdev\n");
+ return -ENODEV;
+ }
+
+ ret = v4l2_subdev_call(sd, pad, enum_frame_size, NULL, &fse);
+ if (ret)
+ return ret;
+
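+ /*
+ * On i.MX8MP, ISI channel 1 apparently cannot handle line widths
+ * above 2048 pixels (ISI_2K), so frame sizes beyond that are
+ * filtered out here (inferred from this check and the matching one
+ * in enum_frameintervals below).
+ */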
+ parent = of_get_parent(isi_cap->pdev->dev.of_node);
+ if ((of_device_is_compatible(parent, "fsl,imx8mp-isi")) &&
+ (fse.max_width > ISI_2K || fse.min_width > ISI_2K) &&
+ (isi_cap->id == 1))
+ return -EINVAL;
+
+ if (fse.min_width == fse.max_width &&
+ fse.min_height == fse.max_height) {
+ fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
+ fsize->discrete.width = fse.min_width;
+ fsize->discrete.height = fse.min_height;
+ return 0;
+ }
+
+ fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = fse.min_width;
+ fsize->stepwise.max_width = fse.max_width;
+ fsize->stepwise.min_height = fse.min_height;
+ fsize->stepwise.max_height = fse.max_height;
+ fsize->stepwise.step_width = 1;
+ fsize->stepwise.step_height = 1;
+
+ return 0;
+}
+
+static int mxc_isi_cap_enum_frameintervals(struct file *file, void *fh,
+ struct v4l2_frmivalenum *interval)
+{
+ struct mxc_isi_cap_dev *isi_cap = video_drvdata(file);
+ struct device_node *parent;
+ struct v4l2_subdev *sd;
+ struct mxc_isi_fmt *fmt;
+ struct v4l2_subdev_frame_interval_enum fie = {
+ .index = interval->index,
+ .width = interval->width,
+ .height = interval->height,
+ .which = V4L2_SUBDEV_FORMAT_ACTIVE,
+ };
+ int ret;
+
+ fmt = mxc_isi_find_format(&interval->pixel_format, NULL, 0);
+ if (!fmt || fmt->fourcc != interval->pixel_format)
+ return -EINVAL;
+ fie.code = fmt->mbus_code;
+
+ sd = mxc_get_remote_subdev(&isi_cap->sd, __func__);
+ if (!sd)
+ return -EINVAL;
+
+ ret = v4l2_subdev_call(sd, pad, enum_frame_interval, NULL, &fie);
+ if (ret)
+ return ret;
+
+ parent = of_get_parent(isi_cap->pdev->dev.of_node);
+ if (of_device_is_compatible(parent, "fsl,imx8mp-isi") &&
+ fie.width > ISI_2K && isi_cap->id == 1) {
+ of_node_put(parent);
+ return -EINVAL;
+ }
+ of_node_put(parent);
+
+ interval->type = V4L2_FRMIVAL_TYPE_DISCRETE;
+ interval->discrete = fie.interval;
+
+ return 0;
+}
+
+static const struct v4l2_ioctl_ops mxc_isi_capture_ioctl_ops = {
+ .vidioc_querycap = mxc_isi_cap_querycap,
+
+ .vidioc_enum_fmt_vid_cap = mxc_isi_cap_enum_fmt,
+ .vidioc_try_fmt_vid_cap_mplane = mxc_isi_cap_try_fmt_mplane,
+ .vidioc_s_fmt_vid_cap_mplane = mxc_isi_cap_s_fmt_mplane,
+ .vidioc_g_fmt_vid_cap_mplane = mxc_isi_cap_g_fmt_mplane,
+
+ .vidioc_reqbufs = vb2_ioctl_reqbufs,
+ .vidioc_querybuf = vb2_ioctl_querybuf,
+ .vidioc_qbuf = vb2_ioctl_qbuf,
+ .vidioc_dqbuf = vb2_ioctl_dqbuf,
+ .vidioc_expbuf = vb2_ioctl_expbuf,
+ .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ .vidioc_create_bufs = vb2_ioctl_create_bufs,
+
+ .vidioc_g_parm = mxc_isi_cap_g_parm,
+ .vidioc_s_parm = mxc_isi_cap_s_parm,
+
+ .vidioc_streamon = mxc_isi_cap_streamon,
+ .vidioc_streamoff = mxc_isi_cap_streamoff,
+
+ .vidioc_g_selection = mxc_isi_cap_g_selection,
+ .vidioc_s_selection = mxc_isi_cap_s_selection,
+
+ .vidioc_enum_framesizes = mxc_isi_cap_enum_framesizes,
+ .vidioc_enum_frameintervals = mxc_isi_cap_enum_frameintervals,
+};
+
+/* Capture subdev media entity operations */
+static int mxc_isi_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct mxc_isi_cap_dev *isi_cap = v4l2_get_subdevdata(sd);
+
+ if (WARN_ON(!isi_cap))
+ return 0;
+
+ if (!(flags & MEDIA_LNK_FL_ENABLED))
+ return 0;
+
+ /* Add ISI source and sink pad link configuration */
+ if (local->flags & MEDIA_PAD_FL_SOURCE) {
+ switch (local->index) {
+ case MXC_ISI_SD_PAD_SOURCE_DC0:
+ case MXC_ISI_SD_PAD_SOURCE_DC1:
+ break;
+ case MXC_ISI_SD_PAD_SOURCE_MEM:
+ break;
+ default:
+ dev_err(&isi_cap->pdev->dev, "invalid source pad\n");
+ return -EINVAL;
+ }
+ } else if (local->flags & MEDIA_PAD_FL_SINK) {
+ switch (local->index) {
+ case MXC_ISI_SD_PAD_SINK_MIPI0_VC0:
+ case MXC_ISI_SD_PAD_SINK_MIPI0_VC1:
+ case MXC_ISI_SD_PAD_SINK_MIPI0_VC2:
+ case MXC_ISI_SD_PAD_SINK_MIPI0_VC3:
+ case MXC_ISI_SD_PAD_SINK_MIPI1_VC0:
+ case MXC_ISI_SD_PAD_SINK_MIPI1_VC1:
+ case MXC_ISI_SD_PAD_SINK_MIPI1_VC2:
+ case MXC_ISI_SD_PAD_SINK_MIPI1_VC3:
+ case MXC_ISI_SD_PAD_SINK_HDMI:
+ case MXC_ISI_SD_PAD_SINK_DC0:
+ case MXC_ISI_SD_PAD_SINK_DC1:
+ case MXC_ISI_SD_PAD_SINK_MEM:
+ case MXC_ISI_SD_PAD_SINK_PARALLEL_CSI:
+ break;
+ default:
+ dev_err(&isi_cap->pdev->dev,
+ "%s invalid sink pad\n", __func__);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static const struct media_entity_operations mxc_isi_sd_media_ops = {
+ .link_setup = mxc_isi_link_setup,
+};
+
+static int mxc_isi_subdev_enum_mbus_code(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_mbus_code_enum *code)
+{
+ return 0;
+}
+
+static int mxc_isi_subdev_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct mxc_isi_cap_dev *isi_cap = v4l2_get_subdevdata(sd);
+ struct mxc_isi_frame *f;
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+
+ mutex_lock(&isi_cap->lock);
+
+ switch (fmt->pad) {
+ case MXC_ISI_SD_PAD_SOURCE_MEM:
+ case MXC_ISI_SD_PAD_SOURCE_DC0:
+ case MXC_ISI_SD_PAD_SOURCE_DC1:
+ f = &isi_cap->dst_f;
+ break;
+ case MXC_ISI_SD_PAD_SINK_MIPI0_VC0:
+ case MXC_ISI_SD_PAD_SINK_MIPI0_VC1:
+ case MXC_ISI_SD_PAD_SINK_MIPI0_VC2:
+ case MXC_ISI_SD_PAD_SINK_MIPI0_VC3:
+ case MXC_ISI_SD_PAD_SINK_MIPI1_VC0:
+ case MXC_ISI_SD_PAD_SINK_MIPI1_VC1:
+ case MXC_ISI_SD_PAD_SINK_MIPI1_VC2:
+ case MXC_ISI_SD_PAD_SINK_MIPI1_VC3:
+ case MXC_ISI_SD_PAD_SINK_HDMI:
+ case MXC_ISI_SD_PAD_SINK_DC0:
+ case MXC_ISI_SD_PAD_SINK_DC1:
+ case MXC_ISI_SD_PAD_SINK_MEM:
+ f = &isi_cap->src_f;
+ break;
+ default:
+ mutex_unlock(&isi_cap->lock);
+ v4l2_err(&isi_cap->sd,
+ "%s: pad %d is not supported\n", __func__, fmt->pad);
+ return -EINVAL;
+ }
+
+ if (!WARN_ON(!f->fmt))
+ mf->code = f->fmt->mbus_code;
+
+ /* Source/Sink pads crop rectangle size */
+ mf->width = f->width;
+ mf->height = f->height;
+ mf->colorspace = V4L2_COLORSPACE_JPEG;
+
+ mutex_unlock(&isi_cap->lock);
+
+ return 0;
+}
+
+static int mxc_isi_subdev_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct mxc_isi_cap_dev *isi_cap = v4l2_get_subdevdata(sd);
+ struct device_node *parent;
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+ struct mxc_isi_frame *dst_f = &isi_cap->dst_f;
+ struct mxc_isi_fmt *out_fmt;
+ int i;
+
+ if (fmt->pad < MXC_ISI_SD_PAD_SOURCE_MEM &&
+ vb2_is_busy(&isi_cap->vb2_q))
+ return -EBUSY;
+
+ for (i = 0; i < ARRAY_SIZE(mxc_isi_out_formats); i++) {
+ out_fmt = &mxc_isi_out_formats[i];
+ if (mf->code == out_fmt->mbus_code)
+ break;
+ }
+ if (i >= ARRAY_SIZE(mxc_isi_out_formats)) {
+ v4l2_err(&isi_cap->sd,
+ "%s: format is not supported\n", __func__);
+ return -EINVAL;
+ }
+
+ parent = of_get_parent(isi_cap->pdev->dev.of_node);
+ if (of_device_is_compatible(parent, "fsl,imx8mn-isi") &&
+ mf->width > ISI_2K) {
+ of_node_put(parent);
+ return -EINVAL;
+ }
+ of_node_put(parent);
+
+ mutex_lock(&isi_cap->lock);
+ /* update output frame size and format */
+ dst_f->fmt = &mxc_isi_out_formats[i];
+ set_frame_bounds(dst_f, mf->width, mf->height);
+ mutex_unlock(&isi_cap->lock);
+
+ dev_dbg(&isi_cap->pdev->dev, "pad%d: code: 0x%x, %dx%d",
+ fmt->pad, mf->code, mf->width, mf->height);
+
+ return 0;
+}
+
+static int mxc_isi_subdev_get_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct mxc_isi_cap_dev *isi_cap = v4l2_get_subdevdata(sd);
+ struct mxc_isi_frame *f = &isi_cap->src_f;
+ struct v4l2_rect *r = &sel->r;
+ struct v4l2_rect *try_sel;
+
+ mutex_lock(&isi_cap->lock);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_COMPOSE_BOUNDS:
+ f = &isi_cap->dst_f;
+ /* fall through */
+ case V4L2_SEL_TGT_CROP_BOUNDS:
+ r->width = f->o_width;
+ r->height = f->o_height;
+ r->left = 0;
+ r->top = 0;
+ mutex_unlock(&isi_cap->lock);
+ return 0;
+
+ case V4L2_SEL_TGT_CROP:
+ try_sel = v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ try_sel = v4l2_subdev_get_try_compose(sd, cfg, sel->pad);
+ f = &isi_cap->dst_f;
+ break;
+ default:
+ mutex_unlock(&isi_cap->lock);
+ return -EINVAL;
+ }
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ sel->r = *try_sel;
+ } else {
+ r->left = f->h_off;
+ r->top = f->v_off;
+ r->width = f->width;
+ r->height = f->height;
+ }
+
+ dev_dbg(&isi_cap->pdev->dev,
+ "%s, target %#x: l:%d, t:%d, %dx%d, f_w: %d, f_h: %d",
+ __func__, sel->target, r->left, r->top, r->width, r->height,
+ f->c_width, f->c_height);
+
+ mutex_unlock(&isi_cap->lock);
+ return 0;
+}
+
+static int mxc_isi_subdev_set_selection(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_selection *sel)
+{
+ struct mxc_isi_cap_dev *isi_cap = v4l2_get_subdevdata(sd);
+ struct mxc_isi_frame *f = &isi_cap->src_f;
+ struct v4l2_rect *r = &sel->r;
+ struct v4l2_rect *try_sel;
+ unsigned long flags;
+
+ mutex_lock(&isi_cap->lock);
+
+ switch (sel->target) {
+ case V4L2_SEL_TGT_CROP:
+ try_sel = v4l2_subdev_get_try_crop(sd, cfg, sel->pad);
+ break;
+ case V4L2_SEL_TGT_COMPOSE:
+ try_sel = v4l2_subdev_get_try_compose(sd, cfg, sel->pad);
+ f = &isi_cap->dst_f;
+ break;
+ default:
+ mutex_unlock(&isi_cap->lock);
+ return -EINVAL;
+ }
+
+ if (sel->which == V4L2_SUBDEV_FORMAT_TRY) {
+ *try_sel = sel->r;
+ } else {
+ spin_lock_irqsave(&isi_cap->slock, flags);
+ set_frame_crop(f, r->left, r->top, r->width, r->height);
+ spin_unlock_irqrestore(&isi_cap->slock, flags);
+ }
+
+ dev_dbg(&isi_cap->pdev->dev, "%s, target %#x: (%d,%d)/%dx%d", __func__,
+ sel->target, r->left, r->top, r->width, r->height);
+
+ mutex_unlock(&isi_cap->lock);
+
+ return 0;
+}
+
+static struct v4l2_subdev_pad_ops mxc_isi_subdev_pad_ops = {
+ .enum_mbus_code = mxc_isi_subdev_enum_mbus_code,
+ .get_selection = mxc_isi_subdev_get_selection,
+ .set_selection = mxc_isi_subdev_set_selection,
+ .get_fmt = mxc_isi_subdev_get_fmt,
+ .set_fmt = mxc_isi_subdev_set_fmt,
+};
+
+static struct v4l2_subdev_ops mxc_isi_subdev_ops = {
+ .pad = &mxc_isi_subdev_pad_ops,
+};
+
+static int mxc_isi_register_cap_device(struct mxc_isi_cap_dev *isi_cap,
+ struct v4l2_device *v4l2_dev)
+{
+ struct video_device *vdev = &isi_cap->vdev;
+ struct vb2_queue *q = &isi_cap->vb2_q;
+ int ret = -ENOMEM;
+
+ dev_dbg(&isi_cap->pdev->dev, "%s\n", __func__);
+ memset(vdev, 0, sizeof(*vdev));
+ snprintf(vdev->name, sizeof(vdev->name), "mxc_isi.%d.capture", isi_cap->id);
+
+ vdev->fops = &mxc_isi_capture_fops;
+ vdev->ioctl_ops = &mxc_isi_capture_ioctl_ops;
+ vdev->v4l2_dev = v4l2_dev;
+ vdev->minor = -1;
+ vdev->release = video_device_release_empty;
+ vdev->queue = q;
+ vdev->lock = &isi_cap->lock;
+
+ vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE_MPLANE;
+ video_set_drvdata(vdev, isi_cap);
+
+ INIT_LIST_HEAD(&isi_cap->out_pending);
+ INIT_LIST_HEAD(&isi_cap->out_active);
+ INIT_LIST_HEAD(&isi_cap->out_discard);
+
+ memset(q, 0, sizeof(*q));
+ q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ q->drv_priv = isi_cap;
+ q->ops = &mxc_cap_vb2_qops;
+ q->mem_ops = &vb2_dma_contig_memops;
+ q->buf_struct_size = sizeof(struct mxc_isi_buffer);
+ q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+ q->lock = &isi_cap->lock;
+
+ ret = vb2_queue_init(q);
+ if (ret)
+ goto err_free_ctx;
+
+ /* Default configuration */
+ isi_cap->dst_f.width = 1280;
+ isi_cap->dst_f.height = 800;
+ isi_cap->dst_f.fmt = &mxc_isi_out_formats[0];
+ isi_cap->src_f.fmt = isi_cap->dst_f.fmt;
+
+ isi_cap->cap_pad.flags = MEDIA_PAD_FL_SINK;
+ vdev->entity.function = MEDIA_ENT_F_PROC_VIDEO_SCALER;
+ ret = media_entity_pads_init(&vdev->entity, 1, &isi_cap->cap_pad);
+ if (ret)
+ goto err_free_ctx;
+
+ ret = mxc_isi_ctrls_create(isi_cap);
+ if (ret)
+ goto err_me_cleanup;
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret)
+ goto err_ctrl_free;
+
+ vdev->ctrl_handler = &isi_cap->ctrls.handler;
+ v4l2_info(v4l2_dev, "Registered %s as /dev/%s\n",
+ vdev->name, video_device_node_name(vdev));
+
+ return 0;
+
+err_ctrl_free:
+ mxc_isi_ctrls_delete(isi_cap);
+err_me_cleanup:
+ media_entity_cleanup(&vdev->entity);
+err_free_ctx:
+ return ret;
+}
+
+static int mxc_isi_subdev_registered(struct v4l2_subdev *sd)
+{
+ struct mxc_isi_cap_dev *isi_cap = sd_to_cap_dev(sd);
+ int ret;
+
+ if (!isi_cap)
+ return -ENXIO;
+
+ dev_dbg(&isi_cap->pdev->dev, "%s\n", __func__);
+
+ ret = mxc_isi_register_cap_device(isi_cap, sd->v4l2_dev);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void mxc_isi_subdev_unregistered(struct v4l2_subdev *sd)
+{
+ struct mxc_isi_cap_dev *isi_cap = v4l2_get_subdevdata(sd);
+ struct video_device *vdev;
+
+ if (!isi_cap)
+ return;
+
+ dev_dbg(&isi_cap->pdev->dev, "%s\n", __func__);
+
+ mutex_lock(&isi_cap->lock);
+ vdev = &isi_cap->vdev;
+ if (video_is_registered(vdev)) {
+ video_unregister_device(vdev);
+ mxc_isi_ctrls_delete(isi_cap);
+ media_entity_cleanup(&vdev->entity);
+ }
+ mutex_unlock(&isi_cap->lock);
+}
+
+static const struct v4l2_subdev_internal_ops mxc_isi_capture_sd_internal_ops = {
+ .registered = mxc_isi_subdev_registered,
+ .unregistered = mxc_isi_subdev_unregistered,
+};
+
+static int isi_cap_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mxc_isi_dev *mxc_isi;
+ struct mxc_isi_cap_dev *isi_cap;
+ struct v4l2_subdev *sd;
+ int ret;
+
+ isi_cap = devm_kzalloc(dev, sizeof(*isi_cap), GFP_KERNEL);
+ if (!isi_cap)
+ return -ENOMEM;
+
+ dev->parent = mxc_isi_dev_get_parent(pdev);
+ if (!dev->parent) {
+ dev_info(dev, "deferring %s device registration\n", dev_name(dev));
+ return -EPROBE_DEFER;
+ }
+
+ mxc_isi = mxc_isi_get_hostdata(pdev);
+ if (!mxc_isi) {
+ dev_info(dev, "deferring %s device registration\n", dev_name(dev));
+ return -EPROBE_DEFER;
+ }
+
+ isi_cap->pdev = pdev;
+ isi_cap->id = mxc_isi->id;
+ mxc_isi->isi_cap = isi_cap;
+
+ spin_lock_init(&isi_cap->slock);
+ mutex_init(&isi_cap->lock);
+
+ sd = &isi_cap->sd;
+ v4l2_subdev_init(sd, &mxc_isi_subdev_ops);
+ snprintf(sd->name, sizeof(sd->name), "mxc_isi.%d", isi_cap->id);
+
+ sd->entity.function = MEDIA_ENT_F_PROC_VIDEO_PIXEL_FORMATTER;
+
+ /* ISI Sink pads */
+ isi_cap->sd_pads[MXC_ISI_SD_PAD_SINK_MIPI0_VC0].flags = MEDIA_PAD_FL_SINK;
+ isi_cap->sd_pads[MXC_ISI_SD_PAD_SINK_MIPI0_VC1].flags = MEDIA_PAD_FL_SINK;
+ isi_cap->sd_pads[MXC_ISI_SD_PAD_SINK_MIPI0_VC2].flags = MEDIA_PAD_FL_SINK;
+ isi_cap->sd_pads[MXC_ISI_SD_PAD_SINK_MIPI0_VC3].flags = MEDIA_PAD_FL_SINK;
+ isi_cap->sd_pads[MXC_ISI_SD_PAD_SINK_MIPI1_VC0].flags = MEDIA_PAD_FL_SINK;
+ isi_cap->sd_pads[MXC_ISI_SD_PAD_SINK_MIPI1_VC1].flags = MEDIA_PAD_FL_SINK;
+ isi_cap->sd_pads[MXC_ISI_SD_PAD_SINK_MIPI1_VC2].flags = MEDIA_PAD_FL_SINK;
+ isi_cap->sd_pads[MXC_ISI_SD_PAD_SINK_MIPI1_VC3].flags = MEDIA_PAD_FL_SINK;
+ isi_cap->sd_pads[MXC_ISI_SD_PAD_SINK_DC0].flags = MEDIA_PAD_FL_SINK;
+ isi_cap->sd_pads[MXC_ISI_SD_PAD_SINK_DC1].flags = MEDIA_PAD_FL_SINK;
+ isi_cap->sd_pads[MXC_ISI_SD_PAD_SINK_HDMI].flags = MEDIA_PAD_FL_SINK;
+ isi_cap->sd_pads[MXC_ISI_SD_PAD_SINK_MEM].flags = MEDIA_PAD_FL_SINK;
+ isi_cap->sd_pads[MXC_ISI_SD_PAD_SINK_PARALLEL_CSI].flags = MEDIA_PAD_FL_SINK;
+
+ /* ISI source pads */
+ isi_cap->sd_pads[MXC_ISI_SD_PAD_SOURCE_MEM].flags = MEDIA_PAD_FL_SOURCE;
+ isi_cap->sd_pads[MXC_ISI_SD_PAD_SOURCE_DC0].flags = MEDIA_PAD_FL_SOURCE;
+ isi_cap->sd_pads[MXC_ISI_SD_PAD_SOURCE_DC1].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&sd->entity, MXC_ISI_SD_PADS_NUM, isi_cap->sd_pads);
+ if (ret)
+ return ret;
+
+ sd->entity.ops = &mxc_isi_sd_media_ops;
+ sd->internal_ops = &mxc_isi_capture_sd_internal_ops;
+
+ v4l2_set_subdevdata(sd, isi_cap);
+ platform_set_drvdata(pdev, isi_cap);
+
+ pm_runtime_enable(dev);
+ return 0;
+}
+
+static int isi_cap_remove(struct platform_device *pdev)
+{
+ struct mxc_isi_cap_dev *isi_cap = platform_get_drvdata(pdev);
+ struct v4l2_subdev *sd = &isi_cap->sd;
+
+ v4l2_device_unregister_subdev(sd);
+ media_entity_cleanup(&sd->entity);
+ v4l2_set_subdevdata(sd, NULL);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id isi_cap_of_match[] = {
+ {.compatible = "imx-isi-capture",},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, isi_cap_of_match);
+
+static struct platform_driver isi_cap_driver = {
+ .probe = isi_cap_probe,
+ .remove = isi_cap_remove,
+ .driver = {
+ .of_match_table = isi_cap_of_match,
+ .name = "isi-capture",
+ },
+};
+module_platform_driver(isi_cap_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("IMX8 Image Sensor Interface Capture driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ISI Capture");
+MODULE_VERSION("1.0");
diff --git a/drivers/staging/media/imx/imx8-isi-core.c b/drivers/staging/media/imx/imx8-isi-core.c
new file mode 100644
index 000000000000..bd7381d7bcaa
--- /dev/null
+++ b/drivers/staging/media/imx/imx8-isi-core.c
@@ -0,0 +1,621 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019-2020 NXP
+ *
+ */
+
+#include "imx8-isi-hw.h"
+
+static const struct soc_device_attribute imx8_soc[] = {
+ {
+ .soc_id = "i.MX8QXP",
+ .revision = "1.0",
+ }, {
+ .soc_id = "i.MX8QXP",
+ .revision = "1.1",
+ }, {
+ .soc_id = "i.MX8QXP",
+ .revision = "1.2",
+ }, {
+ .soc_id = "i.MX8QM",
+ .revision = "1.0",
+ }, {
+ .soc_id = "i.MX8QM",
+ .revision = "1.1",
+ }, {
+ .soc_id = "i.MX8MN",
+ .revision = "1.0",
+ }, {
+ .soc_id = "i.MX8MP",
+ },
+};
+
+static const struct of_device_id mxc_isi_of_match[];
+
+static irqreturn_t mxc_isi_irq_handler(int irq, void *priv)
+{
+ struct mxc_isi_dev *mxc_isi = priv;
+ struct device *dev = &mxc_isi->pdev->dev;
+ struct mxc_isi_ier_reg *ier_reg = mxc_isi->pdata->ier_reg;
+ unsigned long flags;
+ u32 status;
+
+ spin_lock_irqsave(&mxc_isi->slock, flags);
+
+ status = mxc_isi_get_irq_status(mxc_isi);
+ mxc_isi->status = status;
+ mxc_isi_clean_irq_status(mxc_isi, status);
+
+ if (status & CHNL_STS_FRM_STRD_MASK) {
+ if (mxc_isi->m2m_enabled)
+ mxc_isi_m2m_frame_write_done(mxc_isi);
+ else
+ mxc_isi_cap_frame_write_done(mxc_isi);
+ }
+
+ if (status & (CHNL_STS_AXI_WR_ERR_Y_MASK |
+ CHNL_STS_AXI_WR_ERR_U_MASK |
+ CHNL_STS_AXI_WR_ERR_V_MASK))
+ dev_dbg(dev, "%s, IRQ AXI Error stat=0x%X\n", __func__, status);
+
+ if (status & (ier_reg->panic_y_buf_en.mask |
+ ier_reg->panic_u_buf_en.mask |
+ ier_reg->panic_v_buf_en.mask))
+ dev_dbg(dev, "%s, IRQ Panic OFLW Error stat=0x%X\n", __func__, status);
+
+ if (status & (ier_reg->oflw_y_buf_en.mask |
+ ier_reg->oflw_u_buf_en.mask |
+ ier_reg->oflw_v_buf_en.mask))
+ dev_dbg(dev, "%s, IRQ OFLW Error stat=0x%X\n", __func__, status);
+
+ if (status & (ier_reg->excs_oflw_y_buf_en.mask |
+ ier_reg->excs_oflw_u_buf_en.mask |
+ ier_reg->excs_oflw_v_buf_en.mask))
+ dev_dbg(dev, "%s, IRQ EXCS OFLW Error stat=0x%X\n", __func__, status);
+
+ spin_unlock_irqrestore(&mxc_isi->slock, flags);
+ return IRQ_HANDLED;
+}
+
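+/*
+ * The ISI instances on i.MX8MN/MP live in the DISP mix block; its soft
+ * reset and clock gates are modelled as reset controls, so "assert"
+ * here drives the corresponding control bit.
+ */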
+static int disp_mix_sft_rstn(struct reset_control *reset, bool enable)
+{
+ int ret;
+
+ if (!reset)
+ return 0;
+
+ ret = enable ? reset_control_assert(reset) :
+ reset_control_deassert(reset);
+ return ret;
+}
+
+static int disp_mix_clks_enable(struct reset_control *reset, bool enable)
+{
+ int ret;
+
+ if (!reset)
+ return 0;
+
+ ret = enable ? reset_control_assert(reset) :
+ reset_control_deassert(reset);
+ return ret;
+}
+
+static int mxc_imx8_clk_get(struct mxc_isi_dev *mxc_isi)
+{
+ struct device *dev = &mxc_isi->pdev->dev;
+
+ mxc_isi->clk = devm_clk_get(dev, NULL);
+
+ if (IS_ERR(mxc_isi->clk)) {
+ dev_err(dev, "failed to get isi clk\n");
+ return PTR_ERR(mxc_isi->clk);
+ }
+
+ return 0;
+}
+
+static int mxc_imx8_clk_enable(struct mxc_isi_dev *mxc_isi)
+{
+ struct device *dev = &mxc_isi->pdev->dev;
+ int ret;
+
+ ret = clk_prepare_enable(mxc_isi->clk);
+ if (ret < 0) {
+ dev_err(dev, "%s, enable clk error\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mxc_imx8_clk_disable(struct mxc_isi_dev *mxc_isi)
+{
+ clk_disable_unprepare(mxc_isi->clk);
+}
+
+static struct mxc_isi_dev_ops mxc_imx8_clk_ops = {
+ .clk_get = mxc_imx8_clk_get,
+ .clk_enable = mxc_imx8_clk_enable,
+ .clk_disable = mxc_imx8_clk_disable,
+};
+
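+/* Input source mux values for the CHNL_CTRL source field (i.MX8QXP/QM) */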
+static struct mxc_isi_chan_src mxc_imx8_chan_src = {
+ .src_dc0 = 0,
+ .src_dc1 = 1,
+ .src_mipi0 = 2,
+ .src_mipi1 = 3,
+ .src_hdmi = 4,
+ .src_csi = 4,
+ .src_mem = 5,
+};
+
+/* For i.MX8QM/QXP B0 ISI IER version */
+static struct mxc_isi_ier_reg mxc_imx8_isi_ier_v0 = {
+ .oflw_y_buf_en = { .offset = 16, .mask = 0x10000 },
+ .oflw_u_buf_en = { .offset = 19, .mask = 0x80000 },
+ .oflw_v_buf_en = { .offset = 22, .mask = 0x400000 },
+
+ .excs_oflw_y_buf_en = { .offset = 17, .mask = 0x20000 },
+ .excs_oflw_u_buf_en = { .offset = 20, .mask = 0x100000 },
+ .excs_oflw_v_buf_en = { .offset = 23, .mask = 0x800000 },
+
+ .panic_y_buf_en = {.offset = 18, .mask = 0x40000 },
+ .panic_u_buf_en = {.offset = 21, .mask = 0x200000 },
+ .panic_v_buf_en = {.offset = 24, .mask = 0x1000000 },
+};
+
+/* Panic will assert when the buffers are 50% full */
+static struct mxc_isi_set_thd mxc_imx8_isi_thd_v0 = {
+ .panic_set_thd_y = { .mask = 0x03, .offset = 0, .threshold = 0x2 },
+ .panic_set_thd_u = { .mask = 0x18, .offset = 3, .threshold = 0x2 },
+ .panic_set_thd_v = { .mask = 0xC0, .offset = 6, .threshold = 0x2 },
+};
+
+/* For i.MX8QXP C0 and i.MX8MN ISI IER version */
+static struct mxc_isi_ier_reg mxc_imx8_isi_ier_v1 = {
+ .oflw_y_buf_en = { .offset = 19, .mask = 0x80000 },
+ .oflw_u_buf_en = { .offset = 21, .mask = 0x200000 },
+ .oflw_v_buf_en = { .offset = 23, .mask = 0x800000 },
+
+ .panic_y_buf_en = {.offset = 20, .mask = 0x100000 },
+ .panic_u_buf_en = {.offset = 22, .mask = 0x400000 },
+ .panic_v_buf_en = {.offset = 24, .mask = 0x1000000 },
+};
+
+/* For i.MX8MP ISI IER version */
+static struct mxc_isi_ier_reg mxc_imx8_isi_ier_v2 = {
+ .oflw_y_buf_en = { .offset = 18, .mask = 0x40000 },
+ .oflw_u_buf_en = { .offset = 20, .mask = 0x100000 },
+ .oflw_v_buf_en = { .offset = 22, .mask = 0x400000 },
+
+ .panic_y_buf_en = {.offset = 19, .mask = 0x80000 },
+ .panic_u_buf_en = {.offset = 21, .mask = 0x200000 },
+ .panic_v_buf_en = {.offset = 23, .mask = 0x800000 },
+};
+
+/* Panic will assert when the buffers are 50% full */
+static struct mxc_isi_set_thd mxc_imx8_isi_thd_v1 = {
+ .panic_set_thd_y = { .mask = 0x0000F, .offset = 0, .threshold = 0x7 },
+ .panic_set_thd_u = { .mask = 0x00F00, .offset = 8, .threshold = 0x7 },
+ .panic_set_thd_v = { .mask = 0xF0000, .offset = 16, .threshold = 0x7 },
+};
+
+static struct mxc_isi_plat_data mxc_imx8_data = {
+ .ops = &mxc_imx8_clk_ops,
+ .chan_src = &mxc_imx8_chan_src,
+ .ier_reg = &mxc_imx8_isi_ier_v0,
+ .set_thd = &mxc_imx8_isi_thd_v0,
+};
+
+static int mxc_imx8mn_clk_get(struct mxc_isi_dev *mxc_isi)
+{
+ struct device *dev = &mxc_isi->pdev->dev;
+
+ mxc_isi->clk_disp_axi = devm_clk_get(dev, "disp_axi");
+ if (IS_ERR(mxc_isi->clk_disp_axi)) {
+ dev_err(dev, "failed to get disp_axi clk\n");
+ return PTR_ERR(mxc_isi->clk_disp_axi);
+ }
+
+ mxc_isi->clk_disp_apb = devm_clk_get(dev, "disp_apb");
+ if (IS_ERR(mxc_isi->clk_disp_apb)) {
+ dev_err(dev, "failed to get disp_apb clk\n");
+ return PTR_ERR(mxc_isi->clk_disp_apb);
+ }
+
+ mxc_isi->clk_root_disp_axi = devm_clk_get(dev, "disp_axi_root");
+ if (IS_ERR(mxc_isi->clk_root_disp_axi)) {
+ dev_err(dev, "failed to get disp axi root clk\n");
+ return PTR_ERR(mxc_isi->clk_root_disp_axi);
+ }
+
+ mxc_isi->clk_root_disp_apb = devm_clk_get(dev, "disp_apb_root");
+ if (IS_ERR(mxc_isi->clk_root_disp_apb)) {
+ dev_err(dev, "failed to get disp apb root clk\n");
+ return PTR_ERR(mxc_isi->clk_root_disp_apb);
+ }
+
+ return 0;
+}
+
+static int mxc_imx8mn_clk_enable(struct mxc_isi_dev *mxc_isi)
+{
+ struct device *dev = &mxc_isi->pdev->dev;
+ int ret;
+
+ ret = clk_prepare_enable(mxc_isi->clk_disp_axi);
+ if (ret < 0) {
+ dev_err(dev, "prepare and enable axi clk error\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(mxc_isi->clk_disp_apb);
+ if (ret < 0) {
+ dev_err(dev, "prepare and enable abp clk error\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(mxc_isi->clk_root_disp_axi);
+ if (ret < 0) {
+ dev_err(dev, "prepare and enable axi root clk error\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(mxc_isi->clk_root_disp_apb);
+ if (ret < 0) {
+ dev_err(dev, "prepare and enable apb root clk error\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void mxc_imx8mn_clk_disable(struct mxc_isi_dev *mxc_isi)
+{
+ clk_disable_unprepare(mxc_isi->clk_root_disp_axi);
+ clk_disable_unprepare(mxc_isi->clk_root_disp_apb);
+ clk_disable_unprepare(mxc_isi->clk_disp_axi);
+ clk_disable_unprepare(mxc_isi->clk_disp_apb);
+}
+
+static struct mxc_isi_chan_src mxc_imx8mn_chan_src = {
+ .src_mipi0 = 0,
+ .src_mipi1 = 1,
+ /* For i.MX8MP */
+ .src_mem = 2,
+};
+
+static struct mxc_isi_dev_ops mxc_imx8mn_clk_ops = {
+ .clk_get = mxc_imx8mn_clk_get,
+ .clk_enable = mxc_imx8mn_clk_enable,
+ .clk_disable = mxc_imx8mn_clk_disable,
+};
+
+static struct mxc_isi_plat_data mxc_imx8mn_data = {
+ .ops = &mxc_imx8mn_clk_ops,
+ .chan_src = &mxc_imx8mn_chan_src,
+ .ier_reg = &mxc_imx8_isi_ier_v1,
+ .set_thd = &mxc_imx8_isi_thd_v1,
+};
+
+static int mxc_isi_parse_dt(struct mxc_isi_dev *mxc_isi)
+{
+ struct device *dev = &mxc_isi->pdev->dev;
+ struct device_node *node = dev->of_node;
+ int ret = 0;
+
+ mxc_isi->id = of_alias_get_id(node, "isi");
+
+ ret = of_property_read_u32_array(node, "interface", mxc_isi->interface, 3);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(dev, "%s, isi_%d,interface(%d, %d, %d)\n", __func__,
+ mxc_isi->id,
+ mxc_isi->interface[0],
+ mxc_isi->interface[1],
+ mxc_isi->interface[2]);
+ return 0;
+}
+
+static int mxc_isi_clk_get(struct mxc_isi_dev *mxc_isi)
+{
+ const struct mxc_isi_dev_ops *ops = mxc_isi->pdata->ops;
+
+ if (!ops || !ops->clk_get)
+ return -EINVAL;
+
+ return ops->clk_get(mxc_isi);
+}
+
+static int mxc_isi_clk_enable(struct mxc_isi_dev *mxc_isi)
+{
+ const struct mxc_isi_dev_ops *ops = mxc_isi->pdata->ops;
+
+ if (!ops || !ops->clk_enable)
+ return -EINVAL;
+
+ return ops->clk_enable(mxc_isi);
+}
+
+static void mxc_isi_clk_disable(struct mxc_isi_dev *mxc_isi)
+{
+ const struct mxc_isi_dev_ops *ops = mxc_isi->pdata->ops;
+
+ if (!ops || !ops->clk_disable)
+ return;
+
+ ops->clk_disable(mxc_isi);
+}
+
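+/*
+ * Walk the children of the reset controller referenced by the "resets"
+ * phandle and pick up the "isi,soft-resetn" and "isi,clk-enable" reset
+ * controls used to un-gate the block in disp_mix_sft_rstn() and
+ * disp_mix_clks_enable().
+ */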
+static int mxc_isi_of_parse_resets(struct mxc_isi_dev *mxc_isi)
+{
+ int ret;
+ struct device *dev = &mxc_isi->pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *parent, *child;
+ struct of_phandle_args args;
+ struct reset_control *rstc;
+ const char *compat;
+ uint32_t len, rstc_num = 0;
+
+ ret = of_parse_phandle_with_args(np, "resets", "#reset-cells",
+ 0, &args);
+ if (ret)
+ return ret;
+
+ parent = args.np;
+ for_each_child_of_node(parent, child) {
+ compat = of_get_property(child, "compatible", NULL);
+ if (!compat)
+ continue;
+
+ rstc = of_reset_control_array_get(child, false, false, true);
+ if (IS_ERR(rstc))
+ continue;
+
+ len = strlen(compat);
+ if (!of_compat_cmp("isi,soft-resetn", compat, len)) {
+ mxc_isi->soft_resetn = rstc;
+ rstc_num++;
+ } else if (!of_compat_cmp("isi,clk-enable", compat, len)) {
+ mxc_isi->clk_enable = rstc;
+ rstc_num++;
+ } else {
+ dev_warn(dev, "invalid isi reset node: %s\n", compat);
+ }
+ }
+
+ if (!rstc_num) {
+ dev_err(dev, "no invalid reset control exists\n");
+ return -EINVAL;
+ }
+
+ of_node_put(parent);
+ return 0;
+}
+
+static int mxc_isi_soc_match(struct mxc_isi_dev *mxc_isi,
+ const struct soc_device_attribute *data)
+{
+ struct mxc_isi_ier_reg *ier_reg = mxc_isi->pdata->ier_reg;
+ struct mxc_isi_set_thd *set_thd = mxc_isi->pdata->set_thd;
+ const struct soc_device_attribute *match;
+
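+ /* The matching soc device may not be registered yet; defer probe */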
+ match = soc_device_match(data);
+ if (!match)
+ return -EPROBE_DEFER;
+
+ mxc_isi->buf_active_reverse = false;
+
+ if (!strcmp(match->soc_id, "i.MX8QXP") ||
+ !strcmp(match->soc_id, "i.MX8QM")) {
+ /* Chip C0 */
+ if (strcmp(match->revision, "1.1") > 0) {
+ memcpy(ier_reg, &mxc_imx8_isi_ier_v1, sizeof(*ier_reg));
+ memcpy(set_thd, &mxc_imx8_isi_thd_v1, sizeof(*set_thd));
+ mxc_isi->buf_active_reverse = true;
+ }
+ } else if (!strcmp(match->soc_id, "i.MX8MP")) {
+ memcpy(ier_reg, &mxc_imx8_isi_ier_v2, sizeof(*ier_reg));
+ mxc_isi->buf_active_reverse = true;
+ }
+
+ return 0;
+}
+
+static int mxc_isi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct mxc_isi_dev *mxc_isi;
+ struct resource *res;
+ const struct of_device_id *of_id;
+ int ret = 0;
+
+ mxc_isi = devm_kzalloc(dev, sizeof(*mxc_isi), GFP_KERNEL);
+ if (!mxc_isi)
+ return -ENOMEM;
+
+ mxc_isi->pdev = pdev;
+ of_id = of_match_node(mxc_isi_of_match, dev->of_node);
+ if (!of_id)
+ return -EINVAL;
+
+ mxc_isi->pdata = of_id->data;
+ if (!mxc_isi->pdata) {
+ dev_err(dev, "Can't get platform device data\n");
+ return -EINVAL;
+ }
+
+ ret = mxc_isi_soc_match(mxc_isi, imx8_soc);
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Can't match soc version\n");
+ return ret;
+ }
+
+ ret = mxc_isi_parse_dt(mxc_isi);
+ if (ret < 0)
+ return ret;
+
+ if (mxc_isi->id >= MXC_ISI_MAX_DEVS || mxc_isi->id < 0) {
+ dev_err(dev, "Invalid driver data or device id (%d)\n",
+ mxc_isi->id);
+ return -EINVAL;
+ }
+
+ mxc_isi->chain = syscon_regmap_lookup_by_phandle(dev->of_node, "isi_chain");
+ if (IS_ERR(mxc_isi->chain))
+ mxc_isi->chain = NULL;
+
+ spin_lock_init(&mxc_isi->slock);
+ mutex_init(&mxc_isi->lock);
+ atomic_set(&mxc_isi->usage_count, 0);
+
+ if (!of_property_read_bool(dev->of_node, "no-reset-control")) {
+ ret = mxc_isi_of_parse_resets(mxc_isi);
+ if (ret) {
+ dev_warn(dev, "Can not parse reset control\n");
+ return ret;
+ }
+ }
+
+ ret = mxc_isi_clk_get(mxc_isi);
+ if (ret < 0) {
+ dev_err(dev, "ISI_%d get clocks fail\n", mxc_isi->id);
+ return ret;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mxc_isi->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(mxc_isi->regs)) {
+ dev_err(dev, "Failed to get ISI register map\n");
+ return PTR_ERR(mxc_isi->regs);
+ }
+
+ ret = mxc_isi_clk_enable(mxc_isi);
+ if (ret < 0) {
+ dev_err(dev, "ISI_%d enable clocks fail\n", mxc_isi->id);
+ return ret;
+ }
+ disp_mix_sft_rstn(mxc_isi->soft_resetn, false);
+ disp_mix_clks_enable(mxc_isi->clk_enable, true);
+
+ mxc_isi_clean_registers(mxc_isi);
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!res) {
+ dev_err(dev, "Failed to get IRQ resource\n");
+ goto err;
+ }
+ ret = devm_request_irq(dev, res->start, mxc_isi_irq_handler,
+ 0, dev_name(dev), mxc_isi);
+ if (ret < 0) {
+ dev_err(dev, "failed to install irq (%d)\n", ret);
+ goto err;
+ }
+
+ mxc_isi_channel_set_chain_buf(mxc_isi);
+
+ ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
+ if (ret < 0)
+ dev_warn(dev, "Populate child platform device fail\n");
+
+ mxc_isi_clk_disable(mxc_isi);
+
+ platform_set_drvdata(pdev, mxc_isi);
+ pm_runtime_enable(dev);
+
+ dev_info(dev, "mxc_isi.%d registered successfully\n", mxc_isi->id);
+ return 0;
+
+err:
+ disp_mix_clks_enable(mxc_isi->clk_enable, false);
+ disp_mix_sft_rstn(mxc_isi->soft_resetn, true);
+ mxc_isi_clk_disable(mxc_isi);
+ return -ENXIO;
+}
+
+static int mxc_isi_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+
+ of_platform_depopulate(dev);
+ pm_runtime_disable(dev);
+
+ return 0;
+}
+
+static int mxc_isi_pm_suspend(struct device *dev)
+{
+ struct mxc_isi_dev *mxc_isi = dev_get_drvdata(dev);
+
+ if (mxc_isi->is_streaming) {
+ dev_warn(dev, "running, prevent entering suspend.\n");
+ return -EAGAIN;
+ }
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int mxc_isi_pm_resume(struct device *dev)
+{
+ return pm_runtime_force_resume(dev);
+}
+
+static int mxc_isi_runtime_suspend(struct device *dev)
+{
+ struct mxc_isi_dev *mxc_isi = dev_get_drvdata(dev);
+
+ disp_mix_clks_enable(mxc_isi->clk_enable, false);
+ mxc_isi_clk_disable(mxc_isi);
+
+ return 0;
+}
+
+static int mxc_isi_runtime_resume(struct device *dev)
+{
+ struct mxc_isi_dev *mxc_isi = dev_get_drvdata(dev);
+ int ret;
+
+ ret = mxc_isi_clk_enable(mxc_isi);
+ if (ret) {
+ dev_err(dev, "%s clk enable fail\n", __func__);
+ return ret;
+ }
+ disp_mix_sft_rstn(mxc_isi->soft_resetn, false);
+ disp_mix_clks_enable(mxc_isi->clk_enable, true);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mxc_isi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mxc_isi_pm_suspend, mxc_isi_pm_resume)
+ SET_RUNTIME_PM_OPS(mxc_isi_runtime_suspend, mxc_isi_runtime_resume, NULL)
+};
+
+static const struct of_device_id mxc_isi_of_match[] = {
+ {.compatible = "fsl,imx8-isi", .data = &mxc_imx8_data },
+ {.compatible = "fsl,imx8mn-isi", .data = &mxc_imx8mn_data },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mxc_isi_of_match);
+
+static struct platform_driver mxc_isi_driver = {
+ .probe = mxc_isi_probe,
+ .remove = mxc_isi_remove,
+ .driver = {
+ .of_match_table = mxc_isi_of_match,
+ .name = MXC_ISI_DRIVER_NAME,
+ .pm = &mxc_isi_pm_ops,
+ }
+};
+module_platform_driver(mxc_isi_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("IMX8 Image Subsystem driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ISI");
+MODULE_VERSION("1.0");
diff --git a/drivers/staging/media/imx/imx8-isi-core.h b/drivers/staging/media/imx/imx8-isi-core.h
new file mode 100644
index 000000000000..c955ec21c8d8
--- /dev/null
+++ b/drivers/staging/media/imx/imx8-isi-core.h
@@ -0,0 +1,411 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019-2020 NXP
+ */
+
+#ifndef __MXC_ISI_CORE_H__
+#define __MXC_ISI_CORE_H__
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/list.h>
+#include <linux/mfd/syscon.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <media/media-device.h>
+#include <media/media-entity.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ctrls.h>
+#include <media/videobuf2-dma-contig.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/sys_soc.h>
+
+#include "imx8-common.h"
+
+#define MXC_ISI_DRIVER_NAME "mxc-isi"
+#define MXC_ISI_CAPTURE "mxc-isi-cap"
+#define MXC_ISI_M2M "mxc-isi-m2m"
+#define MXC_MAX_PLANES 3
+
+struct mxc_isi_dev;
+
+enum mxc_isi_out_fmt {
+ MXC_ISI_OUT_FMT_RGBA32 = 0x0,
+ MXC_ISI_OUT_FMT_ABGR32,
+ MXC_ISI_OUT_FMT_ARGB32,
+ MXC_ISI_OUT_FMT_RGBX32,
+ MXC_ISI_OUT_FMT_XBGR32,
+ MXC_ISI_OUT_FMT_XRGB32,
+ MXC_ISI_OUT_FMT_RGB32P,
+ MXC_ISI_OUT_FMT_BGR32P,
+ MXC_ISI_OUT_FMT_A2BGR10,
+ MXC_ISI_OUT_FMT_A2RGB10,
+ MXC_ISI_OUT_FMT_RGB565,
+ MXC_ISI_OUT_FMT_RAW8,
+ MXC_ISI_OUT_FMT_RAW10,
+ MXC_ISI_OUT_FMT_RAW10P,
+ MXC_ISI_OUT_FMT_RAW12,
+ MXC_ISI_OUT_FMT_RAW16,
+ MXC_ISI_OUT_FMT_YUV444_1P8P,
+ MXC_ISI_OUT_FMT_YUV444_2P8P,
+ MXC_ISI_OUT_FMT_YUV444_3P8P,
+ MXC_ISI_OUT_FMT_YUV444_1P8,
+ MXC_ISI_OUT_FMT_YUV444_1P10,
+ MXC_ISI_OUT_FMT_YUV444_2P10,
+ MXC_ISI_OUT_FMT_YUV444_3P10,
+ MXC_ISI_OUT_FMT_YUV444_1P10P = 0x18,
+ MXC_ISI_OUT_FMT_YUV444_2P10P,
+ MXC_ISI_OUT_FMT_YUV444_3P10P,
+ MXC_ISI_OUT_FMT_YUV444_1P12 = 0x1C,
+ MXC_ISI_OUT_FMT_YUV444_2P12,
+ MXC_ISI_OUT_FMT_YUV444_3P12,
+ MXC_ISI_OUT_FMT_YUV422_1P8P = 0x20,
+ MXC_ISI_OUT_FMT_YUV422_2P8P,
+ MXC_ISI_OUT_FMT_YUV422_3P8P,
+ MXC_ISI_OUT_FMT_YUV422_1P10 = 0x24,
+ MXC_ISI_OUT_FMT_YUV422_2P10,
+ MXC_ISI_OUT_FMT_YUV422_3P10,
+ MXC_ISI_OUT_FMT_YUV422_1P10P = 0x28,
+ MXC_ISI_OUT_FMT_YUV422_2P10P,
+ MXC_ISI_OUT_FMT_YUV422_3P10P,
+ MXC_ISI_OUT_FMT_YUV422_1P12 = 0x2C,
+ MXC_ISI_OUT_FMT_YUV422_2P12,
+ MXC_ISI_OUT_FMT_YUV422_3P12,
+ MXC_ISI_OUT_FMT_YUV420_2P8P = 0x31,
+ MXC_ISI_OUT_FMT_YUV420_3P8P,
+ MXC_ISI_OUT_FMT_YUV420_2P10 = 0x35,
+ MXC_ISI_OUT_FMT_YUV420_3P10,
+ MXC_ISI_OUT_FMT_YUV420_2P10P = 0x39,
+ MXC_ISI_OUT_FMT_YUV420_3P10P,
+ MXC_ISI_OUT_FMT_YUV420_2P12 = 0x3D,
+ MXC_ISI_OUT_FMT_YUV420_3P12,
+};
+
+enum mxc_isi_in_fmt {
+ MXC_ISI_IN_FMT_BGR8P = 0x0,
+};
+
+enum mxc_isi_m2m_in_fmt {
+ MXC_ISI_M2M_IN_FMT_BGR8P = 0x0,
+ MXC_ISI_M2M_IN_FMT_RGB8P,
+ MXC_ISI_M2M_IN_FMT_XRGB8,
+ MXC_ISI_M2M_IN_FMT_RGBX8,
+ MXC_ISI_M2M_IN_FMT_XBGR8,
+ MXC_ISI_M2M_IN_FMT_RGB565,
+ MXC_ISI_M2M_IN_FMT_A2BGR10,
+ MXC_ISI_M2M_IN_FMT_A2RGB10,
+ MXC_ISI_M2M_IN_FMT_YUV444_1P8P,
+ MXC_ISI_M2M_IN_FMT_YUV444_1P10,
+ MXC_ISI_M2M_IN_FMT_YUV444_1P10P,
+ MXC_ISI_M2M_IN_FMT_YUV444_1P12,
+ MXC_ISI_M2M_IN_FMT_YUV444_1P8,
+ MXC_ISI_M2M_IN_FMT_YUV422_1P8P,
+ MXC_ISI_M2M_IN_FMT_YUV422_1P10,
+ MXC_ISI_M2M_IN_FMT_YUV422_1P10P,
+};
+
+struct mxc_isi_fmt {
+ char *name;
+ u32 mbus_code;
+ u32 fourcc;
+ u32 color;
+ u16 memplanes;
+ u16 colplanes;
+ u8 colorspace;
+ u8 depth[MXC_MAX_PLANES];
+ u16 mdataplanes;
+ u16 flags;
+};
+
+struct mxc_isi_ctrls {
+ struct v4l2_ctrl_handler handler;
+ struct v4l2_ctrl *hflip;
+ struct v4l2_ctrl *vflip;
+ struct v4l2_ctrl *alpha;
+ struct v4l2_ctrl *num_cap_buf;
+ struct v4l2_ctrl *num_out_buf;
+ bool ready;
+};
+
+/**
+ * struct frame_addr - physical address set for DMA
+ * @y: luminance plane physical address
+ * @cb: Cb plane physical address
+ * @cr: Cr plane physical address
+ */
+struct frame_addr {
+ u32 y;
+ u32 cb;
+ u32 cr;
+};
+
+/**
+ * struct mxc_isi_frame - source/target frame properties
+ * @o_width: original image width from sensor
+ * @o_height: original image height from sensor
+ * @c_width: crop image width set by s_selection
+ * @c_height: crop image height set by s_selection
+ * @h_off: crop horizontal pixel offset
+ * @v_off: crop vertical pixel offset
+ * @width: output image pixel width
+ * @height: output image pixel height
+ * @sizeimage: image size for each plane
+ * @bytesperline: bytesperline value for each plane
+ * @fmt: color format pointer
+ */
+struct mxc_isi_frame {
+ u32 o_width;
+ u32 o_height;
+ u32 c_width;
+ u32 c_height;
+ u32 h_off;
+ u32 v_off;
+ u32 width;
+ u32 height;
+ unsigned int sizeimage[MXC_MAX_PLANES];
+ unsigned int bytesperline[MXC_MAX_PLANES];
+ struct mxc_isi_fmt *fmt;
+};
+
+struct mxc_isi_roi_alpha {
+ u8 alpha;
+ struct v4l2_rect rect;
+};
+
+struct mxc_isi_buffer {
+ struct vb2_v4l2_buffer v4l2_buf;
+ struct list_head list;
+ struct frame_addr paddr;
+ enum mxc_isi_buf_id id;
+ bool discard;
+};
+
+struct mxc_isi_m2m_dev {
+ struct platform_device *pdev;
+
+ struct video_device vdev;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_m2m_dev *m2m_dev;
+ struct v4l2_fh fh;
+ struct v4l2_pix_format_mplane pix;
+
+ struct list_head out_active;
+ struct mxc_isi_ctrls ctrls;
+
+ struct mxc_isi_frame src_f;
+ struct mxc_isi_frame dst_f;
+
+ struct mutex lock;
+ spinlock_t slock;
+
+ unsigned int aborting;
+ unsigned int frame_count;
+
+ u32 req_cap_buf_num;
+ u32 req_out_buf_num;
+
+ u8 id;
+};
+
+struct mxc_isi_ctx {
+ struct mxc_isi_m2m_dev *isi_m2m;
+ struct v4l2_fh fh;
+};
+
+struct mxc_isi_chan_src {
+ u32 src_dc0;
+ u32 src_dc1;
+ u32 src_mipi0;
+ u32 src_mipi1;
+ u32 src_hdmi;
+ u32 src_csi;
+ u32 src_mem;
+};
+
+struct mxc_isi_reg {
+ u32 offset;
+ u32 mask;
+};
+
+struct mxc_isi_ier_reg {
+ /* Overflow Y/U/V trigger enable */
+ struct mxc_isi_reg oflw_y_buf_en;
+ struct mxc_isi_reg oflw_u_buf_en;
+ struct mxc_isi_reg oflw_v_buf_en;
+
+ /* Excess overflow Y/U/V trigger enable */
+ struct mxc_isi_reg excs_oflw_y_buf_en;
+ struct mxc_isi_reg excs_oflw_u_buf_en;
+ struct mxc_isi_reg excs_oflw_v_buf_en;
+
+ /* Panic Y/U/V trigger enable */
+ struct mxc_isi_reg panic_y_buf_en;
+ struct mxc_isi_reg panic_v_buf_en;
+ struct mxc_isi_reg panic_u_buf_en;
+};
+
+struct mxc_isi_dev_ops {
+ int (*clk_get)(struct mxc_isi_dev *mxc_isi);
+ int (*clk_enable)(struct mxc_isi_dev *mxc_isi);
+ void (*clk_disable)(struct mxc_isi_dev *mxc_isi);
+};
+
+struct mxc_isi_panic_thd {
+ u32 mask;
+ u32 offset;
+ u32 threshold;
+};
+
+struct mxc_isi_set_thd {
+ struct mxc_isi_panic_thd panic_set_thd_y;
+ struct mxc_isi_panic_thd panic_set_thd_u;
+ struct mxc_isi_panic_thd panic_set_thd_v;
+};
+
+struct mxc_isi_plat_data {
+ struct mxc_isi_dev_ops *ops;
+ struct mxc_isi_chan_src *chan_src;
+ struct mxc_isi_ier_reg *ier_reg;
+ struct mxc_isi_set_thd *set_thd;
+};
+
+struct mxc_isi_cap_dev {
+ struct v4l2_subdev sd;
+ struct video_device vdev;
+ struct v4l2_fh fh;
+ struct vb2_queue vb2_q;
+ struct v4l2_pix_format_mplane pix;
+
+ struct mxc_isi_dev *mxc_isi;
+ struct platform_device *pdev;
+ struct mxc_isi_ctrls ctrls;
+ struct mxc_isi_buffer buf_discard[2];
+
+ struct media_pad cap_pad;
+ struct media_pad sd_pads[MXC_ISI_SD_PADS_NUM];
+
+ struct list_head out_pending;
+ struct list_head out_active;
+ struct list_head out_discard;
+
+ struct mxc_isi_frame src_f;
+ struct mxc_isi_frame dst_f;
+
+ u32 frame_count;
+ u32 id;
+ bool is_link_setup;
+
+ struct mutex lock;
+ spinlock_t slock;
+
+ /* discard buffers queued when no capture buffer is available */
+ size_t discard_size[MXC_MAX_PLANES];
+ void *discard_buffer[MXC_MAX_PLANES];
+ dma_addr_t discard_buffer_dma[MXC_MAX_PLANES];
+};
+
+struct mxc_isi_dev {
+ /* Pointer to isi capture child device driver data */
+ struct mxc_isi_cap_dev *isi_cap;
+
+ /* Pointer to isi m2m child device driver data */
+ struct mxc_isi_m2m_dev *isi_m2m;
+
+ struct platform_device *pdev;
+
+ /* clk for imx8qxp/qm platform */
+ struct clk *clk;
+
+ /* clks for imx8mn platform */
+ struct clk *clk_disp_axi;
+ struct clk *clk_disp_apb;
+ struct clk *clk_root_disp_axi;
+ struct clk *clk_root_disp_apb;
+
+ const struct mxc_isi_plat_data *pdata;
+
+ struct reset_control *soft_resetn;
+ struct reset_control *clk_enable;
+
+ struct regmap *chain;
+
+ struct mutex lock;
+ spinlock_t slock;
+
+ void __iomem *regs;
+
+ u8 chain_buf;
+ u8 alpha;
+ bool m2m_enabled;
+ bool buf_active_reverse;
+
+ /* manage shared ISI channel resources */
+ atomic_t usage_count;
+
+ /* scale factor */
+ u32 xfactor;
+ u32 yfactor;
+ u32 pre_dec_x;
+ u32 pre_dec_y;
+
+ u32 status;
+
+ u32 interface[MAX_PORTS];
+ int id;
+
+ unsigned int hflip:1;
+ unsigned int vflip:1;
+ unsigned int cscen:1;
+ unsigned int scale:1;
+ unsigned int alphaen:1;
+ unsigned int crop:1;
+ unsigned int deinterlace:1;
+ unsigned int is_streaming:1;
+};
+
+static inline void set_frame_bounds(struct mxc_isi_frame *f,
+ u32 width, u32 height)
+{
+ f->o_width = width;
+ f->o_height = height;
+ f->c_width = width;
+ f->c_height = height;
+ f->width = width;
+ f->height = height;
+}
+
+static inline void set_frame_out(struct mxc_isi_frame *f,
+ u32 width, u32 height)
+{
+ f->c_width = width;
+ f->c_height = height;
+ f->width = width;
+ f->height = height;
+}
+
+static inline void set_frame_crop(struct mxc_isi_frame *f,
+ u32 left, u32 top, u32 width, u32 height)
+{
+ f->h_off = left;
+ f->v_off = top;
+ f->c_width = width;
+ f->c_height = height;
+}
+#endif /* __MXC_ISI_CORE_H__ */
diff --git a/drivers/staging/media/imx/imx8-isi-hw.c b/drivers/staging/media/imx/imx8-isi-hw.c
new file mode 100644
index 000000000000..f8e804483a48
--- /dev/null
+++ b/drivers/staging/media/imx/imx8-isi-hw.c
@@ -0,0 +1,840 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019-2020 NXP
+ *
+ */
+#include <dt-bindings/pinctrl/pads-imx8qxp.h>
+
+#include <linux/module.h>
+#include "imx8-isi-hw.h"
+#include "imx8-common.h"
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("IMX8 Image Sensor Interface Hardware driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+
+#define ISI_DOWNSCALE_THRESHOLD 0x4000
+
+#ifdef DEBUG
+void dump_isi_regs(struct mxc_isi_dev *mxc_isi)
+{
+ struct device *dev = &mxc_isi->pdev->dev;
+ struct {
+ u32 offset;
+ const char *const name;
+ } registers[] = {
+ { 0x00, "CHNL_CTRL" },
+ { 0x04, "CHNL_IMG_CTRL" },
+ { 0x08, "CHNL_OUT_BUF_CTRL" },
+ { 0x0C, "CHNL_IMG_CFG" },
+ { 0x10, "CHNL_IER" },
+ { 0x14, "CHNL_STS" },
+ { 0x18, "CHNL_SCALE_FACTOR" },
+ { 0x1C, "CHNL_SCALE_OFFSET" },
+ { 0x20, "CHNL_CROP_ULC" },
+ { 0x24, "CHNL_CROP_LRC" },
+ { 0x28, "CHNL_CSC_COEFF0" },
+ { 0x2C, "CHNL_CSC_COEFF1" },
+ { 0x30, "CHNL_CSC_COEFF2" },
+ { 0x34, "CHNL_CSC_COEFF3" },
+ { 0x38, "CHNL_CSC_COEFF4" },
+ { 0x3C, "CHNL_CSC_COEFF5" },
+ { 0x40, "CHNL_ROI_0_ALPHA" },
+ { 0x44, "CHNL_ROI_0_ULC" },
+ { 0x48, "CHNL_ROI_0_LRC" },
+ { 0x4C, "CHNL_ROI_1_ALPHA" },
+ { 0x50, "CHNL_ROI_1_ULC" },
+ { 0x54, "CHNL_ROI_1_LRC" },
+ { 0x58, "CHNL_ROI_2_ALPHA" },
+ { 0x5C, "CHNL_ROI_2_ULC" },
+ { 0x60, "CHNL_ROI_2_LRC" },
+ { 0x64, "CHNL_ROI_3_ALPHA" },
+ { 0x68, "CHNL_ROI_3_ULC" },
+ { 0x6C, "CHNL_ROI_3_LRC" },
+ { 0x70, "CHNL_OUT_BUF1_ADDR_Y" },
+ { 0x74, "CHNL_OUT_BUF1_ADDR_U" },
+ { 0x78, "CHNL_OUT_BUF1_ADDR_V" },
+ { 0x7C, "CHNL_OUT_BUF_PITCH" },
+ { 0x80, "CHNL_IN_BUF_ADDR" },
+ { 0x84, "CHNL_IN_BUF_PITCH" },
+ { 0x88, "CHNL_MEM_RD_CTRL" },
+ { 0x8C, "CHNL_OUT_BUF2_ADDR_Y" },
+ { 0x90, "CHNL_OUT_BUF2_ADDR_U" },
+ { 0x94, "CHNL_OUT_BUF2_ADDR_V" },
+ { 0x98, "CHNL_SCL_IMG_CFG" },
+ { 0x9C, "CHNL_FLOW_CTRL" },
+ };
+ u32 i;
+
+ dev_dbg(dev, "ISI CHNLC register dump, isi%d\n", mxc_isi->id);
+ for (i = 0; i < ARRAY_SIZE(registers); i++) {
+ u32 reg = readl(mxc_isi->regs + registers[i].offset);
+ dev_dbg(dev, "%20s[0x%.2x]: %.2x\n",
+ registers[i].name, registers[i].offset, reg);
+ }
+}
+#else
+void dump_isi_regs(struct mxc_isi_dev *mxc_isi)
+{
+}
+#endif
+
+/*
+ * A2,A1, B1, A3, B3, B2,
+ * C2, C1, D1, C3, D3, D2
+ */
+static const u32 coeffs[2][6] = {
+ /* YUV2RGB */
+ { 0x0000012A, 0x012A0198, 0x0730079C,
+ 0x0204012A, 0x01F00000, 0x01800180 },
+
+ /* RGB->YUV */
+ { 0x00810041, 0x07db0019, 0x007007b6,
+ 0x07a20070, 0x001007ee, 0x00800080 },
+};
+
+static void printk_pixelformat(char *prefix, int val)
+{
+ pr_info("%s %c%c%c%c\n", prefix ? prefix : "pixelformat",
+ val & 0xff,
+ (val >> 8) & 0xff,
+ (val >> 16) & 0xff,
+ (val >> 24) & 0xff);
+}
+
+static bool is_rgb(u32 pix_fmt)
+{
+ if ((pix_fmt == V4L2_PIX_FMT_RGB565) ||
+ (pix_fmt == V4L2_PIX_FMT_RGB24) ||
+ (pix_fmt == V4L2_PIX_FMT_RGB32) ||
+ (pix_fmt == V4L2_PIX_FMT_BGR32) ||
+ (pix_fmt == V4L2_PIX_FMT_XRGB32) ||
+ (pix_fmt == V4L2_PIX_FMT_XBGR32) ||
+ (pix_fmt == V4L2_PIX_FMT_BGR24) ||
+ (pix_fmt == V4L2_PIX_FMT_RGBA32) ||
+ (pix_fmt == V4L2_PIX_FMT_ABGR32) ||
+ (pix_fmt == V4L2_PIX_FMT_ARGB32))
+ return true;
+ else
+ return false;
+}
+
+static bool is_yuv(u32 pix_fmt)
+{
+ if ((pix_fmt == V4L2_PIX_FMT_YUYV) ||
+ (pix_fmt == V4L2_PIX_FMT_YUV32) ||
+ (pix_fmt == V4L2_PIX_FMT_YUV444M) ||
+ (pix_fmt == V4L2_PIX_FMT_YUV24) ||
+ (pix_fmt == V4L2_PIX_FMT_NV12))
+ return true;
+ else
+ return false;
+}
+
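+/*
+ * CHNL_STS bits 9 and 8 report which of the two output buffers the
+ * channel is writing to; some SoC revisions swap the meaning of the
+ * two bits (buf_active_reverse).
+ */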
+bool is_buf_active(struct mxc_isi_dev *mxc_isi, int buf_id)
+{
+ u32 status = mxc_isi->status;
+ bool reverse = mxc_isi->buf_active_reverse;
+
+ if (buf_id == 1)
+ return reverse ? (status & 0x100) : (status & 0x200);
+
+ return reverse ? (status & 0x200) : (status & 0x100);
+}
+EXPORT_SYMBOL_GPL(is_buf_active);
+
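+/*
+ * A single ISI line buffer covers frames up to ISI_2K pixels wide; for
+ * wider frames the neighbouring channel's line buffer is chained in and
+ * its clock enabled through the chain regmap.
+ */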
+static void chain_buf(struct mxc_isi_dev *mxc_isi, struct mxc_isi_frame *frm)
+{
+ u32 val;
+
+ if (frm->o_width > ISI_2K) {
+ val = readl(mxc_isi->regs + CHNL_CTRL);
+ val &= ~CHNL_CTRL_CHAIN_BUF_MASK;
+ val |= (CHNL_CTRL_CHAIN_BUF_2_CHAIN << CHNL_CTRL_CHAIN_BUF_OFFSET);
+ writel(val, mxc_isi->regs + CHNL_CTRL);
+ if (mxc_isi->chain)
+ regmap_write(mxc_isi->chain, CHNL_CTRL, CHNL_CTRL_CLK_EN_MASK);
+ mxc_isi->chain_buf = 1;
+ } else {
+ val = readl(mxc_isi->regs + CHNL_CTRL);
+ val &= ~CHNL_CTRL_CHAIN_BUF_MASK;
+ writel(val, mxc_isi->regs + CHNL_CTRL);
+ mxc_isi->chain_buf = 0;
+ }
+}
+
+struct device *mxc_isi_dev_get_parent(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *parent;
+ struct platform_device *parent_pdev;
+
+ if (!pdev)
+ return NULL;
+
+ /* Get parent for isi capture device */
+ parent = of_get_parent(dev->of_node);
+ parent_pdev = of_find_device_by_node(parent);
+ if (!parent_pdev) {
+ of_node_put(parent);
+ return NULL;
+ }
+ of_node_put(parent);
+
+ return &parent_pdev->dev;
+}
+EXPORT_SYMBOL_GPL(mxc_isi_dev_get_parent);
+
+struct mxc_isi_dev *mxc_isi_get_hostdata(struct platform_device *pdev)
+{
+ struct mxc_isi_dev *mxc_isi;
+
+ if (!pdev || !pdev->dev.parent)
+ return NULL;
+
+ mxc_isi = (struct mxc_isi_dev *)dev_get_drvdata(pdev->dev.parent);
+ if (!mxc_isi) {
+ dev_err(&pdev->dev, "Cann't get host data\n");
+ return NULL;
+ }
+
+ return mxc_isi;
+}
+EXPORT_SYMBOL_GPL(mxc_isi_get_hostdata);
+
+void mxc_isi_channel_set_outbuf(struct mxc_isi_dev *mxc_isi,
+ struct mxc_isi_buffer *buf)
+{
+ struct vb2_buffer *vb2_buf = &buf->v4l2_buf.vb2_buf;
+ u32 framecount = buf->v4l2_buf.sequence;
+ struct frame_addr *paddr = &buf->paddr;
+ struct mxc_isi_cap_dev *isi_cap;
+ struct v4l2_pix_format_mplane *pix;
+ int val = 0;
+
+ if (buf->discard) {
+ isi_cap = mxc_isi->isi_cap;
+ pix = &isi_cap->pix;
+ paddr->y = isi_cap->discard_buffer_dma[0];
+ if (pix->num_planes == 2)
+ paddr->cb = isi_cap->discard_buffer_dma[1];
+ if (pix->num_planes == 3) {
+ paddr->cb = isi_cap->discard_buffer_dma[1];
+ paddr->cr = isi_cap->discard_buffer_dma[2];
+ }
+ } else {
+ paddr->y = vb2_dma_contig_plane_dma_addr(vb2_buf, 0);
+
+ if (vb2_buf->num_planes == 2)
+ paddr->cb = vb2_dma_contig_plane_dma_addr(vb2_buf, 1);
+ if (vb2_buf->num_planes == 3) {
+ paddr->cb = vb2_dma_contig_plane_dma_addr(vb2_buf, 1);
+ paddr->cr = vb2_dma_contig_plane_dma_addr(vb2_buf, 2);
+ }
+ }
+
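+ /*
+ * The channel ping-pongs between output buffers 1 and 2: frame 0
+ * is written to BUF1, frame 1 to BUF2, and after that the buffer
+ * the hardware is not currently filling is reprogrammed. Toggling
+ * the LOAD bit makes the hardware latch the new address.
+ */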
+ val = readl(mxc_isi->regs + CHNL_OUT_BUF_CTRL);
+
+ if (framecount == 0 || ((is_buf_active(mxc_isi, 2)) && (framecount != 1))) {
+ writel(paddr->y, mxc_isi->regs + CHNL_OUT_BUF1_ADDR_Y);
+ writel(paddr->cb, mxc_isi->regs + CHNL_OUT_BUF1_ADDR_U);
+ writel(paddr->cr, mxc_isi->regs + CHNL_OUT_BUF1_ADDR_V);
+ val ^= CHNL_OUT_BUF_CTRL_LOAD_BUF1_ADDR_MASK;
+ buf->id = MXC_ISI_BUF1;
+ } else if (framecount == 1 || is_buf_active(mxc_isi, 1)) {
+ writel(paddr->y, mxc_isi->regs + CHNL_OUT_BUF2_ADDR_Y);
+ writel(paddr->cb, mxc_isi->regs + CHNL_OUT_BUF2_ADDR_U);
+ writel(paddr->cr, mxc_isi->regs + CHNL_OUT_BUF2_ADDR_V);
+ val ^= CHNL_OUT_BUF_CTRL_LOAD_BUF2_ADDR_MASK;
+ buf->id = MXC_ISI_BUF2;
+ }
+ writel(val, mxc_isi->regs + CHNL_OUT_BUF_CTRL);
+}
+EXPORT_SYMBOL_GPL(mxc_isi_channel_set_outbuf);
+
+void mxc_isi_channel_set_m2m_src_addr(struct mxc_isi_dev *mxc_isi,
+ struct mxc_isi_buffer *buf)
+{
+ struct vb2_buffer *vb2_buf = &buf->v4l2_buf.vb2_buf;
+ struct frame_addr *paddr = &buf->paddr;
+
+ /* Only one plane is supported */
+ paddr->y = vb2_dma_contig_plane_dma_addr(vb2_buf, 0);
+ writel(paddr->y, mxc_isi->regs + CHNL_IN_BUF_ADDR);
+}
+EXPORT_SYMBOL_GPL(mxc_isi_channel_set_m2m_src_addr);
+
+void mxc_isi_channel_sw_reset(struct mxc_isi_dev *mxc_isi)
+{
+ u32 val;
+
+ val = readl(mxc_isi->regs + CHNL_CTRL);
+ val |= CHNL_CTRL_SW_RST;
+ writel(val, mxc_isi->regs + CHNL_CTRL);
+ mdelay(5);
+ val &= ~CHNL_CTRL_SW_RST;
+ writel(val, mxc_isi->regs + CHNL_CTRL);
+}
+EXPORT_SYMBOL_GPL(mxc_isi_channel_sw_reset);
+
+void mxc_isi_channel_source_config(struct mxc_isi_dev *mxc_isi)
+{
+ u32 val;
+
+ val = readl(mxc_isi->regs + CHNL_CTRL);
+ val &= ~(CHNL_CTRL_MIPI_VC_ID_MASK |
+ CHNL_CTRL_SRC_INPUT_MASK | CHNL_CTRL_SRC_TYPE_MASK);
+
+ switch (mxc_isi->interface[IN_PORT]) {
+ case ISI_INPUT_INTERFACE_MIPI0_CSI2:
+ val |= mxc_isi->pdata->chan_src->src_mipi0;
+ if (mxc_isi->interface[SUB_IN_PORT] <= CHNL_CTRL_MIPI_VC_ID_VC3 &&
+ mxc_isi->interface[SUB_IN_PORT] >= CHNL_CTRL_MIPI_VC_ID_VC0)
+ val |= (mxc_isi->interface[SUB_IN_PORT] << CHNL_CTRL_MIPI_VC_ID_OFFSET);
+ break;
+ case ISI_INPUT_INTERFACE_MIPI1_CSI2:
+ val |= mxc_isi->pdata->chan_src->src_mipi1;
+ if (mxc_isi->interface[SUB_IN_PORT] <= CHNL_CTRL_MIPI_VC_ID_VC3 &&
+ mxc_isi->interface[SUB_IN_PORT] >= CHNL_CTRL_MIPI_VC_ID_VC0)
+ val |= (mxc_isi->interface[SUB_IN_PORT] << CHNL_CTRL_MIPI_VC_ID_OFFSET);
+ break;
+ case ISI_INPUT_INTERFACE_DC0:
+ val |= mxc_isi->pdata->chan_src->src_dc0;
+ break;
+ case ISI_INPUT_INTERFACE_DC1:
+ val |= mxc_isi->pdata->chan_src->src_dc1;
+ break;
+ case ISI_INPUT_INTERFACE_HDMI:
+ val |= mxc_isi->pdata->chan_src->src_hdmi;
+ break;
+ case ISI_INPUT_INTERFACE_PARALLEL_CSI:
+ val |= mxc_isi->pdata->chan_src->src_csi;
+ break;
+ case ISI_INPUT_INTERFACE_MEM:
+ val |= mxc_isi->pdata->chan_src->src_mem;
+ val |= (CHNL_CTRL_SRC_TYPE_MEMORY << CHNL_CTRL_SRC_TYPE_OFFSET);
+ break;
+ default:
+ dev_err(&mxc_isi->pdev->dev, "invalid interface\n");
+ break;
+ }
+
+ writel(val, mxc_isi->regs + CHNL_CTRL);
+}
+EXPORT_SYMBOL_GPL(mxc_isi_channel_source_config);
+
+void mxc_isi_channel_set_flip(struct mxc_isi_dev *mxc_isi)
+{
+ u32 val;
+
+ val = readl(mxc_isi->regs + CHNL_IMG_CTRL);
+ val &= ~(CHNL_IMG_CTRL_VFLIP_EN_MASK | CHNL_IMG_CTRL_HFLIP_EN_MASK);
+
+ if (mxc_isi->vflip)
+ val |= (CHNL_IMG_CTRL_VFLIP_EN_ENABLE << CHNL_IMG_CTRL_VFLIP_EN_OFFSET);
+ if (mxc_isi->hflip)
+ val |= (CHNL_IMG_CTRL_HFLIP_EN_ENABLE << CHNL_IMG_CTRL_HFLIP_EN_OFFSET);
+
+ writel(val, mxc_isi->regs + CHNL_IMG_CTRL);
+}
+EXPORT_SYMBOL_GPL(mxc_isi_channel_set_flip);
+
+void mxc_isi_channel_set_csc(struct mxc_isi_dev *mxc_isi,
+ struct mxc_isi_frame *src_f,
+ struct mxc_isi_frame *dst_f)
+{
+ struct mxc_isi_fmt *src_fmt = src_f->fmt;
+ struct mxc_isi_fmt *dst_fmt = dst_f->fmt;
+ u32 val, csc = 0;
+
+ val = readl(mxc_isi->regs + CHNL_IMG_CTRL);
+ val &= ~(CHNL_IMG_CTRL_FORMAT_MASK |
+ CHNL_IMG_CTRL_YCBCR_MODE_MASK |
+ CHNL_IMG_CTRL_CSC_BYPASS_MASK |
+ CHNL_IMG_CTRL_CSC_MODE_MASK);
+
+ /* set outbuf format */
+ val |= dst_fmt->color << CHNL_IMG_CTRL_FORMAT_OFFSET;
+
+ mxc_isi->cscen = 1;
+
+ if (is_yuv(src_fmt->fourcc) && is_rgb(dst_fmt->fourcc)) {
+ /* YUV2RGB */
+ csc = YUV2RGB;
+ /* enable YCbCr mode for the YUV-to-RGB conversion */
+ val |= (CHNL_IMG_CTRL_CSC_MODE_YCBCR2RGB << CHNL_IMG_CTRL_CSC_MODE_OFFSET);
+ val |= (CHNL_IMG_CTRL_YCBCR_MODE_ENABLE << CHNL_IMG_CTRL_YCBCR_MODE_OFFSET);
+ } else if (is_rgb(src_fmt->fourcc) && is_yuv(dst_fmt->fourcc)) {
+ /* RGB2YUV */
+ csc = RGB2YUV;
+ val |= (CHNL_IMG_CTRL_CSC_MODE_RGB2YCBCR << CHNL_IMG_CTRL_CSC_MODE_OFFSET);
+ } else {
+ /* Bypass CSC */
+ pr_info("bypass csc\n");
+ mxc_isi->cscen = 0;
+ val |= CHNL_IMG_CTRL_CSC_BYPASS_ENABLE;
+ }
+
+ printk_pixelformat("input fmt", src_fmt->fourcc);
+ printk_pixelformat("output fmt", dst_fmt->fourcc);
+
+ if (mxc_isi->cscen) {
+ writel(coeffs[csc][0], mxc_isi->regs + CHNL_CSC_COEFF0);
+ writel(coeffs[csc][1], mxc_isi->regs + CHNL_CSC_COEFF1);
+ writel(coeffs[csc][2], mxc_isi->regs + CHNL_CSC_COEFF2);
+ writel(coeffs[csc][3], mxc_isi->regs + CHNL_CSC_COEFF3);
+ writel(coeffs[csc][4], mxc_isi->regs + CHNL_CSC_COEFF4);
+ writel(coeffs[csc][5], mxc_isi->regs + CHNL_CSC_COEFF5);
+ }
+
+ writel(val, mxc_isi->regs + CHNL_IMG_CTRL);
+}
+
+void mxc_isi_channel_set_alpha_roi0(struct mxc_isi_dev *mxc_isi,
+ struct v4l2_rect *rect)
+{
+ u32 val0, val1;
+
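+ /*
+ * ULC/LRC registers pack X in the upper and Y in the lower 16 bits,
+ * so adding the packed size to the packed upper-left corner yields
+ * the lower-right corner as long as neither field overflows 16 bits.
+ */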
+ val0 = (rect->left << 16) | rect->top;
+ writel(val0, mxc_isi->regs + CHNL_ROI_0_ULC);
+ val1 = (rect->width << 16) | rect->height;
+ writel(val0 + val1, mxc_isi->regs + CHNL_ROI_0_LRC);
+}
+
+void mxc_isi_channel_set_alpha(struct mxc_isi_dev *mxc_isi)
+{
+ u32 val;
+
+ val = readl(mxc_isi->regs + CHNL_IMG_CTRL);
+ val &= ~(CHNL_IMG_CTRL_GBL_ALPHA_VAL_MASK | CHNL_IMG_CTRL_GBL_ALPHA_EN_MASK);
+
+ if (mxc_isi->alphaen)
+ val |= ((mxc_isi->alpha << CHNL_IMG_CTRL_GBL_ALPHA_VAL_OFFSET) |
+ (CHNL_IMG_CTRL_GBL_ALPHA_EN_ENABLE << CHNL_IMG_CTRL_GBL_ALPHA_EN_OFFSET));
+
+ writel(val, mxc_isi->regs + CHNL_IMG_CTRL);
+}
+
+void mxc_isi_channel_set_panic_threshold(struct mxc_isi_dev *mxc_isi)
+{
+ struct mxc_isi_set_thd *set_thd = mxc_isi->pdata->set_thd;
+ u32 val;
+
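+ /*
+ * Program the output FIFO fill levels at which the panic alarm
+ * asserts; the per-SoC threshold tables tune this to roughly 50%
+ * full (see mxc_imx8_isi_thd_v0/v1).
+ */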
+ val = readl(mxc_isi->regs + CHNL_OUT_BUF_CTRL);
+
+ val &= ~(set_thd->panic_set_thd_y.mask);
+ val |= set_thd->panic_set_thd_y.threshold << set_thd->panic_set_thd_y.offset;
+
+ val &= ~(set_thd->panic_set_thd_u.mask);
+ val |= set_thd->panic_set_thd_u.threshold << set_thd->panic_set_thd_u.offset;
+
+ val &= ~(set_thd->panic_set_thd_v.mask);
+ val |= set_thd->panic_set_thd_v.threshold << set_thd->panic_set_thd_v.offset;
+
+ writel(val, mxc_isi->regs + CHNL_OUT_BUF_CTRL);
+}
+
+void mxc_isi_channel_set_chain_buf(struct mxc_isi_dev *mxc_isi)
+{
+ u32 val;
+
+ if (mxc_isi->chain_buf) {
+ val = readl(mxc_isi->regs + CHNL_CTRL);
+ val &= ~CHNL_CTRL_CHAIN_BUF_MASK;
+ val |= (CHNL_CTRL_CHAIN_BUF_2_CHAIN << CHNL_CTRL_CHAIN_BUF_OFFSET);
+
+ writel(val, mxc_isi->regs + CHNL_CTRL);
+ }
+}
+EXPORT_SYMBOL_GPL(mxc_isi_channel_set_chain_buf);
+
+void mxc_isi_channel_deinterlace_init(struct mxc_isi_dev *mxc_isi)
+{
+ /* Config for Blending deinterlace */
+}
+
+void mxc_isi_channel_set_deinterlace(struct mxc_isi_dev *mxc_isi)
+{
+ /*
+ * De-interlacing method:
+ * Weaving - yes
+ * Line doubling - no
+ * Blending - TODO
+ */
+ u32 val;
+
+ val = readl(mxc_isi->regs + CHNL_IMG_CTRL);
+ val &= ~CHNL_IMG_CTRL_DEINT_MASK;
+ if (mxc_isi->deinterlace)
+ val |= mxc_isi->deinterlace << CHNL_IMG_CTRL_DEINT_OFFSET;
+ if (mxc_isi->deinterlace == CHNL_IMG_CTRL_DEINT_LDOUBLE_ODD_EVEN ||
+ mxc_isi->deinterlace == CHNL_IMG_CTRL_DEINT_LDOUBLE_EVEN_ODD)
+ mxc_isi_channel_deinterlace_init(mxc_isi);
+
+ writel(val, mxc_isi->regs + CHNL_IMG_CTRL);
+}
+
+void mxc_isi_channel_set_crop(struct mxc_isi_dev *mxc_isi)
+{
+ struct mxc_isi_frame *src_f = &mxc_isi->isi_cap->src_f;
+ struct v4l2_rect crop;
+ u32 val, val0, val1, temp;
+
+ val = readl(mxc_isi->regs + CHNL_IMG_CTRL);
+ val &= ~CHNL_IMG_CTRL_CROP_EN_MASK;
+
+ if ((src_f->o_height == src_f->height) &&
+ (src_f->o_width == src_f->width)) {
+ mxc_isi->crop = 0;
+ writel(val, mxc_isi->regs + CHNL_IMG_CTRL);
+ return;
+ }
+
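+ /*
+ * With the scaler active the crop rectangle applies after scaling,
+ * so convert it from source coordinates using the 4.12 fixed-point
+ * scale factors and the pre-decimation shifts.
+ */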
+ if (mxc_isi->scale) {
+ temp = (src_f->h_off << 12) / mxc_isi->xfactor;
+ crop.left = temp >> mxc_isi->pre_dec_x;
+ temp = (src_f->v_off << 12) / mxc_isi->yfactor;
+ crop.top = temp >> mxc_isi->pre_dec_y;
+ temp = (src_f->width << 12) / mxc_isi->xfactor;
+ crop.width = temp >> mxc_isi->pre_dec_x;
+ temp = (src_f->height << 12) / mxc_isi->yfactor;
+ crop.height = temp >> mxc_isi->pre_dec_y;
+ } else {
+ crop.left = src_f->h_off;
+ crop.top = src_f->v_off;
+ crop.width = src_f->width;
+ crop.height = src_f->height;
+ }
+
+ mxc_isi->crop = 1;
+ val |= (CHNL_IMG_CTRL_CROP_EN_ENABLE << CHNL_IMG_CTRL_CROP_EN_OFFSET);
+ val0 = crop.top | (crop.left << CHNL_CROP_ULC_X_OFFSET);
+ val1 = crop.height | (crop.width << CHNL_CROP_LRC_X_OFFSET);
+
+ writel(val0, mxc_isi->regs + CHNL_CROP_ULC);
+ writel((val1 + val0), mxc_isi->regs + CHNL_CROP_LRC);
+ writel(val, mxc_isi->regs + CHNL_IMG_CTRL);
+}
+
+static void mxc_isi_channel_clear_scaling(struct mxc_isi_dev *mxc_isi)
+{
+ u32 val0;
+
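+	/*
+	 * 0x1000 is 1.0 in the scaler's fixed-point format (12 fractional
+	 * bits); 0x10001000 resets both X and Y scale factors to unity.
+	 */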
+ writel(0x10001000, mxc_isi->regs + CHNL_SCALE_FACTOR);
+
+ val0 = readl(mxc_isi->regs + CHNL_IMG_CTRL);
+ val0 &= ~(CHNL_IMG_CTRL_DEC_X_MASK | CHNL_IMG_CTRL_DEC_Y_MASK);
+ writel(val0, mxc_isi->regs + CHNL_IMG_CTRL);
+}
+
+void mxc_isi_channel_set_scaling(struct mxc_isi_dev *mxc_isi,
+ struct mxc_isi_frame *src_f,
+ struct mxc_isi_frame *dst_f)
+{
+ u32 decx, decy;
+ u32 xscale, yscale;
+ u32 xdec = 0, ydec = 0;
+ u32 val0, val1;
+
+ if (dst_f->height == src_f->height &&
+ dst_f->width == src_f->width) {
+ mxc_isi->scale = 0;
+ mxc_isi_channel_clear_scaling(mxc_isi);
+ dev_dbg(&mxc_isi->pdev->dev, "%s: no scale\n", __func__);
+ return;
+ }
+
+ dev_info(&mxc_isi->pdev->dev, "input_size(%d,%d), output_size(%d,%d)\n",
+ src_f->width, src_f->height, dst_f->width, dst_f->height);
+
+ mxc_isi->scale = 1;
+
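+	/*
+	 * Downscaling is two-stage: coarse pre-decimation by 2, 4 or 8
+	 * (xdec/ydec) followed by the fractional scaler, whose factor is
+	 * the remaining ratio in 12-bit fixed point:
+	 * factor = (src * 0x1000) / (dst * dec).
+	 */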
+ decx = src_f->width / dst_f->width;
+ decy = src_f->height / dst_f->height;
+
+ if (decx > 1) {
+ /* Down */
+ if (decx >= 2 && decx < 4) {
+ decx = 2;
+ xdec = 1;
+ } else if (decx >= 4 && decx < 8) {
+ decx = 4;
+ xdec = 2;
+ } else if (decx >= 8) {
+ decx = 8;
+ xdec = 3;
+ }
+ xscale = src_f->width * 0x1000 / (dst_f->width * decx);
+ } else {
+ /* Up */
+ xscale = src_f->width * 0x1000 / dst_f->width;
+ }
+
+ if (decy > 1) {
+ if (decy >= 2 && decy < 4) {
+ decy = 2;
+ ydec = 1;
+ } else if (decy >= 4 && decy < 8) {
+ decy = 4;
+ ydec = 2;
+ } else if (decy >= 8) {
+ decy = 8;
+ ydec = 3;
+ }
+ yscale = src_f->height * 0x1000 / (dst_f->height * decy);
+ } else {
+ yscale = src_f->height * 0x1000 / dst_f->height;
+ }
+
+ val0 = readl(mxc_isi->regs + CHNL_IMG_CTRL);
+	val0 |= CHNL_IMG_CTRL_YCBCR_MODE_MASK; /* enable YCbCr mode */
+ val0 &= ~(CHNL_IMG_CTRL_DEC_X_MASK | CHNL_IMG_CTRL_DEC_Y_MASK);
+ val0 |= (xdec << CHNL_IMG_CTRL_DEC_X_OFFSET) |
+ (ydec << CHNL_IMG_CTRL_DEC_Y_OFFSET);
+ writel(val0, mxc_isi->regs + CHNL_IMG_CTRL);
+
+ if (xscale > ISI_DOWNSCALE_THRESHOLD)
+ xscale = ISI_DOWNSCALE_THRESHOLD;
+ if (yscale > ISI_DOWNSCALE_THRESHOLD)
+ yscale = ISI_DOWNSCALE_THRESHOLD;
+
+ val1 = xscale | (yscale << CHNL_SCALE_FACTOR_Y_SCALE_OFFSET);
+
+ writel(val1, mxc_isi->regs + CHNL_SCALE_FACTOR);
+
+ /* Update scale config if scaling enabled */
+ val1 = dst_f->o_width | (dst_f->o_height << CHNL_SCL_IMG_CFG_HEIGHT_OFFSET);
+ writel(val1, mxc_isi->regs + CHNL_SCL_IMG_CFG);
+
+ writel(0, mxc_isi->regs + CHNL_SCALE_OFFSET);
+}
+
+void mxc_isi_channel_init(struct mxc_isi_dev *mxc_isi)
+{
+ u32 val;
+
+ /* sw reset */
+ mxc_isi_channel_sw_reset(mxc_isi);
+
+	/* enable the channel clock first */
+ val = readl(mxc_isi->regs + CHNL_CTRL);
+ val |= (CHNL_CTRL_CLK_EN_ENABLE << CHNL_CTRL_CLK_EN_OFFSET);
+ writel(val, mxc_isi->regs + CHNL_CTRL);
+}
+EXPORT_SYMBOL_GPL(mxc_isi_channel_init);
+
+void mxc_isi_channel_deinit(struct mxc_isi_dev *mxc_isi)
+{
+ u32 val;
+
+ /* sw reset */
+ mxc_isi_channel_sw_reset(mxc_isi);
+
+	/* disable the channel clock */
+ val = (CHNL_CTRL_CLK_EN_DISABLE << CHNL_CTRL_CLK_EN_OFFSET);
+ writel(val, mxc_isi->regs + CHNL_CTRL);
+
+ if (mxc_isi->chain_buf && mxc_isi->chain)
+ regmap_write(mxc_isi->chain, CHNL_CTRL, 0x0);
+}
+EXPORT_SYMBOL_GPL(mxc_isi_channel_deinit);
+
+void mxc_isi_channel_config(struct mxc_isi_dev *mxc_isi,
+ struct mxc_isi_frame *src_f,
+ struct mxc_isi_frame *dst_f)
+{
+ u32 val;
+
+	/* chain two channels for images wider than 2048 pixels */
+ chain_buf(mxc_isi, src_f);
+
+	/* config input frame size; CHNL_IMG_CFG takes the source resolution */
+ val = src_f->o_width | (src_f->o_height << CHNL_IMG_CFG_HEIGHT_OFFSET);
+ writel(val, mxc_isi->regs + CHNL_IMG_CFG);
+
+	/* the scaled size must equal the input size when scaling is disabled */
+ writel(val, mxc_isi->regs + CHNL_SCL_IMG_CFG);
+
+ /* check csc and scaling */
+ mxc_isi_channel_set_csc(mxc_isi, src_f, dst_f);
+
+ mxc_isi_channel_set_scaling(mxc_isi, src_f, dst_f);
+
+	/* select the source input, source type and MIPI virtual channel */
+ mxc_isi_channel_source_config(mxc_isi);
+
+ /* line pitch */
+ val = dst_f->bytesperline[0];
+ writel(val, mxc_isi->regs + CHNL_OUT_BUF_PITCH);
+
+ /* TODO */
+ mxc_isi_channel_set_flip(mxc_isi);
+
+ mxc_isi_channel_set_alpha(mxc_isi);
+
+ mxc_isi_channel_set_panic_threshold(mxc_isi);
+
+ val = readl(mxc_isi->regs + CHNL_CTRL);
+ val &= ~CHNL_CTRL_CHNL_BYPASS_MASK;
+
+ /* Bypass channel */
+ if (!mxc_isi->cscen && !mxc_isi->scale)
+ val |= (CHNL_CTRL_CHNL_BYPASS_ENABLE << CHNL_CTRL_CHNL_BYPASS_OFFSET);
+
+ writel(val, mxc_isi->regs + CHNL_CTRL);
+}
+EXPORT_SYMBOL_GPL(mxc_isi_channel_config);
+
+void mxc_isi_clean_registers(struct mxc_isi_dev *mxc_isi)
+{
+ u32 status;
+
+ status = mxc_isi_get_irq_status(mxc_isi);
+ mxc_isi_clean_irq_status(mxc_isi, status);
+}
+EXPORT_SYMBOL_GPL(mxc_isi_clean_registers);
+
+void mxc_isi_channel_enable(struct mxc_isi_dev *mxc_isi, bool m2m_enabled)
+{
+ u32 val;
+
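+	/* Blank/pad pixels are filled with 0xff so that, e.g., an unused
+	 * alpha byte in 32-bit formats reads as fully opaque.
+	 */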
+ val = readl(mxc_isi->regs + CHNL_CTRL);
+ val |= 0xff << CHNL_CTRL_BLANK_PXL_OFFSET;
+
+ if (m2m_enabled) {
+ val &= ~(CHNL_CTRL_SRC_TYPE_MASK | CHNL_CTRL_SRC_INPUT_MASK);
+ val |= (mxc_isi->pdata->chan_src->src_mem << CHNL_CTRL_SRC_INPUT_OFFSET |
+ CHNL_CTRL_SRC_TYPE_MEMORY << CHNL_CTRL_SRC_TYPE_OFFSET);
+ }
+
+ val &= ~CHNL_CTRL_CHNL_EN_MASK;
+ val |= CHNL_CTRL_CHNL_EN_ENABLE << CHNL_CTRL_CHNL_EN_OFFSET;
+ writel(val, mxc_isi->regs + CHNL_CTRL);
+
+ mxc_isi_clean_registers(mxc_isi);
+ mxc_isi_enable_irq(mxc_isi);
+
+ if (m2m_enabled) {
+ mxc_isi_m2m_start_read(mxc_isi);
+ return;
+ }
+
+ dump_isi_regs(mxc_isi);
+ msleep(300);
+}
+EXPORT_SYMBOL_GPL(mxc_isi_channel_enable);
+
+void mxc_isi_channel_disable(struct mxc_isi_dev *mxc_isi)
+{
+ u32 val;
+
+ mxc_isi_disable_irq(mxc_isi);
+
+ val = readl(mxc_isi->regs + CHNL_CTRL);
+ val &= ~(CHNL_CTRL_CHNL_EN_MASK | CHNL_CTRL_CLK_EN_MASK);
+ val |= (CHNL_CTRL_CHNL_EN_DISABLE << CHNL_CTRL_CHNL_EN_OFFSET);
+ val |= (CHNL_CTRL_CLK_EN_DISABLE << CHNL_CTRL_CLK_EN_OFFSET);
+ writel(val, mxc_isi->regs + CHNL_CTRL);
+}
+EXPORT_SYMBOL_GPL(mxc_isi_channel_disable);
+
+void mxc_isi_enable_irq(struct mxc_isi_dev *mxc_isi)
+{
+ struct mxc_isi_ier_reg *ier_reg = mxc_isi->pdata->ier_reg;
+ u32 val;
+
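+	/*
+	 * The overflow/panic interrupt bit positions vary between i.MX8
+	 * variants, so their masks come from per-SoC platform data instead
+	 * of fixed defines.
+	 */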
+ val = CHNL_IER_FRM_RCVD_EN_MASK |
+ CHNL_IER_AXI_WR_ERR_U_EN_MASK |
+ CHNL_IER_AXI_WR_ERR_V_EN_MASK |
+ CHNL_IER_AXI_WR_ERR_Y_EN_MASK;
+
+ /* Y/U/V overflow enable */
+ val |= ier_reg->oflw_y_buf_en.mask |
+ ier_reg->oflw_u_buf_en.mask |
+ ier_reg->oflw_v_buf_en.mask;
+
+ /* Y/U/V excess overflow enable */
+ val |= ier_reg->excs_oflw_y_buf_en.mask |
+ ier_reg->excs_oflw_u_buf_en.mask |
+ ier_reg->excs_oflw_v_buf_en.mask;
+
+ /* Y/U/V panic enable */
+ val |= ier_reg->panic_y_buf_en.mask |
+ ier_reg->panic_u_buf_en.mask |
+ ier_reg->panic_v_buf_en.mask;
+
+ writel(val, mxc_isi->regs + CHNL_IER);
+}
+EXPORT_SYMBOL_GPL(mxc_isi_enable_irq);
+
+void mxc_isi_disable_irq(struct mxc_isi_dev *mxc_isi)
+{
+ writel(0, mxc_isi->regs + CHNL_IER);
+}
+EXPORT_SYMBOL_GPL(mxc_isi_disable_irq);
+
+u32 mxc_isi_get_irq_status(struct mxc_isi_dev *mxc_isi)
+{
+ return readl(mxc_isi->regs + CHNL_STS);
+}
+EXPORT_SYMBOL_GPL(mxc_isi_get_irq_status);
+
+void mxc_isi_clean_irq_status(struct mxc_isi_dev *mxc_isi, u32 val)
+{
+ writel(val, mxc_isi->regs + CHNL_STS);
+}
+EXPORT_SYMBOL_GPL(mxc_isi_clean_irq_status);
+
+void mxc_isi_m2m_config_src(struct mxc_isi_dev *mxc_isi,
+ struct mxc_isi_frame *src_f)
+{
+ u32 val;
+
+ /* source format */
+ val = readl(mxc_isi->regs + CHNL_MEM_RD_CTRL);
+ val &= ~CHNL_MEM_RD_CTRL_IMG_TYPE_MASK;
+ val |= src_f->fmt->color << CHNL_MEM_RD_CTRL_IMG_TYPE_OFFSET;
+ writel(val, mxc_isi->regs + CHNL_MEM_RD_CTRL);
+
+ /* source image width and height */
+ val = (src_f->width << CHNL_IMG_CFG_WIDTH_OFFSET |
+ src_f->height << CHNL_IMG_CFG_HEIGHT_OFFSET);
+ writel(val, mxc_isi->regs + CHNL_IMG_CFG);
+
+ /* source pitch */
+ val = src_f->bytesperline[0] << CHNL_IN_BUF_PITCH_LINE_PITCH_OFFSET;
+ writel(val, mxc_isi->regs + CHNL_IN_BUF_PITCH);
+}
+EXPORT_SYMBOL_GPL(mxc_isi_m2m_config_src);
+
+void mxc_isi_m2m_config_dst(struct mxc_isi_dev *mxc_isi,
+ struct mxc_isi_frame *dst_f)
+{
+ u32 val;
+
+ /* out format */
+ val = readl(mxc_isi->regs + CHNL_IMG_CTRL);
+ val &= ~CHNL_IMG_CTRL_FORMAT_MASK;
+ val |= dst_f->fmt->color << CHNL_IMG_CTRL_FORMAT_OFFSET;
+ writel(val, mxc_isi->regs + CHNL_IMG_CTRL);
+
+ /* out pitch */
+ val = readl(mxc_isi->regs + CHNL_OUT_BUF_PITCH);
+	val &= ~CHNL_OUT_BUF_PITCH_LINE_PITCH_MASK;
+ val |= dst_f->bytesperline[0] << CHNL_OUT_BUF_PITCH_LINE_PITCH_OFFSET;
+ writel(val, mxc_isi->regs + CHNL_OUT_BUF_PITCH);
+}
+EXPORT_SYMBOL_GPL(mxc_isi_m2m_config_dst);
+
+void mxc_isi_m2m_start_read(struct mxc_isi_dev *mxc_isi)
+{
+ u32 val;
+
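+	/*
+	 * Toggle READ_MEM to start a new read: clear the bit, let the
+	 * channel settle, then set it again to trigger the frame fetch.
+	 */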
+ val = readl(mxc_isi->regs + CHNL_MEM_RD_CTRL);
+	val &= ~CHNL_MEM_RD_CTRL_READ_MEM_MASK;
+ writel(val, mxc_isi->regs + CHNL_MEM_RD_CTRL);
+ udelay(300);
+
+ val |= CHNL_MEM_RD_CTRL_READ_MEM_ENABLE << CHNL_MEM_RD_CTRL_READ_MEM_OFFSET;
+ writel(val, mxc_isi->regs + CHNL_MEM_RD_CTRL);
+}
+EXPORT_SYMBOL_GPL(mxc_isi_m2m_start_read);
diff --git a/drivers/staging/media/imx/imx8-isi-hw.h b/drivers/staging/media/imx/imx8-isi-hw.h
new file mode 100644
index 000000000000..54cde426fa0d
--- /dev/null
+++ b/drivers/staging/media/imx/imx8-isi-hw.h
@@ -0,0 +1,484 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019-2020 NXP
+ *
+ */
+
+#ifndef __MXC_ISI_HW_H__
+#define __MXC_ISI_HW_H__
+
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/platform_device.h>
+#include <linux/videodev2.h>
+
+#include "imx8-isi-core.h"
+
+/* ISI Registers Define */
+/* Channel Control Register */
+#define CHNL_CTRL 0x0
+#define CHNL_CTRL_CHNL_EN_OFFSET 31
+#define CHNL_CTRL_CHNL_EN_MASK 0x80000000
+#define CHNL_CTRL_CHNL_EN_DISABLE 0
+#define CHNL_CTRL_CHNL_EN_ENABLE 1
+#define CHNL_CTRL_CLK_EN_OFFSET 30
+#define CHNL_CTRL_CLK_EN_MASK 0x40000000
+#define CHNL_CTRL_CLK_EN_DISABLE 0
+#define CHNL_CTRL_CLK_EN_ENABLE 1
+#define CHNL_CTRL_CHNL_BYPASS_OFFSET 29
+#define CHNL_CTRL_CHNL_BYPASS_MASK 0x20000000
+#define CHNL_CTRL_CHNL_BYPASS_ENABLE 1
+#define CHNL_CTRL_CHAIN_BUF_OFFSET 25
+#define CHNL_CTRL_CHAIN_BUF_MASK 0x6000000
+#define CHNL_CTRL_CHAIN_BUF_NO_CHAIN 0
+#define CHNL_CTRL_CHAIN_BUF_2_CHAIN 1
+#define CHNL_CTRL_SW_RST_OFFSET 24
+#define CHNL_CTRL_SW_RST_MASK 0x1000000
+#define CHNL_CTRL_SW_RST 0x1000000
+#define CHNL_CTRL_BLANK_PXL_OFFSET 16
+#define CHNL_CTRL_MIPI_VC_ID_OFFSET 6
+#define CHNL_CTRL_MIPI_VC_ID_MASK 0xc0
+#define CHNL_CTRL_MIPI_VC_ID_VC0 0
+#define CHNL_CTRL_MIPI_VC_ID_VC1 1
+#define CHNL_CTRL_MIPI_VC_ID_VC2 2
+#define CHNL_CTRL_MIPI_VC_ID_VC3 3
+#define CHNL_CTRL_SRC_TYPE_OFFSET 4
+#define CHNL_CTRL_SRC_TYPE_MASK 0x10
+#define CHNL_CTRL_SRC_TYPE_DEVICE 0
+#define CHNL_CTRL_SRC_TYPE_MEMORY 1
+#define CHNL_CTRL_SRC_INPUT_OFFSET 0
+#define CHNL_CTRL_SRC_INPUT_MASK 0x7
+#define CHNL_CTRL_SRC_INPUT_MEMORY 5
+
+/* Channel Image Control Register */
+#define CHNL_IMG_CTRL 0x4
+#define CHNL_IMG_CTRL_FORMAT_OFFSET 24
+#define CHNL_IMG_CTRL_FORMAT_MASK 0x3F000000
+#define CHNL_IMG_CTRL_GBL_ALPHA_VAL_OFFSET 16
+#define CHNL_IMG_CTRL_GBL_ALPHA_VAL_MASK 0xFF0000
+#define CHNL_IMG_CTRL_GBL_ALPHA_EN_OFFSET 15
+#define CHNL_IMG_CTRL_GBL_ALPHA_EN_ENABLE 1
+#define CHNL_IMG_CTRL_GBL_ALPHA_EN_MASK 0x8000
+#define CHNL_IMG_CTRL_DEINT_OFFSET 12
+#define CHNL_IMG_CTRL_DEINT_MASK 0x7000
+#define CHNL_IMG_CTRL_DEINT_WEAVE_ODD_EVEN 2
+#define CHNL_IMG_CTRL_DEINT_WEAVE_EVEN_ODD 3
+#define CHNL_IMG_CTRL_DEINT_BLEND_ODD_EVEN 4
+#define CHNL_IMG_CTRL_DEINT_BLEND_EVEN_ODD 5
+#define CHNL_IMG_CTRL_DEINT_LDOUBLE_ODD_EVEN 6
+#define CHNL_IMG_CTRL_DEINT_LDOUBLE_EVEN_ODD 7
+#define CHNL_IMG_CTRL_DEC_X_OFFSET 10
+#define CHNL_IMG_CTRL_DEC_X_MASK 0xC00
+#define CHNL_IMG_CTRL_DEC_X_0 0
+#define CHNL_IMG_CTRL_DEC_X_2 1
+#define CHNL_IMG_CTRL_DEC_X_4 2
+#define CHNL_IMG_CTRL_DEC_X_8 3
+#define CHNL_IMG_CTRL_DEC_Y_OFFSET 8
+#define CHNL_IMG_CTRL_DEC_Y_MASK 0x300
+#define CHNL_IMG_CTRL_DEC_Y_0 0
+#define CHNL_IMG_CTRL_DEC_Y_2 1
+#define CHNL_IMG_CTRL_DEC_Y_4 2
+#define CHNL_IMG_CTRL_DEC_Y_8 3
+#define CHNL_IMG_CTRL_CROP_EN_OFFSET 7
+#define CHNL_IMG_CTRL_CROP_EN_MASK 0x80
+#define CHNL_IMG_CTRL_CROP_EN_ENABLE 1
+#define CHNL_IMG_CTRL_VFLIP_EN_OFFSET 6
+#define CHNL_IMG_CTRL_VFLIP_EN_MASK 0x40
+#define CHNL_IMG_CTRL_VFLIP_EN_ENABLE 1
+#define CHNL_IMG_CTRL_HFLIP_EN_OFFSET 5
+#define CHNL_IMG_CTRL_HFLIP_EN_MASK 0x20
+#define CHNL_IMG_CTRL_HFLIP_EN_ENABLE 1
+#define CHNL_IMG_CTRL_YCBCR_MODE_OFFSET 3
+#define CHNL_IMG_CTRL_YCBCR_MODE_MASK 0x8
+#define CHNL_IMG_CTRL_YCBCR_MODE_ENABLE 1
+#define CHNL_IMG_CTRL_CSC_MODE_OFFSET 1
+#define CHNL_IMG_CTRL_CSC_MODE_MASK 0x6
+#define CHNL_IMG_CTRL_CSC_MODE_YUV2RGB 0
+#define CHNL_IMG_CTRL_CSC_MODE_YCBCR2RGB 1
+#define CHNL_IMG_CTRL_CSC_MODE_RGB2YUV 2
+#define CHNL_IMG_CTRL_CSC_MODE_RGB2YCBCR 3
+#define CHNL_IMG_CTRL_CSC_BYPASS_OFFSET 0
+#define CHNL_IMG_CTRL_CSC_BYPASS_MASK 0x1
+#define CHNL_IMG_CTRL_CSC_BYPASS_ENABLE 0x1
+
+/* Channel Output Buffer Control Register */
+#define CHNL_OUT_BUF_CTRL 0x8
+#define CHNL_OUT_BUF_CTRL_LOAD_BUF2_ADDR_OFFSET 15
+#define CHNL_OUT_BUF_CTRL_LOAD_BUF2_ADDR_MASK 0x8000
+#define CHNL_OUT_BUF_CTRL_LOAD_BUF1_ADDR_OFFSET 14
+#define CHNL_OUT_BUF_CTRL_LOAD_BUF1_ADDR_MASK 0x4000
+#define CHNL_OUT_BUF_CTRL_OFLW_PANIC_SET_THD_V_OFFSET 6
+#define CHNL_OUT_BUF_CTRL_OFLW_PANIC_SET_THD_V_MASK 0xC0
+#define CHNL_OUT_BUF_CTRL_OFLW_PANIC_SET_THD_V_NO_PANIC 0
+#define CHNL_OUT_BUF_CTRL_OFLW_PANIC_SET_THD_V_PANIC_25 1
+#define CHNL_OUT_BUF_CTRL_OFLW_PANIC_SET_THD_V_PANIC_50 2
+#define CHNL_OUT_BUF_CTRL_OFLW_PANIC_SET_THD_V_PANIC_75 3
+#define CHNL_OUT_BUF_CTRL_OFLW_PANIC_SET_THD_U_OFFSET 3
+#define CHNL_OUT_BUF_CTRL_OFLW_PANIC_SET_THD_U_MASK 0x18
+#define CHNL_OUT_BUF_CTRL_OFLW_PANIC_SET_THD_U_NO_PANIC 0
+#define CHNL_OUT_BUF_CTRL_OFLW_PANIC_SET_THD_U_PANIC_25 1
+#define CHNL_OUT_BUF_CTRL_OFLW_PANIC_SET_THD_U_PANIC_50 2
+#define CHNL_OUT_BUF_CTRL_OFLW_PANIC_SET_THD_U_PANIC_75 3
+#define CHNL_OUT_BUF_CTRL_OFLW_PANIC_SET_THD_Y_OFFSET 0
+#define CHNL_OUT_BUF_CTRL_OFLW_PANIC_SET_THD_Y_MASK 0x3
+#define CHNL_OUT_BUF_CTRL_OFLW_PANIC_SET_THD_Y_NO_PANIC 0
+#define CHNL_OUT_BUF_CTRL_OFLW_PANIC_SET_THD_Y_PANIC_25 1
+#define CHNL_OUT_BUF_CTRL_OFLW_PANIC_SET_THD_Y_PANIC_50 2
+#define CHNL_OUT_BUF_CTRL_OFLW_PANIC_SET_THD_Y_PANIC_75 3
+
+/* Channel Image Configuration */
+#define CHNL_IMG_CFG 0xC
+#define CHNL_IMG_CFG_HEIGHT_OFFSET 16
+#define CHNL_IMG_CFG_HEIGHT_MASK 0x1FFF0000
+#define CHNL_IMG_CFG_WIDTH_OFFSET 0
+#define CHNL_IMG_CFG_WIDTH_MASK 0x1FFF
+
+/* Channel Interrupt Enable Register */
+#define CHNL_IER 0x10
+#define CHNL_IER_MEM_RD_DONE_EN_OFFSET 31
+#define CHNL_IER_MEM_RD_DONE_EN_MASK 0x80000000
+#define CHNL_IER_MEM_RD_DONE_EN_ENABLE 1
+#define CHNL_IER_LINE_RCVD_EN_OFFSET 30
+#define CHNL_IER_LINE_RCVD_EN_MASK 0x40000000
+#define CHNL_IER_LINE_RCVD_EN_ENABLE 1
+#define CHNL_IER_FRM_RCVD_EN_OFFSET 29
+#define CHNL_IER_FRM_RCVD_EN_MASK 0x20000000
+#define CHNL_IER_FRM_RCVD_EN_ENABLE 1
+#define CHNL_IER_AXI_WR_ERR_V_EN_OFFSET 28
+#define CHNL_IER_AXI_WR_ERR_V_EN_MASK 0x10000000
+#define CHNL_IER_AXI_WR_ERR_V_EN_ENABLE 1
+#define CHNL_IER_AXI_WR_ERR_U_EN_OFFSET 27
+#define CHNL_IER_AXI_WR_ERR_U_EN_MASK 0x8000000
+#define CHNL_IER_AXI_WR_ERR_U_EN_ENABLE 1
+#define CHNL_IER_AXI_WR_ERR_Y_EN_OFFSET 26
+#define CHNL_IER_AXI_WR_ERR_Y_EN_MASK 0x4000000
+#define CHNL_IER_AXI_WR_ERR_Y_EN_ENABLE 1
+#define CHNL_IER_AXI_RD_ERR_EN_OFFSET 25
+#define CHNL_IER_AXI_RD_ERR_EN_MASK 0x2000000
+#define CHNL_IER_AXI_RD_ERR_EN_ENABLE 1
+
+/* Channel Status Register */
+#define CHNL_STS 0x14
+#define CHNL_STS_MEM_RD_DONE_OFFSET 31
+#define CHNL_STS_MEM_RD_DONE_MASK 0x80000000
+#define CHNL_STS_MEM_RD_DONE_ENABLE 1
+#define CHNL_STS_LINE_STRD_OFFSET 30
+#define CHNL_STS_LINE_STRD_MASK 0x40000000
+#define CHNL_STS_LINE_STRD_ENABLE 1
+#define CHNL_STS_FRM_STRD_OFFSET 29
+#define CHNL_STS_FRM_STRD_MASK 0x20000000
+#define CHNL_STS_FRM_STRD_ENABLE 1
+#define CHNL_STS_AXI_WR_ERR_V_OFFSET 28
+#define CHNL_STS_AXI_WR_ERR_V_MASK 0x10000000
+#define CHNL_STS_AXI_WR_ERR_V_ENABLE 1
+#define CHNL_STS_AXI_WR_ERR_U_OFFSET 27
+#define CHNL_STS_AXI_WR_ERR_U_MASK 0x8000000
+#define CHNL_STS_AXI_WR_ERR_U_ENABLE 1
+#define CHNL_STS_AXI_WR_ERR_Y_OFFSET 26
+#define CHNL_STS_AXI_WR_ERR_Y_MASK 0x4000000
+#define CHNL_STS_AXI_WR_ERR_Y_ENABLE 1
+#define CHNL_STS_AXI_RD_ERR_OFFSET 25
+#define CHNL_STS_AXI_RD_ERR_MASK 0x2000000
+#define CHNL_STS_AXI_RD_ERR_ENABLE 1
+#define CHNL_STS_OFLW_PANIC_V_BUF_OFFSET 24
+#define CHNL_STS_OFLW_PANIC_V_BUF_MASK 0x1000000
+#define CHNL_STS_OFLW_PANIC_V_BUF_ENABLE 1
+#define CHNL_STS_EXCS_OFLW_V_BUF_OFFSET 23
+#define CHNL_STS_EXCS_OFLW_V_BUF_MASK 0x800000
+#define CHNL_STS_EXCS_OFLW_V_BUF_ENABLE 1
+#define CHNL_STS_OFLW_V_BUF_OFFSET 22
+#define CHNL_STS_OFLW_V_BUF_MASK 0x400000
+#define CHNL_STS_OFLW_V_BUF_ENABLE 1
+#define CHNL_STS_OFLW_PANIC_U_BUF_OFFSET 21
+#define CHNL_STS_OFLW_PANIC_U_BUF_MASK 0x200000
+#define CHNL_STS_OFLW_PANIC_U_BUF_ENABLE 1
+#define CHNL_STS_EXCS_OFLW_U_BUF_OFFSET 20
+#define CHNL_STS_EXCS_OFLW_U_BUF_MASK 0x100000
+#define CHNL_STS_EXCS_OFLW_U_BUF_ENABLE 1
+#define CHNL_STS_OFLW_U_BUF_OFFSET 19
+#define CHNL_STS_OFLW_U_BUF_MASK 0x80000
+#define CHNL_STS_OFLW_U_BUF_ENABLE 1
+#define CHNL_STS_OFLW_PANIC_Y_BUF_OFFSET 18
+#define CHNL_STS_OFLW_PANIC_Y_BUF_MASK 0x40000
+#define CHNL_STS_OFLW_PANIC_Y_BUF_ENABLE 1
+#define CHNL_STS_EXCS_OFLW_Y_BUF_OFFSET 17
+#define CHNL_STS_EXCS_OFLW_Y_BUF_MASK 0x20000
+#define CHNL_STS_EXCS_OFLW_Y_BUF_ENABLE 1
+#define CHNL_STS_OFLW_Y_BUF_OFFSET 16
+#define CHNL_STS_OFLW_Y_BUF_MASK 0x10000
+#define CHNL_STS_OFLW_Y_BUF_ENABLE 1
+#define CHNL_STS_OFLW_BYTES_OFFSET 0
+#define CHNL_STS_OFLW_BYTES_MASK 0xFF
+
+/* Channel Scale Factor Register */
+#define CHNL_SCALE_FACTOR 0x18
+#define CHNL_SCALE_FACTOR_Y_SCALE_OFFSET 16
+#define CHNL_SCALE_FACTOR_Y_SCALE_MASK 0x3FFF0000
+#define CHNL_SCALE_FACTOR_X_SCALE_OFFSET 0
+#define CHNL_SCALE_FACTOR_X_SCALE_MASK 0x3FFF
+
+/* Channel Scale Offset Register */
+#define CHNL_SCALE_OFFSET 0x1C
+#define CHNL_SCALE_OFFSET_Y_SCALE_OFFSET 16
+#define CHNL_SCALE_OFFSET_Y_SCALE_MASK 0xFFF0000
+#define CHNL_SCALE_OFFSET_X_SCALE_OFFSET 0
+#define CHNL_SCALE_OFFSET_X_SCALE_MASK 0xFFF
+
+/* Channel Crop Upper Left Corner Coordinate Register */
+#define CHNL_CROP_ULC 0x20
+#define CHNL_CROP_ULC_X_OFFSET 16
+#define CHNL_CROP_ULC_X_MASK 0xFFF0000
+#define CHNL_CROP_ULC_Y_OFFSET 0
+#define CHNL_CROP_ULC_Y_MASK 0xFFF
+
+/* Channel Crop Lower Right Corner Coordinate Register */
+#define CHNL_CROP_LRC 0x24
+#define CHNL_CROP_LRC_X_OFFSET 16
+#define CHNL_CROP_LRC_X_MASK 0xFFF0000
+#define CHNL_CROP_LRC_Y_OFFSET 0
+#define CHNL_CROP_LRC_Y_MASK 0xFFF
+
+/* Channel Color Space Conversion Coefficient Register 0 */
+#define CHNL_CSC_COEFF0 0x28
+#define CHNL_CSC_COEFF0_A2_OFFSET 16
+#define CHNL_CSC_COEFF0_A2_MASK 0x7FF0000
+#define CHNL_CSC_COEFF0_A1_OFFSET 0
+#define CHNL_CSC_COEFF0_A1_MASK 0x7FF
+
+/* Channel Color Space Conversion Coefficient Register 1 */
+#define CHNL_CSC_COEFF1 0x2C
+#define CHNL_CSC_COEFF1_B1_OFFSET 16
+#define CHNL_CSC_COEFF1_B1_MASK 0x7FF0000
+#define CHNL_CSC_COEFF1_A3_OFFSET 0
+#define CHNL_CSC_COEFF1_A3_MASK 0x7FF
+
+/* Channel Color Space Conversion Coefficient Register 2 */
+#define CHNL_CSC_COEFF2 0x30
+#define CHNL_CSC_COEFF2_B3_OFFSET 16
+#define CHNL_CSC_COEFF2_B3_MASK 0x7FF0000
+#define CHNL_CSC_COEFF2_B2_OFFSET 0
+#define CHNL_CSC_COEFF2_B2_MASK 0x7FF
+
+/* Channel Color Space Conversion Coefficient Register 3 */
+#define CHNL_CSC_COEFF3 0x34
+#define CHNL_CSC_COEFF3_C2_OFFSET 16
+#define CHNL_CSC_COEFF3_C2_MASK 0x7FF0000
+#define CHNL_CSC_COEFF3_C1_OFFSET 0
+#define CHNL_CSC_COEFF3_C1_MASK 0x7FF
+
+/* Channel Color Space Conversion Coefficient Register 4 */
+#define CHNL_CSC_COEFF4 0x38
+#define CHNL_CSC_COEFF4_D1_OFFSET 16
+#define CHNL_CSC_COEFF4_D1_MASK 0x1FF0000
+#define CHNL_CSC_COEFF4_C3_OFFSET 0
+#define CHNL_CSC_COEFF4_C3_MASK 0x7FF
+
+/* Channel Color Space Conversion Coefficient Register 5 */
+#define CHNL_CSC_COEFF5 0x3C
+#define CHNL_CSC_COEFF5_D3_OFFSET 16
+#define CHNL_CSC_COEFF5_D3_MASK 0x1FF0000
+#define CHNL_CSC_COEFF5_D2_OFFSET 0
+#define CHNL_CSC_COEFF5_D2_MASK 0x1FF
+
+/* Channel Alpha Value Register for ROI 0 */
+#define CHNL_ROI_0_ALPHA 0x40
+#define CHNL_ROI_0_ALPHA_OFFSET 24
+#define CHNL_ROI_0_ALPHA_MASK 0xFF000000
+#define CHNL_ROI_0_ALPHA_EN_OFFSET 16
+#define CHNL_ROI_0_ALPHA_EN_MASK 0x10000
+
+/* Channel Upper Left Coordinate Register for ROI 0 */
+#define CHNL_ROI_0_ULC 0x44
+#define CHNL_ROI_0_ULC_X_OFFSET 16
+#define CHNL_ROI_0_ULC_X_MASK 0xFFF0000
+#define CHNL_ROI_0_ULC_Y_OFFSET 0
+#define CHNL_ROI_0_ULC_Y_MASK 0xFFF
+
+/* Channel Lower Right Coordinate Register for ROI 0 */
+#define CHNL_ROI_0_LRC 0x48
+#define CHNL_ROI_0_LRC_X_OFFSET 16
+#define CHNL_ROI_0_LRC_X_MASK 0xFFF0000
+#define CHNL_ROI_0_LRC_Y_OFFSET 0
+#define CHNL_ROI_0_LRC_Y_MASK 0xFFF
+
+/* Channel Alpha Value Register for ROI 1 */
+#define CHNL_ROI_1_ALPHA 0x4C
+#define CHNL_ROI_1_ALPHA_OFFSET 24
+#define CHNL_ROI_1_ALPHA_MASK 0xFF000000
+#define CHNL_ROI_1_ALPHA_EN_OFFSET 16
+#define CHNL_ROI_1_ALPHA_EN_MASK 0x10000
+
+/* Channel Upper Left Coordinate Register for ROI 1 */
+#define CHNL_ROI_1_ULC 0x50
+#define CHNL_ROI_1_ULC_X_OFFSET 16
+#define CHNL_ROI_1_ULC_X_MASK 0xFFF0000
+#define CHNL_ROI_1_ULC_Y_OFFSET 0
+#define CHNL_ROI_1_ULC_Y_MASK 0xFFF
+
+/* Channel Lower Right Coordinate Register for ROI 1 */
+#define CHNL_ROI_1_LRC 0x54
+#define CHNL_ROI_1_LRC_X_OFFSET 16
+#define CHNL_ROI_1_LRC_X_MASK 0xFFF0000
+#define CHNL_ROI_1_LRC_Y_OFFSET 0
+#define CHNL_ROI_1_LRC_Y_MASK 0xFFF
+
+/* Channel Alpha Value Register for ROI 2 */
+#define CHNL_ROI_2_ALPHA 0x58
+#define CHNL_ROI_2_ALPHA_OFFSET 24
+#define CHNL_ROI_2_ALPHA_MASK 0xFF000000
+#define CHNL_ROI_2_ALPHA_EN_OFFSET 16
+#define CHNL_ROI_2_ALPHA_EN_MASK 0x10000
+
+/* Channel Upper Left Coordinate Register for ROI 2 */
+#define CHNL_ROI_2_ULC 0x5C
+#define CHNL_ROI_2_ULC_X_OFFSET 16
+#define CHNL_ROI_2_ULC_X_MASK 0xFFF0000
+#define CHNL_ROI_2_ULC_Y_OFFSET 0
+#define CHNL_ROI_2_ULC_Y_MASK 0xFFF
+
+/* Channel Lower Right Coordinate Register for ROI 2 */
+#define CHNL_ROI_2_LRC 0x60
+#define CHNL_ROI_2_LRC_X_OFFSET 16
+#define CHNL_ROI_2_LRC_X_MASK 0xFFF0000
+#define CHNL_ROI_2_LRC_Y_OFFSET 0
+#define CHNL_ROI_2_LRC_Y_MASK 0xFFF
+
+/* Channel Alpha Value Register for ROI 3 */
+#define CHNL_ROI_3_ALPHA 0x64
+#define CHNL_ROI_3_ALPHA_OFFSET 24
+#define CHNL_ROI_3_ALPHA_MASK 0xFF000000
+#define CHNL_ROI_3_ALPHA_EN_OFFSET 16
+#define CHNL_ROI_3_ALPHA_EN_MASK 0x10000
+
+/* Channel Upper Left Coordinate Register for ROI 3 */
+#define CHNL_ROI_3_ULC 0x68
+#define CHNL_ROI_3_ULC_X_OFFSET 16
+#define CHNL_ROI_3_ULC_X_MASK 0xFFF0000
+#define CHNL_ROI_3_ULC_Y_OFFSET 0
+#define CHNL_ROI_3_ULC_Y_MASK 0xFFF
+
+/* Channel Lower Right Coordinate Register for ROI 3 */
+#define CHNL_ROI_3_LRC 0x6C
+#define CHNL_ROI_3_LRC_X_OFFSET 16
+#define CHNL_ROI_3_LRC_X_MASK 0xFFF0000
+#define CHNL_ROI_3_LRC_Y_OFFSET 0
+#define CHNL_ROI_3_LRC_Y_MASK 0xFFF
+
+/* Channel RGB or Luma (Y) Output Buffer 1 Address */
+#define CHNL_OUT_BUF1_ADDR_Y 0x70
+
+/* Channel Chroma (U/Cb/UV/CbCr) Output Buffer 1 Address */
+#define CHNL_OUT_BUF1_ADDR_U 0x74
+
+/* Channel Chroma (V/Cr) Output Buffer 1 Address */
+#define CHNL_OUT_BUF1_ADDR_V 0x78
+
+/* Channel Output Buffer Pitch */
+#define CHNL_OUT_BUF_PITCH 0x7C
+#define CHNL_OUT_BUF_PITCH_LINE_PITCH_OFFSET 0
+#define CHNL_OUT_BUF_PITCH_LINE_PITCH_MASK 0xFFFF
+
+/* Channel Input Buffer Address */
+#define CHNL_IN_BUF_ADDR 0x80
+
+/* Channel Input Buffer Pitch */
+#define CHNL_IN_BUF_PITCH 0x84
+#define CHNL_IN_BUF_PITCH_FRM_PITCH_OFFSET 16
+#define CHNL_IN_BUF_PITCH_FRM_PITCH_MASK 0xFFFF0000
+#define CHNL_IN_BUF_PITCH_LINE_PITCH_OFFSET 0
+#define CHNL_IN_BUF_PITCH_LINE_PITCH_MASK 0xFFFF
+
+/* Channel Memory Read Control */
+#define CHNL_MEM_RD_CTRL 0x88
+#define CHNL_MEM_RD_CTRL_IMG_TYPE_OFFSET 28
+#define CHNL_MEM_RD_CTRL_IMG_TYPE_MASK 0xF0000000
+#define CHNL_MEM_RD_CTRL_READ_MEM_OFFSET 0
+#define CHNL_MEM_RD_CTRL_READ_MEM_MASK 1
+#define CHNL_MEM_RD_CTRL_READ_MEM_ENABLE 1
+
+/* Channel RGB or Luma (Y) Output Buffer 2 Address */
+#define CHNL_OUT_BUF2_ADDR_Y 0x8C
+
+/* Channel Chroma (U/Cb/UV/CbCr) Output Buffer 2 Address */
+#define CHNL_OUT_BUF2_ADDR_U 0x90
+
+/* Channel Chroma (V/Cr) Output Buffer 2 Address */
+#define CHNL_OUT_BUF2_ADDR_V 0x94
+
+/* Channel scale image config */
+#define CHNL_SCL_IMG_CFG 0x98
+#define CHNL_SCL_IMG_CFG_HEIGHT_OFFSET 16
+#define CHNL_SCL_IMG_CFG_HEIGHT_MASK 0x1FFF0000
+#define CHNL_SCL_IMG_CFG_WIDTH_OFFSET 0
+#define CHNL_SCL_IMG_CFG_WIDTH_MASK 0x1FFF
+
+/* Channel Flow Control Register */
+#define CHNL_FLOW_CTRL 0x9C
+#define CHNL_FLOW_CTRL_FC_DENOM_MASK 0xFF
+#define CHNL_FLOW_CTRL_FC_DENOM_OFFSET 0
+#define CHNL_FLOW_CTRL_FC_NUMER_MASK 0xFF0000
+#define CHNL_FLOW_CTRL_FC_NUMER_OFFSET	16
+
+enum isi_csi_coeff {
+ YUV2RGB = 0,
+ RGB2YUV,
+};
+
+void mxc_isi_channel_init(struct mxc_isi_dev *mxc_isi);
+void mxc_isi_channel_deinit(struct mxc_isi_dev *mxc_isi);
+void mxc_isi_channel_enable(struct mxc_isi_dev *mxc_isi, bool m2m_enabled);
+void mxc_isi_channel_disable(struct mxc_isi_dev *mxc_isi);
+void mxc_isi_cap_frame_write_done(struct mxc_isi_dev *mxc_isi);
+void mxc_isi_channel_set_deinterlace(struct mxc_isi_dev *mxc_isi);
+void mxc_isi_channel_sw_reset(struct mxc_isi_dev *mxc_isi);
+void mxc_isi_channel_hw_reset(struct mxc_isi_dev *mxc_isi);
+void mxc_isi_channel_source_config(struct mxc_isi_dev *mxc_isi);
+void mxc_isi_channel_set_flip(struct mxc_isi_dev *mxc_isi);
+void mxc_isi_channel_set_alpha(struct mxc_isi_dev *mxc_isi);
+void mxc_isi_channel_set_chain_buf(struct mxc_isi_dev *mxc_isi);
+void mxc_isi_channel_set_crop(struct mxc_isi_dev *mxc_isi);
+void mxc_isi_channel_set_memory_image(struct mxc_isi_dev *mxc_isi);
+void mxc_isi_channel_set_panic_threshold(struct mxc_isi_dev *mxc_isi);
+
+void mxc_isi_channel_set_scaling(struct mxc_isi_dev *mxc_isi,
+ struct mxc_isi_frame *src_f,
+ struct mxc_isi_frame *dst_f);
+
+void mxc_isi_channel_set_outbuf(struct mxc_isi_dev *mxc_isi,
+ struct mxc_isi_buffer *buf);
+
+void mxc_isi_channel_set_csc(struct mxc_isi_dev *mxc_isi,
+ struct mxc_isi_frame *src_f,
+ struct mxc_isi_frame *dst_f);
+
+void mxc_isi_channel_config(struct mxc_isi_dev *mxc_isi,
+ struct mxc_isi_frame *src_f,
+ struct mxc_isi_frame *dst_f);
+
+void mxc_isi_channel_set_alpha_roi0(struct mxc_isi_dev *mxc_isi,
+ struct v4l2_rect *rect);
+void mxc_isi_channel_set_m2m_src_addr(struct mxc_isi_dev *mxc_isi,
+ struct mxc_isi_buffer *buf);
+
+void mxc_isi_m2m_config_src(struct mxc_isi_dev *mxc_isi,
+ struct mxc_isi_frame *src_f);
+void mxc_isi_m2m_config_dst(struct mxc_isi_dev *mxc_isi,
+ struct mxc_isi_frame *dst_f);
+
+void mxc_isi_m2m_start_read(struct mxc_isi_dev *mxc_isi);
+void mxc_isi_m2m_frame_write_done(struct mxc_isi_dev *mxc_isi);
+void mxc_isi_clean_irq_status(struct mxc_isi_dev *mxc_isi, u32 val);
+void mxc_isi_clean_registers(struct mxc_isi_dev *mxc_isi);
+void mxc_isi_enable_irq(struct mxc_isi_dev *mxc_isi);
+void mxc_isi_disable_irq(struct mxc_isi_dev *mxc_isi);
+void dump_isi_regs(struct mxc_isi_dev *mxc_isi);
+
+u32 mxc_isi_get_irq_status(struct mxc_isi_dev *mxc_isi);
+bool is_buf_active(struct mxc_isi_dev *mxc_isi, int buf_id);
+
+struct device *mxc_isi_dev_get_parent(struct platform_device *pdev);
+struct mxc_isi_dev *mxc_isi_get_hostdata(struct platform_device *pdev);
+#endif /* __MXC_ISI_HW_H__ */
diff --git a/drivers/staging/media/imx/imx8-isi-m2m.c b/drivers/staging/media/imx/imx8-isi-m2m.c
new file mode 100644
index 000000000000..e292b91e9ecc
--- /dev/null
+++ b/drivers/staging/media/imx/imx8-isi-m2m.c
@@ -0,0 +1,1201 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ISI V4L2 memory to memory driver for i.MX8QXP/QM platform
+ *
+ * The ISI is the Image Sensor Interface of the i.MX8QXP/QM platform,
+ * used to process images from a camera sensor or from memory and to
+ * write the result to memory or to the display controller (DC)
+ *
+ * Copyright (c) 2019 NXP Semiconductor
+ *
+ */
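+
+/*
+ * Typical userspace flow (a sketch, not specific to this driver): set
+ * the OUTPUT (source) and CAPTURE (destination) formats with
+ * VIDIOC_S_FMT, request and queue buffers on both queues, then stream
+ * on; each queued source/destination buffer pair is one m2m job.
+ */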
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/bug.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+#include <linux/pm_runtime.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/of_graph.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-ioctl.h>
+#include <media/v4l2-mem2mem.h>
+#include <media/videobuf2-core.h>
+#include <media/videobuf2-dma-contig.h>
+
+#include "imx8-isi-hw.h"
+#include "imx8-common.h"
+
+#define to_isi_buffer(x) \
+ container_of((x), struct mxc_isi_buffer, v4l2_buf)
+
+#define file_to_ctx(file) \
+	container_of(file->private_data, struct mxc_isi_ctx, fh)
+
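+/*
+ * Share the capture driver's output format table when the ISI capture
+ * driver is built in; otherwise fall back to an empty placeholder.
+ */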
+#if defined(CONFIG_IMX8_ISI_CAPTURE)
+extern struct mxc_isi_fmt mxc_isi_out_formats[9];
+#else
+static struct mxc_isi_fmt mxc_isi_out_formats[9] = {};
+#endif
+
+struct mxc_isi_fmt mxc_isi_input_formats[] = {
+ /* Pixel link input format */
+ {
+ .name = "XBGR32",
+ .fourcc = V4L2_PIX_FMT_XBGR32,
+ .depth = { 32 },
+ .color = MXC_ISI_M2M_IN_FMT_XRGB8,
+ .memplanes = 1,
+ .colplanes = 1,
+ }, {
+ .name = "RGB565",
+ .fourcc = V4L2_PIX_FMT_RGB565,
+ .depth = { 16 },
+ .color = MXC_ISI_M2M_IN_FMT_RGB565,
+ .memplanes = 1,
+ .colplanes = 1,
+ }, {
+ .name = "YUV24 (X-Y-U-V)",
+ .fourcc = V4L2_PIX_FMT_YUV24,
+ .depth = { 24 },
+ .color = MXC_ISI_M2M_IN_FMT_YUV444_1P8P,
+ .memplanes = 1,
+ .colplanes = 1,
+ }, {
+ .name = "YUV16 (X-Y-U-V)",
+ .fourcc = V4L2_PIX_FMT_YUYV,
+ .depth = { 16 },
+ .color = MXC_ISI_M2M_IN_FMT_YUV422_1P8P,
+ .memplanes = 1,
+ .colplanes = 1,
+ }, {
+ .name = "RGBA (R-G-B-A)",
+ .fourcc = V4L2_PIX_FMT_RGBA32,
+ .depth = { 32 },
+ .color = MXC_ISI_M2M_IN_FMT_XBGR8,
+ .memplanes = 1,
+ .colplanes = 1,
+ }
+};
+
+static struct v4l2_m2m_buffer *to_v4l2_m2m_buffer(struct vb2_v4l2_buffer *vbuf)
+{
+ struct v4l2_m2m_buffer *b;
+
+ b = container_of(vbuf, struct v4l2_m2m_buffer, vb);
+ return b;
+}
+
+void mxc_isi_m2m_frame_write_done(struct mxc_isi_dev *mxc_isi)
+{
+ struct mxc_isi_m2m_dev *isi_m2m = mxc_isi->isi_m2m;
+ struct v4l2_fh *fh;
+ struct mxc_isi_ctx *curr_mxc_ctx;
+ struct vb2_v4l2_buffer *src_vbuf, *dst_vbuf;
+ struct mxc_isi_buffer *src_buf, *dst_buf;
+ struct v4l2_m2m_buffer *b;
+
+ dev_dbg(&isi_m2m->pdev->dev, "%s\n", __func__);
+
+ curr_mxc_ctx = v4l2_m2m_get_curr_priv(isi_m2m->m2m_dev);
+ if (!curr_mxc_ctx) {
+ dev_err(&isi_m2m->pdev->dev,
+ "Instance released before the end of transaction\n");
+ return;
+ }
+ fh = &curr_mxc_ctx->fh;
+
+ if (isi_m2m->aborting) {
+ mxc_isi_channel_disable(mxc_isi);
+ dev_warn(&isi_m2m->pdev->dev, "Aborting current job\n");
+ goto job_finish;
+ }
+
+ src_vbuf = v4l2_m2m_next_src_buf(fh->m2m_ctx);
+ if (!src_vbuf) {
+ dev_err(&isi_m2m->pdev->dev, "No enought source buffers\n");
+ goto job_finish;
+ }
+ src_buf = to_isi_buffer(src_vbuf);
+ v4l2_m2m_src_buf_remove(fh->m2m_ctx);
+ v4l2_m2m_buf_done(src_vbuf, VB2_BUF_STATE_DONE);
+
+ if (!list_empty(&isi_m2m->out_active)) {
+ dst_buf = list_first_entry(&isi_m2m->out_active,
+ struct mxc_isi_buffer, list);
+ dst_vbuf = &dst_buf->v4l2_buf;
+ list_del_init(&dst_buf->list);
+ dst_buf->v4l2_buf.vb2_buf.timestamp = ktime_get_ns();
+ v4l2_m2m_buf_done(dst_vbuf, VB2_BUF_STATE_DONE);
+ }
+ isi_m2m->frame_count++;
+
+ dst_vbuf = v4l2_m2m_next_dst_buf(fh->m2m_ctx);
+ if (dst_vbuf) {
+ dst_vbuf->vb2_buf.state = VB2_BUF_STATE_ACTIVE;
+ dst_buf = to_isi_buffer(dst_vbuf);
+ dst_buf->v4l2_buf.sequence = isi_m2m->frame_count;
+ mxc_isi_channel_set_outbuf(mxc_isi, dst_buf);
+ v4l2_m2m_dst_buf_remove(fh->m2m_ctx);
+ b = to_v4l2_m2m_buffer(dst_vbuf);
+ list_add_tail(&b->list, &isi_m2m->out_active);
+ }
+
+job_finish:
+ v4l2_m2m_job_finish(isi_m2m->m2m_dev, fh->m2m_ctx);
+}
+EXPORT_SYMBOL_GPL(mxc_isi_m2m_frame_write_done);
+
+static void mxc_isi_m2m_device_run(void *priv)
+{
+ struct mxc_isi_ctx *mxc_ctx = priv;
+ struct mxc_isi_m2m_dev *isi_m2m = mxc_ctx->isi_m2m;
+ struct mxc_isi_dev *mxc_isi = mxc_isi_get_hostdata(isi_m2m->pdev);
+ struct v4l2_fh *fh = &mxc_ctx->fh;
+ struct vb2_v4l2_buffer *vbuf;
+ struct mxc_isi_buffer *src_buf;
+ unsigned long flags;
+
+ dev_dbg(&isi_m2m->pdev->dev, "%s enter\n", __func__);
+
+ spin_lock_irqsave(&isi_m2m->slock, flags);
+
+ /* SRC */
+ vbuf = v4l2_m2m_next_src_buf(fh->m2m_ctx);
+ if (!vbuf) {
+ dev_err(&isi_m2m->pdev->dev, "Null src buf\n");
+ goto unlock;
+ }
+
+ src_buf = to_isi_buffer(vbuf);
+ mxc_isi_channel_set_m2m_src_addr(mxc_isi, src_buf);
+ mxc_isi_channel_enable(mxc_isi, mxc_isi->m2m_enabled);
+
+unlock:
+ spin_unlock_irqrestore(&isi_m2m->slock, flags);
+}
+
+static int mxc_isi_m2m_job_ready(void *priv)
+{
+ struct mxc_isi_ctx *mxc_ctx = priv;
+ struct mxc_isi_m2m_dev *isi_m2m = mxc_ctx->isi_m2m;
+ struct v4l2_fh *fh = &mxc_ctx->fh;
+ unsigned int num_src_bufs_ready;
+ unsigned int num_dst_bufs_ready;
+ unsigned long flags;
+
+ dev_dbg(&isi_m2m->pdev->dev, "%s\n", __func__);
+
+ spin_lock_irqsave(&isi_m2m->slock, flags);
+ num_src_bufs_ready = v4l2_m2m_num_src_bufs_ready(fh->m2m_ctx);
+ num_dst_bufs_ready = v4l2_m2m_num_dst_bufs_ready(fh->m2m_ctx);
+ spin_unlock_irqrestore(&isi_m2m->slock, flags);
+
+ if (num_src_bufs_ready >= 1 && num_dst_bufs_ready >= 1)
+ return 1;
+ return 0;
+}
+
+static void mxc_isi_m2m_job_abort(void *priv)
+{
+ struct mxc_isi_ctx *mxc_ctx = priv;
+ struct mxc_isi_m2m_dev *isi_m2m = mxc_ctx->isi_m2m;
+
+ isi_m2m->aborting = 1;
+ dev_dbg(&isi_m2m->pdev->dev, "Abort requested\n");
+}
+
+static const struct v4l2_m2m_ops mxc_isi_m2m_ops = {
+ .device_run = mxc_isi_m2m_device_run,
+ .job_ready = mxc_isi_m2m_job_ready,
+ .job_abort = mxc_isi_m2m_job_abort,
+};
+
+static int m2m_vb2_queue_setup(struct vb2_queue *q,
+ unsigned int *num_buffers, unsigned int *num_planes,
+ unsigned int sizes[], struct device *alloc_devs[])
+{
+ struct mxc_isi_ctx *mxc_ctx = vb2_get_drv_priv(q);
+ struct mxc_isi_m2m_dev *isi_m2m = mxc_ctx->isi_m2m;
+ struct device *dev = &isi_m2m->pdev->dev;
+ struct mxc_isi_frame *frame;
+ struct mxc_isi_fmt *fmt;
+ unsigned long wh;
+ int i;
+
+ dev_dbg(&isi_m2m->pdev->dev, "%s\n", __func__);
+
+ if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ if (*num_buffers < 3) {
+ dev_err(dev, "%s at least need 3 buffer\n", __func__);
+ return -EINVAL;
+ }
+ frame = &isi_m2m->dst_f;
+ isi_m2m->req_cap_buf_num = *num_buffers;
+ } else {
+ if (*num_buffers < 1) {
+ dev_err(dev, "%s at least need one buffer\n", __func__);
+ return -EINVAL;
+ }
+ frame = &isi_m2m->src_f;
+ isi_m2m->req_out_buf_num = *num_buffers;
+ }
+
+ fmt = frame->fmt;
+ if (fmt == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < fmt->memplanes; i++)
+ alloc_devs[i] = &isi_m2m->pdev->dev;
+
+ *num_planes = fmt->memplanes;
+ wh = frame->width * frame->height;
+
+ for (i = 0; i < fmt->memplanes; i++) {
+ unsigned int size = (wh * fmt->depth[i]) >> 3;
+
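+		/* NV12 chroma (plane 1) is vertically subsampled by 2, so
+		 * it needs only half the luma plane size.
+		 */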
+ if (i == 1 && fmt->fourcc == V4L2_PIX_FMT_NV12)
+ size >>= 1;
+ sizes[i] = max_t(u32, size, frame->sizeimage[i]);
+
+ dev_dbg(&isi_m2m->pdev->dev, "%s, buf_n=%d, planes[%d]->size=%d\n",
+ __func__, *num_buffers, i, sizes[i]);
+ }
+
+ return 0;
+}
+
+static int m2m_vb2_buffer_prepare(struct vb2_buffer *vb2)
+{
+ struct vb2_queue *vq = vb2->vb2_queue;
+ struct mxc_isi_ctx *mxc_ctx = vb2_get_drv_priv(vq);
+ struct mxc_isi_m2m_dev *isi_m2m = mxc_ctx->isi_m2m;
+ struct mxc_isi_frame *frame;
+ int i;
+
+ if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ frame = &isi_m2m->dst_f;
+ else
+ frame = &isi_m2m->src_f;
+
+ if (frame == NULL)
+ return -EINVAL;
+
+ for (i = 0; i < frame->fmt->memplanes; i++) {
+ unsigned long size = frame->sizeimage[i];
+
+ if (vb2_plane_size(vb2, i) < size) {
+ dev_err(&isi_m2m->pdev->dev,
+ "User buffer too small (%ld < %ld)\n",
+ vb2_plane_size(vb2, i), size);
+ return -EINVAL;
+ }
+ vb2_set_plane_payload(vb2, i, size);
+ }
+
+ return 0;
+}
+
+static void m2m_vb2_buffer_queue(struct vb2_buffer *vb2)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb2);
+ struct mxc_isi_ctx *mxc_ctx = vb2_get_drv_priv(vb2->vb2_queue);
+ struct v4l2_fh *fh = &mxc_ctx->fh;
+
+ v4l2_m2m_buf_queue(fh->m2m_ctx, vbuf);
+}
+
+static int m2m_vb2_start_streaming(struct vb2_queue *q, unsigned int count)
+{
+ struct mxc_isi_ctx *mxc_ctx = vb2_get_drv_priv(q);
+ struct mxc_isi_m2m_dev *isi_m2m = mxc_ctx->isi_m2m;
+ struct mxc_isi_dev *mxc_isi = mxc_isi_get_hostdata(isi_m2m->pdev);
+ struct v4l2_fh *fh = &mxc_ctx->fh;
+ struct vb2_v4l2_buffer *dst_vbuf;
+ struct v4l2_m2m_buffer *b;
+ struct mxc_isi_buffer *dst_buf;
+ unsigned long flags;
+
+ if (V4L2_TYPE_IS_OUTPUT(q->type))
+ return 0;
+
+ if (count < 2) {
+ dev_err(&isi_m2m->pdev->dev, "Need to at leas 2 buffers\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&isi_m2m->slock, flags);
+
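+	/*
+	 * The ISI write channel is double-buffered, so two destination
+	 * buffers are programmed up front; the frame-done handler then
+	 * rotates through the capture queue one buffer at a time.
+	 */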
+ /* BUF1 */
+ dst_vbuf = v4l2_m2m_next_dst_buf(fh->m2m_ctx);
+ if (!dst_vbuf) {
+ dev_err(&isi_m2m->pdev->dev, "%d: Null dst buf\n", __LINE__);
+ goto unlock;
+ }
+ dst_vbuf->vb2_buf.state = VB2_BUF_STATE_ACTIVE;
+ dst_buf = to_isi_buffer(dst_vbuf);
+ dst_buf->v4l2_buf.sequence = 0;
+ mxc_isi_channel_set_outbuf(mxc_isi, dst_buf);
+ v4l2_m2m_dst_buf_remove(fh->m2m_ctx);
+ b = to_v4l2_m2m_buffer(dst_vbuf);
+ list_add_tail(&b->list, &isi_m2m->out_active);
+
+ /* BUF2 */
+ dst_vbuf = v4l2_m2m_next_dst_buf(fh->m2m_ctx);
+ if (!dst_vbuf) {
+ dev_err(&isi_m2m->pdev->dev, "%d: Null dst buf\n", __LINE__);
+ goto unlock;
+ }
+ dst_vbuf->vb2_buf.state = VB2_BUF_STATE_ACTIVE;
+ dst_buf = to_isi_buffer(dst_vbuf);
+ dst_buf->v4l2_buf.sequence = 1;
+ mxc_isi_channel_set_outbuf(mxc_isi, dst_buf);
+ v4l2_m2m_dst_buf_remove(fh->m2m_ctx);
+ b = to_v4l2_m2m_buffer(dst_vbuf);
+ list_add_tail(&b->list, &isi_m2m->out_active);
+
+ isi_m2m->frame_count = 1;
+ isi_m2m->aborting = 0;
+unlock:
+ spin_unlock_irqrestore(&isi_m2m->slock, flags);
+
+ return 0;
+}
+
+static void m2m_vb2_stop_streaming(struct vb2_queue *q)
+{
+ struct mxc_isi_ctx *mxc_ctx = vb2_get_drv_priv(q);
+ struct mxc_isi_m2m_dev *isi_m2m = mxc_ctx->isi_m2m;
+ struct vb2_v4l2_buffer *vb2;
+ struct mxc_isi_buffer *buf;
+ unsigned long flags;
+
+ spin_lock_irqsave(&isi_m2m->slock, flags);
+
+ while ((vb2 = v4l2_m2m_src_buf_remove(mxc_ctx->fh.m2m_ctx)) != NULL)
+ v4l2_m2m_buf_done(vb2, VB2_BUF_STATE_ERROR);
+
+ while ((vb2 = v4l2_m2m_dst_buf_remove(mxc_ctx->fh.m2m_ctx)) != NULL)
+ v4l2_m2m_buf_done(vb2, VB2_BUF_STATE_ERROR);
+
+ while (!list_empty(&isi_m2m->out_active)) {
+ buf = list_entry(isi_m2m->out_active.next, struct mxc_isi_buffer, list);
+ list_del(&buf->list);
+ vb2_buffer_done(&buf->v4l2_buf.vb2_buf, VB2_BUF_STATE_ERROR);
+ }
+
+ INIT_LIST_HEAD(&isi_m2m->out_active);
+
+ spin_unlock_irqrestore(&isi_m2m->slock, flags);
+}
+
+static const struct vb2_ops mxc_m2m_vb2_qops = {
+ .queue_setup = m2m_vb2_queue_setup,
+ .buf_prepare = m2m_vb2_buffer_prepare,
+ .buf_queue = m2m_vb2_buffer_queue,
+ .wait_prepare = vb2_ops_wait_prepare,
+ .wait_finish = vb2_ops_wait_finish,
+ .start_streaming = m2m_vb2_start_streaming,
+ .stop_streaming = m2m_vb2_stop_streaming,
+};
+
+static int mxc_m2m_queue_init(void *priv, struct vb2_queue *src_vq,
+ struct vb2_queue *dst_vq)
+{
+ struct mxc_isi_ctx *mxc_ctx = priv;
+ struct mxc_isi_m2m_dev *isi_m2m = mxc_ctx->isi_m2m;
+ int ret;
+
+ src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
+ src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ src_vq->drv_priv = mxc_ctx;
+ src_vq->buf_struct_size = sizeof(struct mxc_isi_buffer);
+ src_vq->ops = &mxc_m2m_vb2_qops;
+ src_vq->mem_ops = &vb2_dma_contig_memops;
+ src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ src_vq->lock = &isi_m2m->lock;
+ src_vq->dev = &isi_m2m->pdev->dev;
+
+ ret = vb2_queue_init(src_vq);
+ if (ret)
+ return ret;
+
+ dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
+ dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
+ dst_vq->drv_priv = mxc_ctx;
+ dst_vq->buf_struct_size = sizeof(struct mxc_isi_buffer);
+ dst_vq->ops = &mxc_m2m_vb2_qops;
+ dst_vq->mem_ops = &vb2_dma_contig_memops;
+ dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ dst_vq->lock = &isi_m2m->lock;
+ dst_vq->dev = &isi_m2m->pdev->dev;
+
+ ret = vb2_queue_init(dst_vq);
+ return ret;
+}
+
+static int mxc_isi_m2m_open(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct mxc_isi_m2m_dev *isi_m2m = video_drvdata(file);
+ struct mxc_isi_dev *mxc_isi = mxc_isi_get_hostdata(isi_m2m->pdev);
+ struct device *dev = &isi_m2m->pdev->dev;
+ struct mxc_isi_ctx *mxc_ctx = NULL;
+ int ret = 0;
+
+ if (atomic_read(&mxc_isi->usage_count) > 0) {
+ dev_err(dev, "ISI channel[%d] is busy\n", isi_m2m->id);
+ return -EBUSY;
+ }
+
+ if (mutex_lock_interruptible(&isi_m2m->lock))
+ return -ERESTARTSYS;
+
+ mxc_ctx = kzalloc(sizeof(*mxc_ctx), GFP_KERNEL);
+ if (!mxc_ctx) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
+
+ mxc_ctx->isi_m2m = isi_m2m;
+
+ v4l2_fh_init(&mxc_ctx->fh, vdev);
+ file->private_data = &mxc_ctx->fh;
+
+ mxc_ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(isi_m2m->m2m_dev,
+ mxc_ctx,
+ mxc_m2m_queue_init);
+ if (IS_ERR(mxc_ctx->fh.m2m_ctx)) {
+ dev_err(dev, "v4l2_m2m_ctx_init fail\n");
+ ret = PTR_ERR(mxc_ctx->fh.m2m_ctx);
+ v4l2_fh_exit(&mxc_ctx->fh);
+ kfree(mxc_ctx);
+ goto unlock;
+ }
+ v4l2_fh_add(&mxc_ctx->fh);
+
+ pm_runtime_get_sync(dev);
+ if (atomic_inc_return(&mxc_isi->usage_count) == 1)
+ mxc_isi_channel_init(mxc_isi);
+
+ /* lock host data */
+ mutex_lock(&mxc_isi->lock);
+ mxc_isi->m2m_enabled = true;
+ mutex_unlock(&mxc_isi->lock);
+unlock:
+ mutex_unlock(&isi_m2m->lock);
+ return ret;
+}
+
+static int mxc_isi_m2m_release(struct file *file)
+{
+ struct mxc_isi_m2m_dev *isi_m2m = video_drvdata(file);
+ struct mxc_isi_dev *mxc_isi = mxc_isi_get_hostdata(isi_m2m->pdev);
+ struct device *dev = &isi_m2m->pdev->dev;
+ struct mxc_isi_ctx *mxc_ctx = file_to_ctx(file);
+
+ v4l2_fh_del(&mxc_ctx->fh);
+ v4l2_fh_exit(&mxc_ctx->fh);
+
+ mutex_lock(&isi_m2m->lock);
+ v4l2_m2m_ctx_release(mxc_ctx->fh.m2m_ctx);
+ mutex_unlock(&isi_m2m->lock);
+
+ kfree(mxc_ctx);
+ if (atomic_dec_and_test(&mxc_isi->usage_count))
+ mxc_isi_channel_deinit(mxc_isi);
+
+ mutex_lock(&mxc_isi->lock);
+ mxc_isi->m2m_enabled = false;
+ mutex_unlock(&mxc_isi->lock);
+
+ pm_runtime_put(dev);
+ return 0;
+}
+
+static const struct v4l2_file_operations mxc_isi_m2m_fops = {
+ .owner = THIS_MODULE,
+ .open = mxc_isi_m2m_open,
+ .release = mxc_isi_m2m_release,
+ .poll = v4l2_m2m_fop_poll,
+ .unlocked_ioctl = video_ioctl2,
+ .mmap = v4l2_m2m_fop_mmap,
+};
+
+static int mxc_isi_m2m_querycap(struct file *file, void *priv,
+ struct v4l2_capability *cap)
+{
+ struct mxc_isi_m2m_dev *isi_m2m = video_drvdata(file);
+
+ strlcpy(cap->driver, MXC_ISI_M2M, sizeof(cap->driver));
+ strlcpy(cap->card, MXC_ISI_M2M, sizeof(cap->card));
+ snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s.%d",
+ dev_name(&isi_m2m->pdev->dev), isi_m2m->id);
+ cap->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
+ return 0;
+}
+
+static int mxc_isi_m2m_enum_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct mxc_isi_m2m_dev *isi_m2m = video_drvdata(file);
+ struct mxc_isi_fmt *fmt;
+
+ dev_dbg(&isi_m2m->pdev->dev, "%s\n", __func__);
+ if (f->index >= (int)ARRAY_SIZE(mxc_isi_input_formats))
+ return -EINVAL;
+
+ fmt = &mxc_isi_input_formats[f->index];
+ strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int mxc_isi_m2m_enum_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_fmtdesc *f)
+{
+ struct mxc_isi_m2m_dev *isi_m2m = video_drvdata(file);
+ struct mxc_isi_fmt *fmt;
+
+ dev_dbg(&isi_m2m->pdev->dev, "%s\n", __func__);
+ if (f->index >= (int)ARRAY_SIZE(mxc_isi_out_formats))
+ return -EINVAL;
+
+ fmt = &mxc_isi_out_formats[f->index];
+ strncpy(f->description, fmt->name, sizeof(f->description) - 1);
+
+ f->pixelformat = fmt->fourcc;
+
+ return 0;
+}
+
+static int mxc_isi_m2m_try_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct mxc_isi_m2m_dev *isi_m2m = video_drvdata(file);
+ struct device *dev = &isi_m2m->pdev->dev;
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct mxc_isi_fmt *fmt = NULL;
+ int i;
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(mxc_isi_input_formats); i++) {
+ fmt = &mxc_isi_input_formats[i];
+ if (fmt->fourcc == pix->pixelformat)
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(mxc_isi_input_formats)) {
+ dev_err(dev, "%s, format is not support!\n", __func__);
+ return -EINVAL;
+ }
+
+ if (pix->width <= 0 || pix->height <= 0) {
+ dev_err(dev, "%s, width %d, height %d is not valid\n"
+ , __func__, pix->width, pix->height);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mxc_isi_m2m_try_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct mxc_isi_m2m_dev *isi_m2m = video_drvdata(file);
+ struct device *dev = &isi_m2m->pdev->dev;
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct mxc_isi_fmt *fmt = NULL;
+ int i;
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(mxc_isi_out_formats); i++) {
+ fmt = &mxc_isi_out_formats[i];
+ if (fmt->fourcc == pix->pixelformat)
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(mxc_isi_out_formats)) {
+ dev_err(dev, "%s, format is not support!\n", __func__);
+ return -EINVAL;
+ }
+
+ if (pix->width <= 0 || pix->height <= 0) {
+ dev_err(dev, "%s, width %d, height %d is not valid\n"
+ , __func__, pix->width, pix->height);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mxc_isi_m2m_s_fmt_vid_out(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mxc_isi_m2m_dev *isi_m2m = video_drvdata(file);
+ struct mxc_isi_dev *mxc_isi = mxc_isi_get_hostdata(isi_m2m->pdev);
+ struct v4l2_fh *fh = file->private_data;
+ struct mxc_isi_frame *frame = &isi_m2m->src_f;
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct mxc_isi_fmt *fmt;
+ struct vb2_queue *vq;
+ int bpl, i;
+
+ dev_dbg(&isi_m2m->pdev->dev, "%s\n", __func__);
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ vq = v4l2_m2m_get_vq(fh->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ dev_err(&isi_m2m->pdev->dev, "queue busy\n");
+ return -EBUSY;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mxc_isi_input_formats); i++) {
+ fmt = &mxc_isi_input_formats[i];
+ if (pix && fmt->fourcc == pix->pixelformat)
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(mxc_isi_input_formats)) {
+ dev_dbg(&isi_m2m->pdev->dev, "%s, format is not support!\n", __func__);
+ return -EINVAL;
+ }
+
+	/* update output frame size and format */
+ if (pix->height <= 0 || pix->width <= 0)
+ return -EINVAL;
+
+ frame->fmt = fmt;
+ frame->height = pix->height;
+ frame->width = pix->width;
+
+ pix->num_planes = fmt->memplanes;
+ for (i = 0; i < pix->num_planes; i++) {
+ bpl = pix->plane_fmt[i].bytesperline;
+
+ if ((bpl == 0) || (bpl / (fmt->depth[i] >> 3)) < pix->width)
+ pix->plane_fmt[i].bytesperline =
+ (pix->width * fmt->depth[i]) >> 3;
+
+ if (pix->plane_fmt[i].sizeimage == 0)
+ pix->plane_fmt[i].sizeimage = (pix->width * pix->height *
+ fmt->depth[i] >> 3);
+ }
+
+ frame->bytesperline[0] = frame->width * frame->fmt->depth[0] / 8;
+ frame->sizeimage[0] = frame->height * frame->bytesperline[0];
+
+ set_frame_bounds(frame, pix->width, pix->height);
+ mxc_isi_m2m_config_src(mxc_isi, frame);
+
+ return 0;
+}
+
+static int mxc_isi_m2m_s_fmt_vid_cap(struct file *file, void *priv,
+ struct v4l2_format *f)
+{
+ struct mxc_isi_m2m_dev *isi_m2m = video_drvdata(file);
+ struct mxc_isi_dev *mxc_isi = mxc_isi_get_hostdata(isi_m2m->pdev);
+ struct v4l2_fh *fh = file->private_data;
+ struct mxc_isi_frame *frame = &isi_m2m->dst_f;
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct mxc_isi_fmt *fmt;
+ struct vb2_queue *vq;
+ int bpl, i;
+
+ dev_dbg(&isi_m2m->pdev->dev, "%s\n", __func__);
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ vq = v4l2_m2m_get_vq(fh->m2m_ctx, f->type);
+ if (!vq)
+ return -EINVAL;
+
+ if (vb2_is_busy(vq)) {
+ dev_err(&isi_m2m->pdev->dev, "queue busy\n");
+ return -EBUSY;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(mxc_isi_out_formats); i++) {
+ fmt = &mxc_isi_out_formats[i];
+ if (pix && fmt->fourcc == pix->pixelformat)
+ break;
+ }
+
+ if (i >= ARRAY_SIZE(mxc_isi_out_formats)) {
+ dev_err(&isi_m2m->pdev->dev, "%s, format is not support!\n", __func__);
+ return -EINVAL;
+ }
+
+	/* update output frame size and format */
+ if (pix->height <= 0 || pix->width <= 0) {
+ dev_err(&isi_m2m->pdev->dev,
+ "Invalid width or height(w=%d, h=%d)\n",
+ pix->width, pix->height);
+ return -EINVAL;
+ }
+
+ if ((pix->pixelformat == V4L2_PIX_FMT_NV12) && ((pix->width / 4) % 2)) {
+ dev_err(&isi_m2m->pdev->dev,
+ "Invalid width or height(w=%d, h=%d) for NV12\n",
+ pix->width, pix->height);
+ return -EINVAL;
+ } else if ((pix->pixelformat != V4L2_PIX_FMT_XBGR32) && (pix->width % 2)) {
+ dev_err(&isi_m2m->pdev->dev,
+ "Invalid width or height(w=%d, h=%d) for %.4s\n",
+ pix->width, pix->height, (char *)&pix->pixelformat);
+ return -EINVAL;
+ }
+
+ frame->fmt = fmt;
+ frame->height = pix->height;
+ frame->width = pix->width;
+
+ pix->num_planes = fmt->memplanes;
+ for (i = 0; i < pix->num_planes; i++) {
+ bpl = pix->plane_fmt[i].bytesperline;
+
+ if ((bpl == 0) || (bpl / (fmt->depth[i] >> 3)) < pix->width)
+ pix->plane_fmt[i].bytesperline =
+ (pix->width * fmt->depth[i]) >> 3;
+
+ if (pix->plane_fmt[i].sizeimage == 0) {
+ if ((i == 1) && (pix->pixelformat == V4L2_PIX_FMT_NV12))
+ pix->plane_fmt[i].sizeimage =
+ (pix->width * (pix->height >> 1) * fmt->depth[i] >> 3);
+ else
+ pix->plane_fmt[i].sizeimage = (pix->width * pix->height *
+ fmt->depth[i] >> 3);
+ }
+ }
+
+ if (pix->num_planes > 1) {
+ for (i = 0; i < pix->num_planes; i++) {
+ frame->bytesperline[i] = pix->plane_fmt[i].bytesperline;
+ frame->sizeimage[i] = pix->plane_fmt[i].sizeimage;
+ }
+ } else {
+ frame->bytesperline[0] = frame->width * frame->fmt->depth[0] / 8;
+ frame->sizeimage[0] = frame->height * frame->bytesperline[0];
+ }
+
+ memcpy(&isi_m2m->pix, pix, sizeof(*pix));
+
+ set_frame_bounds(frame, pix->width, pix->height);
+ mxc_isi_m2m_config_dst(mxc_isi, frame);
+
+ return 0;
+}
+
+static int mxc_isi_m2m_g_fmt_vid_cap(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct mxc_isi_m2m_dev *isi_m2m = video_drvdata(file);
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct mxc_isi_frame *frame = &isi_m2m->dst_f;
+ int i;
+
+ dev_dbg(&isi_m2m->pdev->dev, "%s\n", __func__);
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ return -EINVAL;
+
+ pix->width = frame->o_width;
+ pix->height = frame->o_height;
+ pix->field = V4L2_FIELD_NONE;
+ pix->pixelformat = frame->fmt->fourcc;
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+ pix->num_planes = frame->fmt->memplanes;
+
+ for (i = 0; i < pix->num_planes; ++i) {
+ pix->plane_fmt[i].bytesperline = frame->bytesperline[i];
+ pix->plane_fmt[i].sizeimage = frame->sizeimage[i];
+ }
+
+ return 0;
+}
+
+static int mxc_isi_m2m_g_fmt_vid_out(struct file *file, void *fh,
+ struct v4l2_format *f)
+{
+ struct mxc_isi_m2m_dev *isi_m2m = video_drvdata(file);
+ struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp;
+ struct mxc_isi_frame *frame = &isi_m2m->src_f;
+ int i;
+
+ dev_dbg(&isi_m2m->pdev->dev, "%s\n", __func__);
+
+ if (f->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
+ return -EINVAL;
+
+ pix->width = frame->o_width;
+ pix->height = frame->o_height;
+ pix->field = V4L2_FIELD_NONE;
+ pix->pixelformat = frame->fmt->fourcc;
+ pix->colorspace = V4L2_COLORSPACE_JPEG;
+ pix->num_planes = frame->fmt->memplanes;
+
+ for (i = 0; i < pix->num_planes; ++i) {
+ pix->plane_fmt[i].bytesperline = frame->bytesperline[i];
+ pix->plane_fmt[i].sizeimage = frame->sizeimage[i];
+ }
+
+ return 0;
+}
+
+static int mxc_isi_m2m_streamon(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct mxc_isi_m2m_dev *isi_m2m = video_drvdata(file);
+ struct mxc_isi_dev *mxc_isi = mxc_isi_get_hostdata(isi_m2m->pdev);
+ struct mxc_isi_frame *src_f, *dst_f;
+ int ret;
+
+ src_f = &isi_m2m->src_f;
+ dst_f = &isi_m2m->dst_f;
+
+ if ((dst_f->width > src_f->width) ||
+ (dst_f->height > src_f->height)) {
+ dev_err(&isi_m2m->pdev->dev, "%s Not support upscale\n", __func__);
+ return -EINVAL;
+ }
+
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
+ isi_m2m->frame_count = 0;
+ mxc_isi_channel_config(mxc_isi, src_f, dst_f);
+ }
+
+ ret = v4l2_m2m_ioctl_streamon(file, priv, type);
+
+ return ret;
+}
+
+static int mxc_isi_m2m_streamoff(struct file *file, void *priv,
+ enum v4l2_buf_type type)
+{
+ struct mxc_isi_m2m_dev *isi_m2m = video_drvdata(file);
+ struct mxc_isi_dev *mxc_isi = mxc_isi_get_hostdata(isi_m2m->pdev);
+ int ret;
+
+ if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
+ mxc_isi_channel_disable(mxc_isi);
+
+ ret = v4l2_m2m_ioctl_streamoff(file, priv, type);
+
+ return ret;
+}
+
+static const struct v4l2_ioctl_ops mxc_isi_m2m_ioctl_ops = {
+ .vidioc_querycap = mxc_isi_m2m_querycap,
+
+ .vidioc_enum_fmt_vid_cap = mxc_isi_m2m_enum_fmt_vid_cap,
+ .vidioc_enum_fmt_vid_out = mxc_isi_m2m_enum_fmt_vid_out,
+
+ .vidioc_try_fmt_vid_cap_mplane = mxc_isi_m2m_try_fmt_vid_cap,
+ .vidioc_try_fmt_vid_out_mplane = mxc_isi_m2m_try_fmt_vid_out,
+
+ .vidioc_s_fmt_vid_cap_mplane = mxc_isi_m2m_s_fmt_vid_cap,
+ .vidioc_s_fmt_vid_out_mplane = mxc_isi_m2m_s_fmt_vid_out,
+
+ .vidioc_g_fmt_vid_cap_mplane = mxc_isi_m2m_g_fmt_vid_cap,
+ .vidioc_g_fmt_vid_out_mplane = mxc_isi_m2m_g_fmt_vid_out,
+
+ .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
+ .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
+ .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
+ .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
+ .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,
+ .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
+ .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
+
+ .vidioc_streamon = mxc_isi_m2m_streamon,
+ .vidioc_streamoff = mxc_isi_m2m_streamoff,
+};
+
+/*
+ * V4L2 controls handling
+ */
+#define ctrl_to_mxc_isi_m2m(__ctrl) \
+ container_of((__ctrl)->handler, struct mxc_isi_m2m_dev, ctrls.handler)
+
+static int mxc_isi_m2m_s_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mxc_isi_m2m_dev *isi_m2m = ctrl_to_mxc_isi_m2m(ctrl);
+ struct mxc_isi_dev *mxc_isi = mxc_isi_get_hostdata(isi_m2m->pdev);
+ unsigned long flags;
+ int ret = 0;
+
+ dev_dbg(&isi_m2m->pdev->dev, "%s\n", __func__);
+
+ if (ctrl->flags & V4L2_CTRL_FLAG_INACTIVE)
+ return 0;
+
+ spin_lock_irqsave(&mxc_isi->slock, flags);
+
+ switch (ctrl->id) {
+ case V4L2_CID_HFLIP:
+ if (ctrl->val < 0) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ mxc_isi->hflip = (ctrl->val > 0) ? 1 : 0;
+ break;
+
+ case V4L2_CID_VFLIP:
+ if (ctrl->val < 0) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ mxc_isi->vflip = (ctrl->val > 0) ? 1 : 0;
+ break;
+
+ case V4L2_CID_ALPHA_COMPONENT:
+ if (ctrl->val < 0 || ctrl->val > 255) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+ mxc_isi->alpha = ctrl->val;
+ mxc_isi->alphaen = 1;
+ break;
+
+ default:
+ dev_err(&isi_m2m->pdev->dev, "%s: Not support %d CID\n", __func__, ctrl->id);
+ ret = -EINVAL;
+ }
+
+unlock:
+ spin_unlock_irqrestore(&mxc_isi->slock, flags);
+ return ret;
+}
+
+static int mxc_isi_m2m_g_ctrl(struct v4l2_ctrl *ctrl)
+{
+ struct mxc_isi_m2m_dev *isi_m2m = ctrl_to_mxc_isi_m2m(ctrl);
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&isi_m2m->slock, flags);
+
+ switch (ctrl->id) {
+ case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
+ ctrl->val = isi_m2m->req_cap_buf_num;
+ break;
+ case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
+ ctrl->val = isi_m2m->req_out_buf_num;
+ break;
+ default:
+ dev_err(&isi_m2m->pdev->dev, "%s: Not support %d CID\n",
+ __func__, ctrl->id);
+ ret = -EINVAL;
+ }
+
+ spin_unlock_irqrestore(&isi_m2m->slock, flags);
+ return ret;
+}
+
+static const struct v4l2_ctrl_ops mxc_isi_m2m_ctrl_ops = {
+ .s_ctrl = mxc_isi_m2m_s_ctrl,
+ .g_volatile_ctrl = mxc_isi_m2m_g_ctrl,
+};
+
+static int mxc_isi_m2m_ctrls_create(struct mxc_isi_m2m_dev *isi_m2m)
+{
+ struct mxc_isi_ctrls *ctrls = &isi_m2m->ctrls;
+ struct v4l2_ctrl_handler *handler = &ctrls->handler;
+
+ if (isi_m2m->ctrls.ready)
+ return 0;
+
+ v4l2_ctrl_handler_init(handler, 4);
+
+ ctrls->hflip = v4l2_ctrl_new_std(handler, &mxc_isi_m2m_ctrl_ops,
+ V4L2_CID_HFLIP, 0, 1, 1, 0);
+ ctrls->vflip = v4l2_ctrl_new_std(handler, &mxc_isi_m2m_ctrl_ops,
+ V4L2_CID_VFLIP, 0, 1, 1, 0);
+ ctrls->alpha = v4l2_ctrl_new_std(handler, &mxc_isi_m2m_ctrl_ops,
+ V4L2_CID_ALPHA_COMPONENT, 0, 0xff, 1, 0);
+	ctrls->num_cap_buf = v4l2_ctrl_new_std(handler, &mxc_isi_m2m_ctrl_ops,
+					V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 3, 16, 1, 3);
+	ctrls->num_out_buf = v4l2_ctrl_new_std(handler, &mxc_isi_m2m_ctrl_ops,
+					V4L2_CID_MIN_BUFFERS_FOR_OUTPUT, 1, 16, 1, 1);
+
+	/* Read via .g_volatile_ctrl, which only runs for volatile controls */
+	if (ctrls->num_cap_buf)
+		ctrls->num_cap_buf->flags |= V4L2_CTRL_FLAG_VOLATILE;
+	if (ctrls->num_out_buf)
+		ctrls->num_out_buf->flags |= V4L2_CTRL_FLAG_VOLATILE;
+
+	if (!handler->error)
+		ctrls->ready = true;
+
+ return handler->error;
+}
+
+void mxc_isi_m2m_ctrls_delete(struct mxc_isi_m2m_dev *isi_m2m)
+{
+ struct mxc_isi_ctrls *ctrls = &isi_m2m->ctrls;
+
+ if (ctrls->ready) {
+ v4l2_ctrl_handler_free(&ctrls->handler);
+ ctrls->ready = false;
+ ctrls->alpha = NULL;
+ }
+}
+
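+/*
+ * Probe order matters here: the m2m device depends on the host ISI driver,
+ * so probing is deferred until mxc_isi_dev_get_parent() and
+ * mxc_isi_get_hostdata() succeed.  Only then are the m2m context, the V4L2
+ * device, the controls and the video node set up, in that order.
+ */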
+static int isi_m2m_probe(struct platform_device *pdev)
+{
+ struct mxc_isi_dev *mxc_isi;
+ struct mxc_isi_m2m_dev *isi_m2m;
+ struct v4l2_device *v4l2_dev;
+ struct video_device *vdev;
+ int ret = -ENOMEM;
+
+ isi_m2m = devm_kzalloc(&pdev->dev, sizeof(*isi_m2m), GFP_KERNEL);
+ if (!isi_m2m)
+ return -ENOMEM;
+ isi_m2m->pdev = pdev;
+
+ pdev->dev.parent = mxc_isi_dev_get_parent(pdev);
+ if (!pdev->dev.parent) {
+ dev_info(&pdev->dev, "deferring %s device registration\n",
+ dev_name(&pdev->dev));
+ return -EPROBE_DEFER;
+ }
+
+ mxc_isi = mxc_isi_get_hostdata(pdev);
+ if (!mxc_isi) {
+ dev_info(&pdev->dev, "deferring %s device registration\n",
+ dev_name(&pdev->dev));
+ return -EPROBE_DEFER;
+ }
+ mxc_isi->isi_m2m = isi_m2m;
+ isi_m2m->id = mxc_isi->id;
+
+ spin_lock_init(&isi_m2m->slock);
+ mutex_init(&isi_m2m->lock);
+
+ /* m2m */
+ isi_m2m->m2m_dev = v4l2_m2m_init(&mxc_isi_m2m_ops);
+ if (IS_ERR(isi_m2m->m2m_dev)) {
+		dev_err(&pdev->dev, "%s: failed to get m2m device\n", __func__);
+ return PTR_ERR(isi_m2m->m2m_dev);
+ }
+
+ /* V4L2 device */
+ v4l2_dev = &isi_m2m->v4l2_dev;
+ strlcpy(v4l2_dev->name, "mx8-isi-m2m", sizeof(v4l2_dev->name));
+
+	ret = v4l2_device_register(&pdev->dev, v4l2_dev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to register v4l2_device\n");
+		goto free_m2m;
+	}
+
+ INIT_LIST_HEAD(&isi_m2m->out_active);
+
+ /* Video device */
+ vdev = &isi_m2m->vdev;
+ memset(vdev, 0, sizeof(*vdev));
+ snprintf(vdev->name, sizeof(vdev->name), "mxc_isi.%d.m2m", isi_m2m->id);
+
+ vdev->fops = &mxc_isi_m2m_fops;
+ vdev->ioctl_ops = &mxc_isi_m2m_ioctl_ops;
+ vdev->v4l2_dev = v4l2_dev;
+ vdev->minor = -1;
+ vdev->release = video_device_release_empty;
+ vdev->vfl_dir = VFL_DIR_M2M;
+ vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_M2M_MPLANE;
+
+ ret = mxc_isi_m2m_ctrls_create(isi_m2m);
+ if (ret)
+ goto free_m2m;
+
+ ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
+ if (ret < 0) {
+		dev_err(&pdev->dev, "%s: failed to register video device\n",
+			__func__);
+ goto ctrl_free;
+ }
+
+ vdev->ctrl_handler = &isi_m2m->ctrls.handler;
+ video_set_drvdata(vdev, isi_m2m);
+ platform_set_drvdata(pdev, isi_m2m);
+ pm_runtime_enable(&pdev->dev);
+
+	dev_info(&pdev->dev, "Registered m2m device for ISI.%d\n", isi_m2m->id);
+
+ return 0;
+
+ctrl_free:
+	mxc_isi_m2m_ctrls_delete(isi_m2m);
+	v4l2_device_unregister(v4l2_dev);
+free_m2m:
+	v4l2_m2m_release(isi_m2m->m2m_dev);
+	return ret;
+}
+
+static int isi_m2m_remove(struct platform_device *pdev)
+{
+ struct mxc_isi_m2m_dev *isi_m2m = platform_get_drvdata(pdev);
+ struct video_device *vdev = &isi_m2m->vdev;
+
+ if (video_is_registered(vdev)) {
+ video_unregister_device(vdev);
+ mxc_isi_m2m_ctrls_delete(isi_m2m);
+ media_entity_cleanup(&vdev->entity);
+ }
+ v4l2_m2m_release(isi_m2m->m2m_dev);
+ v4l2_device_unregister(&isi_m2m->v4l2_dev);
+ pm_runtime_disable(&isi_m2m->pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id isi_m2m_of_match[] = {
+ {.compatible = "imx-isi-m2m",},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, isi_m2m_of_match);
+
+static struct platform_driver isi_m2m_driver = {
+ .probe = isi_m2m_probe,
+ .remove = isi_m2m_remove,
+ .driver = {
+ .of_match_table = isi_m2m_of_match,
+ .name = "isi-m2m",
+ },
+};
+
+static int __init mxc_isi_m2m_init(void)
+{
+ return platform_driver_register(&isi_m2m_driver);
+}
+late_initcall(mxc_isi_m2m_init);
+
+static void __exit mxc_isi_m2m_exit(void)
+{
+ platform_driver_unregister(&isi_m2m_driver);
+}
+module_exit(mxc_isi_m2m_exit);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("IMX8 Image Sensor Interface memory to memory driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ISI M2M");
+MODULE_VERSION("1.0");
diff --git a/drivers/staging/media/imx/imx8-media-dev.c b/drivers/staging/media/imx/imx8-media-dev.c
new file mode 100644
index 000000000000..875c9e92d531
--- /dev/null
+++ b/drivers/staging/media/imx/imx8-media-dev.c
@@ -0,0 +1,1079 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * V4L2 Media Controller Driver for NXP IMX8QXP/QM SOC
+ *
+ * Copyright (c) 2019 NXP Semiconductor
+ *
+ */
+
+#include <linux/bug.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <media/v4l2-device.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-async.h>
+#include <media/v4l2-ctrls.h>
+#include <media/media-device.h>
+
+#include "imx8-common.h"
+
+#define MXC_MD_DRIVER_NAME "mxc-md"
+#define ISI_OF_NODE_NAME "isi"
+#define MIPI_CSI2_OF_NODE_NAME "csi"
+
+#define MXC_MAX_SENSORS 3
+#define MXC_MIPI_CSI2_MAX_DEVS 2
+
+#define MXC_NAME_LENS 32
+
+/*
+ * The subdevices' group IDs.
+ */
+#define GRP_ID_MXC_SENSOR BIT(8)
+#define GRP_ID_MXC_ISI BIT(9)
+#define GRP_ID_MXC_MIPI_CSI2 BIT(11)
+#define GRP_ID_MXC_HDMI_RX BIT(12)
+#define GRP_ID_MXC_MJPEG_DEC BIT(13)
+#define GRP_ID_MXC_MJPEG_ENC BIT(14)
+#define GRP_ID_MXC_PARALLEL_CSI BIT(15)
+
+enum mxc_subdev_index {
+ IDX_SENSOR,
+ IDX_ISI,
+ IDX_MIPI_CSI2,
+ IDX_HDMI_RX,
+ IDX_MJPEG_ENC,
+ IDX_MJPEG_DEC,
+ IDX_PARALLEL_CSI,
+ IDX_MAX,
+};
+
+struct mxc_isi_info {
+ struct v4l2_subdev *sd;
+ struct media_entity *entity;
+ struct device_node *node;
+ u32 interface[MAX_PORTS];
+
+ char vdev_name[MXC_NAME_LENS];
+ char sd_name[MXC_NAME_LENS];
+ int id;
+};
+
+struct mxc_mipi_csi2_info {
+ struct v4l2_subdev *sd;
+ struct media_entity *entity;
+ struct device_node *node;
+
+ char sd_name[MXC_NAME_LENS];
+ int id;
+ bool vchannel;
+};
+
+struct mxc_parallel_csi_info {
+ struct v4l2_subdev *sd;
+ struct media_entity *entity;
+ struct device_node *node;
+
+ char sd_name[MXC_NAME_LENS];
+ int id;
+};
+
+struct mxc_sensor_info {
+ int id;
+ struct v4l2_subdev *sd;
+ struct v4l2_async_subdev asd;
+ bool mipi_mode;
+};
+
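+/*
+ * Top-level media device state: aggregates the ISI channels, the MIPI CSI-2
+ * receivers, the parallel CSI interface and the asynchronously probed
+ * sensors into one media graph.
+ */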
+struct mxc_md {
+ struct mxc_isi_info mxc_isi[MXC_ISI_MAX_DEVS];
+ struct mxc_mipi_csi2_info mipi_csi2[MXC_MIPI_CSI2_MAX_DEVS];
+ struct mxc_parallel_csi_info pcsidev;
+ struct mxc_sensor_info sensor[MXC_MAX_SENSORS];
+
+ int link_status;
+ int num_sensors;
+ int valid_num_sensors;
+ unsigned int nr_isi;
+ bool parallel_csi;
+
+ struct media_device media_dev;
+ struct v4l2_device v4l2_dev;
+ struct platform_device *pdev;
+
+ struct v4l2_async_notifier subdev_notifier;
+ struct v4l2_async_subdev *async_subdevs[MXC_MAX_SENSORS];
+};
+
+static inline struct mxc_md *notifier_to_mxc_md(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct mxc_md, subdev_notifier);
+}
+
+static void mxc_md_unregister_entities(struct mxc_md *mxc_md)
+{
+ struct mxc_parallel_csi_info *pcsidev = &mxc_md->pcsidev;
+ int i;
+
+ for (i = 0; i < MXC_ISI_MAX_DEVS; i++) {
+ struct mxc_isi_info *isi = &mxc_md->mxc_isi[i];
+
+ if (!isi->sd)
+ continue;
+ v4l2_device_unregister_subdev(isi->sd);
+ memset(isi, 0, sizeof(*isi));
+ }
+
+ for (i = 0; i < MXC_MIPI_CSI2_MAX_DEVS; i++) {
+		struct mxc_mipi_csi2_info *mipi_csi2 = &mxc_md->mipi_csi2[i];
+
+		if (!mipi_csi2->sd)
+ continue;
+ v4l2_device_unregister_subdev(mipi_csi2->sd);
+ memset(mipi_csi2, 0, sizeof(*mipi_csi2));
+ }
+
+ if (pcsidev->sd)
+ v4l2_device_unregister_subdev(pcsidev->sd);
+
+ v4l2_info(&mxc_md->v4l2_dev, "Unregistered all entities\n");
+}
+
+static struct media_entity *find_entity_by_name(struct mxc_md *mxc_md,
+ const char *name)
+{
+ struct media_entity *ent = NULL;
+
+ if (!mxc_md || !name)
+ return NULL;
+
+ media_device_for_each_entity(ent, &mxc_md->media_dev) {
+ if (!strcmp(ent->name, name)) {
+ dev_dbg(&mxc_md->pdev->dev,
+ "%s entity is found\n", ent->name);
+ return ent;
+ }
+ }
+
+ return NULL;
+}
+
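+/*
+ * Unregister the subdev sitting behind @pad's remote end.  Used to prune
+ * ISI channels whose source (CSI-2 virtual channel or parallel CSI) ended
+ * up without a bound sensor.
+ */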
+static int mxc_md_do_clean(struct mxc_md *mxc_md, struct media_pad *pad)
+{
+ struct device *dev = &mxc_md->pdev->dev;
+ struct media_pad *remote_pad;
+ struct v4l2_subdev *subdev;
+
+ if (!pad->entity->num_links)
+ return 0;
+
+	remote_pad = media_entity_remote_pad(pad);
+	if (!remote_pad) {
+		dev_err(dev, "%s: failed to get remote pad\n", __func__);
+		return -ENODEV;
+	}
+
+	subdev = media_entity_to_v4l2_subdev(remote_pad->entity);
+	if (!subdev) {
+		dev_err(dev, "%s: no v4l2 subdev behind remote pad\n", __func__);
+		return -ENODEV;
+	}
+
+ v4l2_device_unregister_subdev(subdev);
+ media_entity_cleanup(&subdev->entity);
+
+ pr_info("clean ISI channel: %s\n", subdev->name);
+
+ return 0;
+}
+
+static int mxc_md_clean_channel(struct mxc_md *mxc_md, int index)
+{
+ struct mxc_sensor_info *sensor = &mxc_md->sensor[index];
+ struct mxc_mipi_csi2_info *mipi_csi2;
+ struct mxc_parallel_csi_info *pcsidev;
+ struct media_pad *local_pad;
+ struct media_entity *local_en;
+ u32 i, mipi_vc = 0;
+ int ret;
+
+ if (mxc_md->mipi_csi2[index].sd) {
+ mipi_csi2 = &mxc_md->mipi_csi2[index];
+
+		if (mipi_csi2->vchannel)
+			mipi_vc = 4;
+		else
+			mipi_vc = 1;
+
+		local_en = &mipi_csi2->sd->entity;
+
+ for (i = 0; i < mipi_vc; i++) {
+ local_pad = &local_en->pads[MXC_MIPI_CSI2_VC0_PAD_SOURCE + i];
+ ret = mxc_md_do_clean(mxc_md, local_pad);
+ if (ret < 0)
+ return -ENODEV;
+ }
+ } else if (mxc_md->parallel_csi && !sensor->mipi_mode) {
+		pcsidev = &mxc_md->pcsidev;
+		if (!pcsidev->sd)
+			return -ENODEV;
+
+		local_en = &pcsidev->sd->entity;
+
+ local_pad = &local_en->pads[MXC_PARALLEL_CSI_PAD_SOURCE];
+ ret = mxc_md_do_clean(mxc_md, local_pad);
+ if (ret < 0)
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int mxc_md_clean_unlink_channels(struct mxc_md *mxc_md)
+{
+ struct mxc_sensor_info *sensor;
+ int num_subdevs = mxc_md->num_sensors;
+ int i, ret;
+
+ for (i = 0; i < num_subdevs; i++) {
+ sensor = &mxc_md->sensor[i];
+ if (sensor->sd != NULL)
+ continue;
+
+ ret = mxc_md_clean_channel(mxc_md, i);
+ if (ret < 0) {
+			pr_err("%s: failed to clean channel %d\n", __func__, i);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void mxc_md_unregister_all(struct mxc_md *mxc_md)
+{
+ struct mxc_isi_info *mxc_isi;
+ int i;
+
+ for (i = 0; i < MXC_ISI_MAX_DEVS; i++) {
+ mxc_isi = &mxc_md->mxc_isi[i];
+ if (!mxc_isi->sd)
+ continue;
+
+ v4l2_device_unregister_subdev(mxc_isi->sd);
+ media_entity_cleanup(&mxc_isi->sd->entity);
+
+ pr_info("unregister ISI channel: %s\n", mxc_isi->sd->name);
+ }
+}
+
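+/*
+ * Build the media graph in two passes per ISI channel: first link the ISI
+ * subdev source pad (DC0/DC1/memory) to its capture video node, then link
+ * the configured input (MIPI CSI-2 virtual channel or parallel CSI) to the
+ * matching ISI sink pad.  A final pass connects each bound sensor to its
+ * CSI-2 receiver, fanning out over all virtual channels when enabled.
+ */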
+static int mxc_md_create_links(struct mxc_md *mxc_md)
+{
+ struct media_entity *source, *sink;
+ struct mxc_isi_info *mxc_isi;
+ struct mxc_sensor_info *sensor;
+ struct mxc_mipi_csi2_info *mipi_csi2;
+ struct mxc_parallel_csi_info *pcsidev;
+ int num_sensors = mxc_md->num_sensors;
+ int i, j, ret = 0;
+ u16 source_pad, sink_pad;
+ u32 flags;
+ u32 mipi_vc = 0;
+
+ /* Create links between each ISI's subdev and video node */
+ flags = MEDIA_LNK_FL_ENABLED;
+ for (i = 0; i < MXC_ISI_MAX_DEVS; i++) {
+ mxc_isi = &mxc_md->mxc_isi[i];
+ if (!mxc_isi->sd)
+ continue;
+
+ /* Connect ISI source to video device */
+ source = find_entity_by_name(mxc_md, mxc_isi->sd_name);
+ sink = find_entity_by_name(mxc_md, mxc_isi->vdev_name);
+ sink_pad = 0;
+
+ switch (mxc_isi->interface[OUT_PORT]) {
+ case ISI_OUTPUT_INTERFACE_DC0:
+ source_pad = MXC_ISI_SD_PAD_SOURCE_DC0;
+ break;
+ case ISI_OUTPUT_INTERFACE_DC1:
+ source_pad = MXC_ISI_SD_PAD_SOURCE_DC1;
+ break;
+ case ISI_OUTPUT_INTERFACE_MEM:
+ source_pad = MXC_ISI_SD_PAD_SOURCE_MEM;
+ break;
+ default:
+ v4l2_err(&mxc_md->v4l2_dev, "Wrong output interface: %x\n",
+ mxc_isi->interface[OUT_PORT]);
+ return -EINVAL;
+ }
+
+ ret = media_create_pad_link(source, source_pad,
+ sink, sink_pad, flags);
+ if (ret) {
+ v4l2_err(&mxc_md->v4l2_dev,
+				 "Failed to create link [%s] %c> [%s]\n",
+ source->name, flags ? '=' : '-', sink->name);
+ break;
+ }
+
+		/* Notify the capture subdev entity that the ISI capture link is set up */
+ ret = media_entity_call(source, link_setup, &source->pads[source_pad],
+ &sink->pads[sink_pad], flags);
+ if (ret) {
+ v4l2_err(&mxc_md->v4l2_dev,
+				 "link_setup call failed [%s] %c> [%s]\n",
+ source->name, flags ? '=' : '-', sink->name);
+ break;
+ }
+
+ v4l2_info(&mxc_md->v4l2_dev, "created link [%s] %c> [%s]\n",
+ source->name, flags ? '=' : '-', sink->name);
+
+ /* Connect MIPI/HDMI/Mem source to ISI sink */
+ sink = find_entity_by_name(mxc_md, mxc_isi->sd_name);
+
+ switch (mxc_isi->interface[IN_PORT]) {
+ case ISI_INPUT_INTERFACE_MIPI0_CSI2:
+ mipi_csi2 = &mxc_md->mipi_csi2[0];
+ if (!mipi_csi2->sd)
+ continue;
+ source = find_entity_by_name(mxc_md, mipi_csi2->sd_name);
+
+ switch (mxc_isi->interface[SUB_IN_PORT]) {
+ case ISI_INPUT_SUB_INTERFACE_VC1:
+ source_pad = MXC_MIPI_CSI2_VC1_PAD_SOURCE;
+ sink_pad = MXC_ISI_SD_PAD_SINK_MIPI0_VC1;
+ break;
+ case ISI_INPUT_SUB_INTERFACE_VC2:
+ source_pad = MXC_MIPI_CSI2_VC2_PAD_SOURCE;
+ sink_pad = MXC_ISI_SD_PAD_SINK_MIPI0_VC2;
+ break;
+ case ISI_INPUT_SUB_INTERFACE_VC3:
+ source_pad = MXC_MIPI_CSI2_VC3_PAD_SOURCE;
+ sink_pad = MXC_ISI_SD_PAD_SINK_MIPI0_VC3;
+ break;
+ default:
+ source_pad = MXC_MIPI_CSI2_VC0_PAD_SOURCE;
+ sink_pad = MXC_ISI_SD_PAD_SINK_MIPI0_VC0;
+ break;
+ }
+ break;
+
+ case ISI_INPUT_INTERFACE_MIPI1_CSI2:
+ mipi_csi2 = &mxc_md->mipi_csi2[1];
+ if (!mipi_csi2->sd)
+ continue;
+ source = find_entity_by_name(mxc_md, mipi_csi2->sd_name);
+
+ switch (mxc_isi->interface[SUB_IN_PORT]) {
+ case ISI_INPUT_SUB_INTERFACE_VC1:
+ source_pad = MXC_MIPI_CSI2_VC1_PAD_SOURCE;
+ sink_pad = MXC_ISI_SD_PAD_SINK_MIPI1_VC1;
+ break;
+ case ISI_INPUT_SUB_INTERFACE_VC2:
+ source_pad = MXC_MIPI_CSI2_VC2_PAD_SOURCE;
+ sink_pad = MXC_ISI_SD_PAD_SINK_MIPI1_VC2;
+ break;
+ case ISI_INPUT_SUB_INTERFACE_VC3:
+ source_pad = MXC_MIPI_CSI2_VC3_PAD_SOURCE;
+ sink_pad = MXC_ISI_SD_PAD_SINK_MIPI1_VC3;
+ break;
+ default:
+ source_pad = MXC_MIPI_CSI2_VC0_PAD_SOURCE;
+ sink_pad = MXC_ISI_SD_PAD_SINK_MIPI1_VC0;
+ break;
+ }
+ break;
+
+ case ISI_INPUT_INTERFACE_PARALLEL_CSI:
+ pcsidev = &mxc_md->pcsidev;
+ if (!pcsidev->sd)
+ continue;
+ source = find_entity_by_name(mxc_md, pcsidev->sd_name);
+ source_pad = MXC_PARALLEL_CSI_PAD_SOURCE;
+ sink_pad = MXC_ISI_SD_PAD_SINK_PARALLEL_CSI;
+ break;
+
+ case ISI_INPUT_INTERFACE_HDMI:
+ case ISI_INPUT_INTERFACE_DC0:
+ case ISI_INPUT_INTERFACE_DC1:
+ case ISI_INPUT_INTERFACE_MEM:
+ default:
+			v4l2_err(&mxc_md->v4l2_dev,
+				 "Unsupported input interface: %x\n",
+				 mxc_isi->interface[IN_PORT]);
+ return -EINVAL;
+ }
+
+ /* Create link MIPI/HDMI to ISI */
+ ret = media_create_pad_link(source, source_pad, sink, sink_pad, flags);
+ if (ret) {
+			v4l2_err(&mxc_md->v4l2_dev,
+				 "Failed to create link [%s] %c> [%s]\n",
+				 source->name, flags ? '=' : '-', sink->name);
+ break;
+ }
+
+ /* Notify ISI subdev entity */
+ ret = media_entity_call(sink, link_setup,
+ &sink->pads[sink_pad],
+ &source->pads[source_pad], 0);
+ if (ret)
+ break;
+
+ /* Notify MIPI/HDMI entity */
+ ret = media_entity_call(source, link_setup,
+ &source->pads[source_pad],
+ &sink->pads[sink_pad], 0);
+ if (ret)
+ break;
+
+ v4l2_info(&mxc_md->v4l2_dev, "created link [%s] %c> [%s]\n",
+ source->name, flags ? '=' : '-', sink->name);
+ }
+
+ /* Connect MIPI Sensor to MIPI CSI2 */
+ for (i = 0; i < num_sensors; i++) {
+ sensor = &mxc_md->sensor[i];
+ if (!sensor || !sensor->sd)
+ continue;
+
+ if (mxc_md->parallel_csi && !sensor->mipi_mode) {
+ pcsidev = &mxc_md->pcsidev;
+ if (!pcsidev->sd)
+ continue;
+ source = &sensor->sd->entity;
+ sink = find_entity_by_name(mxc_md, pcsidev->sd_name);
+
+ source_pad = 0;
+ sink_pad = MXC_PARALLEL_CSI_PAD_SINK;
+
+ ret = media_create_pad_link(source,
+ source_pad,
+ sink,
+ sink_pad,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ return ret;
+
+ /* Notify MIPI subdev entity */
+ ret = media_entity_call(sink, link_setup,
+ &sink->pads[sink_pad],
+ &source->pads[source_pad], 0);
+ if (ret)
+ return ret;
+
+ /* Notify MIPI sensor subdev entity */
+ ret = media_entity_call(source, link_setup,
+ &source->pads[source_pad],
+ &sink->pads[sink_pad],
+ 0);
+ if (ret)
+ return ret;
+ v4l2_info(&mxc_md->v4l2_dev,
+ "created link [%s] => [%s]\n",
+ source->name, sink->name);
+		} else if (sensor->id < MXC_MIPI_CSI2_MAX_DEVS &&
+			   mxc_md->mipi_csi2[sensor->id].sd) {
+ mipi_csi2 = &mxc_md->mipi_csi2[sensor->id];
+
+ source = &sensor->sd->entity;
+ sink = find_entity_by_name(mxc_md, mipi_csi2->sd_name);
+ source_pad = 0;
+ sink_pad = source_pad;
+
+ mipi_vc = (mipi_csi2->vchannel) ? 4 : 1;
+ for (j = 0; j < mipi_vc; j++) {
+ ret = media_create_pad_link(source,
+ source_pad + j,
+ sink,
+ sink_pad + j,
+ MEDIA_LNK_FL_IMMUTABLE |
+ MEDIA_LNK_FL_ENABLED);
+ if (ret)
+ return ret;
+
+ /* Notify MIPI subdev entity */
+ ret = media_entity_call(sink, link_setup,
+ &sink->pads[sink_pad + j],
+ &source->pads[source_pad + j],
+ 0);
+ if (ret)
+ return ret;
+
+ /* Notify MIPI sensor subdev entity */
+ ret = media_entity_call(source, link_setup,
+ &source->pads[source_pad + j],
+ &sink->pads[sink_pad + j],
+ 0);
+ if (ret)
+ return ret;
+ }
+ v4l2_info(&mxc_md->v4l2_dev,
+ "created link [%s] => [%s]\n",
+ source->name, sink->name);
+ }
+ }
+ dev_info(&mxc_md->pdev->dev, "%s\n", __func__);
+ return 0;
+}
+
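+/*
+ * v4l2-async callbacks: .bound runs once per matched sensor and records
+ * its subdev; .complete runs when all sensors are bound, creates the media
+ * links and exposes the subdev device nodes.
+ */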
+static int subdev_notifier_bound(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd,
+ struct v4l2_async_subdev *asd)
+{
+ struct mxc_md *mxc_md = notifier_to_mxc_md(notifier);
+ struct mxc_sensor_info *sensor = NULL;
+ int i;
+
+ dev_dbg(&mxc_md->pdev->dev, "%s\n", __func__);
+
+ /* Find platform data for this sensor subdev */
+	for (i = 0; i < ARRAY_SIZE(mxc_md->sensor); i++) {
+		if (mxc_md->sensor[i].asd.match.fwnode ==
+		    of_fwnode_handle(sd->dev->of_node)) {
+			sensor = &mxc_md->sensor[i];
+			break;
+		}
+	}
+
+ if (!sensor)
+ return -EINVAL;
+
+ sd->grp_id = GRP_ID_MXC_SENSOR;
+ sensor->sd = sd;
+ mxc_md->valid_num_sensors++;
+
+ v4l2_info(&mxc_md->v4l2_dev, "Registered sensor subdevice: %s (%d)\n",
+ sd->name, mxc_md->valid_num_sensors);
+
+ return 0;
+}
+
+static int subdev_notifier_complete(struct v4l2_async_notifier *notifier)
+{
+ struct mxc_md *mxc_md = notifier_to_mxc_md(notifier);
+ int ret;
+
+ dev_dbg(&mxc_md->pdev->dev, "%s\n", __func__);
+ mutex_lock(&mxc_md->media_dev.graph_mutex);
+
+ ret = mxc_md_create_links(mxc_md);
+ if (ret < 0)
+ goto unlock;
+
+ mxc_md->link_status = 1;
+
+ ret = v4l2_device_register_subdev_nodes(&mxc_md->v4l2_dev);
+unlock:
+ mutex_unlock(&mxc_md->media_dev.graph_mutex);
+ if (ret < 0) {
+ v4l2_err(&mxc_md->v4l2_dev, "%s error exit\n", __func__);
+ return ret;
+ }
+
+ if (mxc_md->media_dev.devnode)
+ return ret;
+
+ return media_device_register(&mxc_md->media_dev);
+}
+
+static const struct v4l2_async_notifier_operations sd_async_notifier_ops = {
+ .bound = subdev_notifier_bound,
+ .complete = subdev_notifier_complete,
+};
+
+void mxc_sensor_notify(struct v4l2_subdev *sd, unsigned int notification,
+ void *arg)
+{
+}
+
+static int mxc_md_link_notify(struct media_link *link, unsigned int flags,
+ unsigned int notification)
+{
+ return 0;
+}
+
+static const struct media_device_ops mxc_md_ops = {
+ .link_notify = mxc_md_link_notify,
+};
+
+static struct mxc_isi_info *mxc_md_parse_isi_entity(struct mxc_md *mxc_md,
+ struct device_node *node)
+{
+ struct device *dev = &mxc_md->pdev->dev;
+ struct mxc_isi_info *mxc_isi;
+ struct device_node *child;
+ int ret, id = -1;
+
+ if (!mxc_md || !node)
+ return NULL;
+
+ id = of_alias_get_id(node, ISI_OF_NODE_NAME);
+ if (id < 0 || id >= MXC_ISI_MAX_DEVS)
+ return NULL;
+
+ mxc_isi = &mxc_md->mxc_isi[id];
+
+ child = of_get_child_by_name(node, "cap_device");
+ if (!child) {
+		dev_err(dev, "Cannot get child node for %s.%d\n",
+ ISI_OF_NODE_NAME, id);
+ return NULL;
+ }
+	mxc_isi->id = id;
+	/* Keep the reference to @child: the node pointer is stored and used later */
+	mxc_isi->node = child;
+ sprintf(mxc_isi->sd_name, "mxc_isi.%d", mxc_isi->id);
+ sprintf(mxc_isi->vdev_name, "mxc_isi.%d.capture", mxc_isi->id);
+
+	ret = of_property_read_u32_array(node, "interface",
+					 mxc_isi->interface, 3);
+	if (ret < 0) {
+		dev_err(dev, "%s node has no 'interface' property\n",
+			node->name);
+		of_node_put(child);
+		return NULL;
+	}
+
+ return mxc_isi;
+}
+
+static struct mxc_mipi_csi2_info *
+mxc_md_parse_csi_entity(struct mxc_md *mxc_md,
+ struct device_node *node)
+{
+ struct mxc_mipi_csi2_info *mipi_csi2;
+ int id = -1;
+
+ if (!mxc_md || !node)
+ return NULL;
+
+ id = of_alias_get_id(node, MIPI_CSI2_OF_NODE_NAME);
+ if (id < 0 || id >= MXC_MIPI_CSI2_MAX_DEVS)
+ return NULL;
+
+	mipi_csi2 = &mxc_md->mipi_csi2[id];
+
+ mipi_csi2->vchannel = of_property_read_bool(node, "virtual-channel");
+ mipi_csi2->id = id;
+ mipi_csi2->node = node;
+ sprintf(mipi_csi2->sd_name, "mxc-mipi-csi2.%d", mipi_csi2->id);
+
+ return mipi_csi2;
+}
+
+static struct mxc_parallel_csi_info*
+mxc_md_parse_pcsi_entity(struct mxc_md *mxc_md, struct device_node *node)
+{
+ struct mxc_parallel_csi_info *pcsidev;
+
+ if (!mxc_md || !node)
+ return NULL;
+
+	pcsidev = &mxc_md->pcsidev;
+
+ pcsidev->node = node;
+ sprintf(pcsidev->sd_name, "mxc-parallel-csi");
+
+ return pcsidev;
+}
+
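+/*
+ * Look up the v4l2_subdev registered by another platform driver.  This
+ * relies on the convention, followed by the ISI/CSI subdev drivers in this
+ * series, that each of them stores its v4l2_subdev as platform drvdata.
+ */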
+static struct v4l2_subdev *get_subdev_by_node(struct device_node *node)
+{
+ struct platform_device *pdev;
+ struct v4l2_subdev *sd = NULL;
+ struct device *dev;
+ void *drvdata;
+
+ pdev = of_find_device_by_node(node);
+ if (!pdev)
+ return NULL;
+
+	dev = &pdev->dev;
+	device_lock(dev);
+ if (!dev->driver || !try_module_get(dev->driver->owner))
+ goto dev_unlock;
+
+ drvdata = dev_get_drvdata(dev);
+ if (!drvdata)
+ goto module_put;
+
+ sd = (struct v4l2_subdev *)drvdata;
+
+module_put:
+ module_put(dev->driver->owner);
+dev_unlock:
+ device_unlock(dev);
+ return sd;
+}
+
+static int register_isi_entity(struct mxc_md *mxc_md,
+ struct mxc_isi_info *mxc_isi)
+{
+ struct v4l2_subdev *sd;
+ int ret;
+
+ sd = get_subdev_by_node(mxc_isi->node);
+ if (!sd) {
+ dev_info(&mxc_md->pdev->dev,
+ "deferring %s device registration\n",
+ mxc_isi->node->name);
+ return -EPROBE_DEFER;
+ }
+
+ if (mxc_isi->id >= MXC_ISI_MAX_DEVS)
+ return -EBUSY;
+
+ sd->grp_id = GRP_ID_MXC_ISI;
+
+ ret = v4l2_device_register_subdev(&mxc_md->v4l2_dev, sd);
+ if (!ret)
+ mxc_isi->sd = sd;
+ else
+ v4l2_err(&mxc_md->v4l2_dev, "Failed to register ISI.%d (%d)\n",
+ mxc_isi->id, ret);
+ return ret;
+}
+
+static int register_mipi_csi2_entity(struct mxc_md *mxc_md,
+ struct mxc_mipi_csi2_info *mipi_csi2)
+{
+ struct v4l2_subdev *sd;
+ int ret;
+
+ sd = get_subdev_by_node(mipi_csi2->node);
+ if (!sd) {
+ dev_info(&mxc_md->pdev->dev,
+ "deferring %s device registration\n",
+ mipi_csi2->node->name);
+ return -EPROBE_DEFER;
+ }
+
+ if (mipi_csi2->id >= MXC_MIPI_CSI2_MAX_DEVS)
+ return -EBUSY;
+
+ sd->grp_id = GRP_ID_MXC_MIPI_CSI2;
+
+ ret = v4l2_device_register_subdev(&mxc_md->v4l2_dev, sd);
+ if (!ret)
+ mipi_csi2->sd = sd;
+ else
+ v4l2_err(&mxc_md->v4l2_dev, "Failed to register MIPI-CSI.%d (%d)\n",
+ mipi_csi2->id, ret);
+ return ret;
+}
+
+static int register_parallel_csi_entity(struct mxc_md *mxc_md,
+ struct mxc_parallel_csi_info *pcsidev)
+{
+ struct v4l2_subdev *sd;
+ int ret;
+
+ sd = get_subdev_by_node(pcsidev->node);
+ if (!sd) {
+ dev_info(&mxc_md->pdev->dev,
+ "deferring %s device registration\n",
+ pcsidev->node->name);
+ return -EPROBE_DEFER;
+ }
+
+ sd->grp_id = GRP_ID_MXC_PARALLEL_CSI;
+
+ ret = v4l2_device_register_subdev(&mxc_md->v4l2_dev, sd);
+ if (!ret)
+ pcsidev->sd = sd;
+ else
+ v4l2_err(&mxc_md->v4l2_dev,
+ "Failed to register Parallel (%d)\n", ret);
+ return ret;
+}
+
+static int mxc_md_register_platform_entity(struct mxc_md *mxc_md,
+ struct device_node *node,
+ int plat_entity)
+{
+ struct device *dev = &mxc_md->pdev->dev;
+ struct mxc_isi_info *isi;
+ struct mxc_mipi_csi2_info *mipi_csi2;
+ struct mxc_parallel_csi_info *pcsidev;
+ int ret = -EINVAL;
+
+ switch (plat_entity) {
+ case IDX_ISI:
+ isi = mxc_md_parse_isi_entity(mxc_md, node);
+ if (!isi)
+ return -ENODEV;
+ ret = register_isi_entity(mxc_md, isi);
+ break;
+ case IDX_MIPI_CSI2:
+ mipi_csi2 = mxc_md_parse_csi_entity(mxc_md, node);
+ if (!mipi_csi2)
+ return -ENODEV;
+ ret = register_mipi_csi2_entity(mxc_md, mipi_csi2);
+ break;
+ case IDX_PARALLEL_CSI:
+ pcsidev = mxc_md_parse_pcsi_entity(mxc_md, node);
+ if (!pcsidev)
+ return -ENODEV;
+ ret = register_parallel_csi_entity(mxc_md, pcsidev);
+ break;
+ default:
+		dev_err(dev, "Invalid platform entity (%d)\n", plat_entity);
+ return ret;
+ }
+
+ return ret;
+}
+
+static int mxc_md_register_platform_entities(struct mxc_md *mxc_md,
+ struct device_node *parent)
+{
+ struct device_node *node;
+ int ret = 0;
+
+ for_each_available_child_of_node(parent, node) {
+ int plat_entity = -1;
+
+ if (!of_device_is_available(node))
+ continue;
+
+ /* If driver of any entity isn't ready try all again later. */
+ if (!strcmp(node->name, ISI_OF_NODE_NAME))
+ plat_entity = IDX_ISI;
+ else if (!strcmp(node->name, MIPI_CSI2_OF_NODE_NAME))
+ plat_entity = IDX_MIPI_CSI2;
+ else if (!strcmp(node->name, PARALLEL_OF_NODE_NAME))
+ plat_entity = IDX_PARALLEL_CSI;
+
+ if (plat_entity >= IDX_SENSOR && plat_entity < IDX_MAX) {
+ ret = mxc_md_register_platform_entity(mxc_md, node,
+ plat_entity);
+ if (ret < 0)
+ break;
+ }
+ }
+
+ return ret;
+}
+
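+/*
+ * Walk the csi/parallel-csi children of the media device node, parse each
+ * endpoint to find the remote sensor node, and register it for async
+ * matching by fwnode.  The endpoint's port number selects the sensor slot.
+ */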
+static int register_sensor_entities(struct mxc_md *mxc_md)
+{
+ struct device_node *parent = mxc_md->pdev->dev.of_node;
+ struct device_node *node, *ep, *rem;
+ struct v4l2_fwnode_endpoint endpoint;
+ struct i2c_client *client;
+ int index = 0;
+ int ret;
+
+ mxc_md->num_sensors = 0;
+
+	/* Attach sensors linked to MIPI CSI-2, parallel CSI or HDMI Rx */
+ for_each_available_child_of_node(parent, node) {
+ struct device_node *port;
+
+ if (of_node_cmp(node->name, MIPI_CSI2_OF_NODE_NAME) &&
+ of_node_cmp(node->name, PARALLEL_OF_NODE_NAME))
+ continue;
+
+ if (!of_device_is_available(node))
+ continue;
+
+		/* A csi2 node has a single port */
+ port = of_get_next_child(node, NULL);
+ if (!port)
+ continue;
+
+		/* The port has a single endpoint */
+ ep = of_get_next_child(port, NULL);
+ if (!ep)
+ return -EINVAL;
+
+ memset(&endpoint, 0, sizeof(endpoint));
+ ret = v4l2_fwnode_endpoint_parse(of_fwnode_handle(ep), &endpoint);
+		if (WARN_ON(ret || endpoint.base.port >= MXC_MAX_SENSORS)) {
+ v4l2_err(&mxc_md->v4l2_dev,
+ "Failed to get sensor endpoint\n");
+ return -EINVAL;
+ }
+
+ mxc_md->sensor[index].id = endpoint.base.port;
+
+ if (!of_node_cmp(node->name, MIPI_CSI2_OF_NODE_NAME))
+ mxc_md->sensor[index].mipi_mode = true;
+
+		/* The remote port's parent is the sensor node */
+		rem = of_graph_get_remote_port_parent(ep);
+		if (!rem) {
+			v4l2_info(&mxc_md->v4l2_dev,
+				  "Remote device at %s not found\n",
+				  ep->full_name);
+			of_node_put(ep);
+			continue;
+		}
+		of_node_put(ep);
+
+		/* Wait until the sensor driver has been probed the first time */
+ client = of_find_i2c_device_by_node(rem);
+ if (!client) {
+ v4l2_info(&mxc_md->v4l2_dev,
+ "Can't find i2c client device for %s\n",
+ of_node_full_name(rem));
+			of_node_put(rem);
+			return -EPROBE_DEFER;
+ }
+
+ mxc_md->sensor[index].asd.match_type = V4L2_ASYNC_MATCH_FWNODE;
+ mxc_md->sensor[index].asd.match.fwnode = of_fwnode_handle(rem);
+ v4l2_async_notifier_add_subdev(&mxc_md->subdev_notifier,
+ &mxc_md->sensor[index].asd);
+ mxc_md->num_sensors++;
+
+ index++;
+ }
+
+ return 0;
+}
+
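+/*
+ * Sensors are matched asynchronously; if not all of them showed up, the
+ * notifier never completes on its own.  The link_status check below then
+ * finishes the graph setup by hand for the sensors that did bind, or
+ * tears the ISI entities down again when none did.
+ */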
+static int mxc_md_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *nd = dev->of_node;
+ struct v4l2_device *v4l2_dev;
+ struct mxc_md *mxc_md;
+ int ret;
+
+ mxc_md = devm_kzalloc(dev, sizeof(*mxc_md), GFP_KERNEL);
+ if (!mxc_md)
+ return -ENOMEM;
+
+ mxc_md->pdev = pdev;
+ platform_set_drvdata(pdev, mxc_md);
+
+ mxc_md->parallel_csi = of_property_read_bool(nd, "parallel_csi");
+
+ /* register media device */
+ strlcpy(mxc_md->media_dev.model, "FSL Capture Media Device",
+ sizeof(mxc_md->media_dev.model));
+ mxc_md->media_dev.ops = &mxc_md_ops;
+ mxc_md->media_dev.dev = dev;
+
+ /* register v4l2 device */
+ v4l2_dev = &mxc_md->v4l2_dev;
+ v4l2_dev->mdev = &mxc_md->media_dev;
+ v4l2_dev->notify = mxc_sensor_notify;
+ strlcpy(v4l2_dev->name, "mx8-img-md", sizeof(v4l2_dev->name));
+
+ media_device_init(&mxc_md->media_dev);
+
+ ret = v4l2_device_register(dev, &mxc_md->v4l2_dev);
+ if (ret < 0) {
+ v4l2_err(v4l2_dev, "Failed to register v4l2_device (%d)\n", ret);
+ goto clean_md;
+ }
+
+ v4l2_async_notifier_init(&mxc_md->subdev_notifier);
+ ret = mxc_md_register_platform_entities(mxc_md, dev->of_node);
+ if (ret < 0)
+ goto clean_v4l2;
+
+ ret = register_sensor_entities(mxc_md);
+ if (ret < 0)
+ goto clean_ents;
+
+ if (mxc_md->num_sensors > 0) {
+ mxc_md->subdev_notifier.ops = &sd_async_notifier_ops;
+ mxc_md->valid_num_sensors = 0;
+ mxc_md->link_status = 0;
+
+ ret = v4l2_async_notifier_register(&mxc_md->v4l2_dev,
+ &mxc_md->subdev_notifier);
+ if (ret < 0) {
+			dev_warn(&mxc_md->pdev->dev,
+				 "Failed to register sensor notifier\n");
+			goto clean_ents;
+ }
+
+ if (!mxc_md->link_status) {
+ if (mxc_md->valid_num_sensors > 0) {
+ ret = subdev_notifier_complete(&mxc_md->subdev_notifier);
+ if (ret < 0)
+ goto clean_ents;
+
+ mxc_md_clean_unlink_channels(mxc_md);
+ } else {
+ /* no sensors connected */
+ mxc_md_unregister_all(mxc_md);
+ }
+ }
+ }
+
+ return 0;
+
+clean_ents:
+ mxc_md_unregister_entities(mxc_md);
+clean_v4l2:
+ v4l2_device_unregister(&mxc_md->v4l2_dev);
+clean_md:
+ media_device_cleanup(&mxc_md->media_dev);
+ return ret;
+}
+
+static int mxc_md_remove(struct platform_device *pdev)
+{
+ struct mxc_md *mxc_md = platform_get_drvdata(pdev);
+
+ if (!mxc_md)
+ return 0;
+
+ v4l2_async_notifier_unregister(&mxc_md->subdev_notifier);
+
+ v4l2_device_unregister(&mxc_md->v4l2_dev);
+ mxc_md_unregister_entities(mxc_md);
+ media_device_unregister(&mxc_md->media_dev);
+ media_device_cleanup(&mxc_md->media_dev);
+
+ return 0;
+}
+
+static const struct of_device_id mxc_md_of_match[] = {
+ { .compatible = "fsl,mxc-md",},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mxc_md_of_match);
+
+static struct platform_driver mxc_md_driver = {
+ .driver = {
+ .name = MXC_MD_DRIVER_NAME,
+ .of_match_table = mxc_md_of_match,
+ },
+ .probe = mxc_md_probe,
+ .remove = mxc_md_remove,
+};
+
+module_platform_driver(mxc_md_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("MXC Media Device driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" MXC_MD_DRIVER_NAME);
diff --git a/drivers/staging/media/imx/imx8-mipi-csi2-sam.c b/drivers/staging/media/imx/imx8-mipi-csi2-sam.c
new file mode 100644
index 000000000000..b10abd0d1a04
--- /dev/null
+++ b/drivers/staging/media/imx/imx8-mipi-csi2-sam.c
@@ -0,0 +1,1739 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Freescale i.MX8MN/P SoC series MIPI-CSI V3.3 receiver driver
+ * Copyright (C) 2015-2016 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright 2019 NXP
+ * Copyright 2020 NXP
+ *
+ * Samsung S5P/EXYNOS SoC series MIPI-CSI receiver driver
+ *
+ * Copyright (C) 2011 - 2013 Samsung Electronics Co., Ltd.
+ * Author: Sylwester Nawrocki <s.nawrocki@samsung.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-device.h>
+#include <linux/reset.h>
+
+#define CSIS_DRIVER_NAME "mxc-mipi-csi2-sam"
+#define CSIS_SUBDEV_NAME "mxc-mipi-csi2"
+#define CSIS_MAX_ENTITIES 2
+#define CSIS0_MAX_LANES 4
+#define CSIS1_MAX_LANES 2
+
+#define MIPI_CSIS_OF_NODE_NAME "csi"
+
+#define MIPI_CSIS_VC0_PAD_SINK 0
+#define MIPI_CSIS_VC1_PAD_SINK 1
+#define MIPI_CSIS_VC2_PAD_SINK 2
+#define MIPI_CSIS_VC3_PAD_SINK 3
+
+#define MIPI_CSIS_VC0_PAD_SOURCE 4
+#define MIPI_CSIS_VC1_PAD_SOURCE 5
+#define MIPI_CSIS_VC2_PAD_SOURCE 6
+#define MIPI_CSIS_VC3_PAD_SOURCE 7
+#define MIPI_CSIS_VCX_PADS_NUM 8
+
+#define MIPI_CSIS_DEF_PIX_WIDTH 1920
+#define MIPI_CSIS_DEF_PIX_HEIGHT 1080
+
+/* Register map definition */
+
+/* CSIS version */
+#define MIPI_CSIS_VERSION 0x00
+
+/* CSIS common control */
+#define MIPI_CSIS_CMN_CTRL 0x04
+#define MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW (1 << 16)
+#define MIPI_CSIS_CMN_CTRL_HDR_MODE (1 << 11)
+#define MIPI_CSIS_CMN_CTRL_INTER_MODE (1 << 10)
+#define MIPI_CSIS_CMN_CTRL_LANE_NR_OFFSET 8
+#define MIPI_CSIS_CMN_CTRL_LANE_NR_MASK (3 << 8)
+#define MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW_CTRL (1 << 2)
+#define MIPI_CSIS_CMN_CTRL_RESET (1 << 1)
+#define MIPI_CSIS_CMN_CTRL_ENABLE (1 << 0)
+
+/* CSIS clock control */
+#define MIPI_CSIS_CLK_CTRL 0x08
+#define MIPI_CSIS_CLK_CTRL_CLKGATE_TRAIL_CH3(x)	((x) << 28)
+#define MIPI_CSIS_CLK_CTRL_CLKGATE_TRAIL_CH2(x)	((x) << 24)
+#define MIPI_CSIS_CLK_CTRL_CLKGATE_TRAIL_CH1(x)	((x) << 20)
+#define MIPI_CSIS_CLK_CTRL_CLKGATE_TRAIL_CH0(x)	((x) << 16)
+#define MIPI_CSIS_CLK_CTRL_CLKGATE_EN_MSK (0xf << 4)
+#define MIPI_CSIS_CLK_CTRL_WCLK_SRC (1 << 0)
+
+/* CSIS Interrupt mask */
+#define MIPI_CSIS_INTMSK 0x10
+#define MIPI_CSIS_INTMSK_EVEN_BEFORE (1 << 31)
+#define MIPI_CSIS_INTMSK_EVEN_AFTER (1 << 30)
+#define MIPI_CSIS_INTMSK_ODD_BEFORE (1 << 29)
+#define MIPI_CSIS_INTMSK_ODD_AFTER (1 << 28)
+#define MIPI_CSIS_INTMSK_FRAME_START (1 << 24)
+#define MIPI_CSIS_INTMSK_FRAME_END (1 << 20)
+#define MIPI_CSIS_INTMSK_ERR_SOT_HS (1 << 16)
+#define MIPI_CSIS_INTMSK_ERR_LOST_FS (1 << 12)
+#define MIPI_CSIS_INTMSK_ERR_LOST_FE (1 << 8)
+#define MIPI_CSIS_INTMSK_ERR_OVER (1 << 4)
+#define MIPI_CSIS_INTMSK_ERR_WRONG_CFG (1 << 3)
+#define MIPI_CSIS_INTMSK_ERR_ECC (1 << 2)
+#define MIPI_CSIS_INTMSK_ERR_CRC (1 << 1)
+#define MIPI_CSIS_INTMSK_ERR_UNKNOWN (1 << 0)
+
+/* CSIS Interrupt source */
+#define MIPI_CSIS_INTSRC 0x14
+#define MIPI_CSIS_INTSRC_EVEN_BEFORE (1 << 31)
+#define MIPI_CSIS_INTSRC_EVEN_AFTER (1 << 30)
+#define MIPI_CSIS_INTSRC_EVEN (0x3 << 30)
+#define MIPI_CSIS_INTSRC_ODD_BEFORE (1 << 29)
+#define MIPI_CSIS_INTSRC_ODD_AFTER (1 << 28)
+#define MIPI_CSIS_INTSRC_ODD (0x3 << 28)
+#define MIPI_CSIS_INTSRC_NON_IMAGE_DATA (0xf << 28)
+#define MIPI_CSIS_INTSRC_FRAME_START (1 << 24)
+#define MIPI_CSIS_INTSRC_FRAME_END (1 << 20)
+#define MIPI_CSIS_INTSRC_ERR_SOT_HS (1 << 16)
+#define MIPI_CSIS_INTSRC_ERR_LOST_FS (1 << 12)
+#define MIPI_CSIS_INTSRC_ERR_LOST_FE (1 << 8)
+#define MIPI_CSIS_INTSRC_ERR_OVER (1 << 4)
+#define MIPI_CSIS_INTSRC_ERR_WRONG_CFG (1 << 3)
+#define MIPI_CSIS_INTSRC_ERR_ECC (1 << 2)
+#define MIPI_CSIS_INTSRC_ERR_CRC (1 << 1)
+#define MIPI_CSIS_INTSRC_ERR_UNKNOWN (1 << 0)
+#define MIPI_CSIS_INTSRC_ERRORS 0xfffff
+
+/* D-PHY status control */
+#define MIPI_CSIS_DPHYSTATUS 0x20
+#define MIPI_CSIS_DPHYSTATUS_ULPS_DAT (1 << 8)
+#define MIPI_CSIS_DPHYSTATUS_STOPSTATE_DAT (1 << 4)
+#define MIPI_CSIS_DPHYSTATUS_ULPS_CLK (1 << 1)
+#define MIPI_CSIS_DPHYSTATUS_STOPSTATE_CLK (1 << 0)
+
+/* D-PHY common control */
+#define MIPI_CSIS_DPHYCTRL 0x24
+#define MIPI_CSIS_DPHYCTRL_HSS_MASK (0xff << 24)
+#define MIPI_CSIS_DPHYCTRL_HSS_OFFSET 24
+#define MIPI_CSIS_DPHYCTRL_SCLKS_MASK (0x3 << 22)
+#define MIPI_CSIS_DPHYCTRL_SCLKS_OFFSET 22
+#define MIPI_CSIS_DPHYCTRL_DPDN_SWAP_CLK (1 << 6)
+#define MIPI_CSIS_DPHYCTRL_DPDN_SWAP_DAT (1 << 5)
+#define MIPI_CSIS_DPHYCTRL_ENABLE_DAT (1 << 1)
+#define MIPI_CSIS_DPHYCTRL_ENABLE_CLK (1 << 0)
+#define MIPI_CSIS_DPHYCTRL_ENABLE (0x1f << 0)
+
+/* D-PHY Master and Slave Control register Low */
+#define MIPI_CSIS_DPHYBCTRL_L 0x30
+/* D-PHY Master and Slave Control register High */
+#define MIPI_CSIS_DPHYBCTRL_H 0x34
+/* D-PHY Slave Control register Low */
+#define MIPI_CSIS_DPHYSCTRL_L 0x38
+/* D-PHY Slave Control register High */
+#define MIPI_CSIS_DPHYSCTRL_H 0x3c
+
+
+/* ISP Configuration register */
+#define MIPI_CSIS_ISPCONFIG_CH0 0x40
+#define MIPI_CSIS_ISPCONFIG_CH0_PIXEL_MODE_MASK (0x3 << 12)
+#define MIPI_CSIS_ISPCONFIG_CH0_PIXEL_MODE_SHIFT 12
+
+#define MIPI_CSIS_ISPCONFIG_CH1 0x50
+#define MIPI_CSIS_ISPCONFIG_CH1_PIXEL_MODE_MASK (0x3 << 12)
+#define MIPI_CSIS_ISPCONFIG_CH1_PIXEL_MODE_SHIFT 12
+
+#define MIPI_CSIS_ISPCONFIG_CH2 0x60
+#define MIPI_CSIS_ISPCONFIG_CH2_PIXEL_MODE_MASK (0x3 << 12)
+#define MIPI_CSIS_ISPCONFIG_CH2_PIXEL_MODE_SHIFT 12
+
+#define MIPI_CSIS_ISPCONFIG_CH3 0x70
+#define MIPI_CSIS_ISPCONFIG_CH3_PIXEL_MODE_MASK (0x3 << 12)
+#define MIPI_CSIS_ISPCONFIG_CH3_PIXEL_MODE_SHIFT 12
+
+#define PIXEL_MODE_SINGLE_PIXEL_MODE 0x0
+#define PIXEL_MODE_DUAL_PIXEL_MODE 0x1
+#define PIXEL_MODE_QUAD_PIXEL_MODE 0x2
+#define PIXEL_MODE_INVALID_PIXEL_MODE 0x3
+
+
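+/*
+ * The FMT field values below are the standard MIPI CSI-2 data type codes
+ * (e.g. 0x1e YUV422 8-bit, 0x2a/0x2b/0x2c RAW8/10/12) shifted into
+ * bits [7:2] of the ISP configuration register.
+ */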
+#define MIPI_CSIS_ISPCFG_MEM_FULL_GAP_MSK (0xff << 24)
+#define MIPI_CSIS_ISPCFG_MEM_FULL_GAP(x)	((x) << 24)
+#define MIPI_CSIS_ISPCFG_DOUBLE_CMPNT (1 << 12)
+#define MIPI_CSIS_ISPCFG_ALIGN_32BIT (1 << 11)
+#define MIPI_CSIS_ISPCFG_FMT_YCBCR422_8BIT (0x1e << 2)
+#define MIPI_CSIS_ISPCFG_FMT_RAW8 (0x2a << 2)
+#define MIPI_CSIS_ISPCFG_FMT_RAW10 (0x2b << 2)
+#define MIPI_CSIS_ISPCFG_FMT_RAW12 (0x2c << 2)
+#define MIPI_CSIS_ISPCFG_FMT_RGB888 (0x24 << 2)
+#define MIPI_CSIS_ISPCFG_FMT_RGB565 (0x22 << 2)
+/* User defined formats, x = 1...4 */
+#define MIPI_CSIS_ISPCFG_FMT_USER(x)		((0x30 + (x) - 1) << 2)
+#define MIPI_CSIS_ISPCFG_FMT_MASK (0x3f << 2)
+
+/* ISP Image Resolution register */
+#define MIPI_CSIS_ISPRESOL_CH0 0x44
+#define MIPI_CSIS_ISPRESOL_CH1 0x54
+#define MIPI_CSIS_ISPRESOL_CH2 0x64
+#define MIPI_CSIS_ISPRESOL_CH3 0x74
+#define CSIS_MAX_PIX_WIDTH 0xffff
+#define CSIS_MAX_PIX_HEIGHT 0xffff
+
+/* ISP SYNC register */
+#define MIPI_CSIS_ISPSYNC_CH0 0x48
+#define MIPI_CSIS_ISPSYNC_CH1 0x58
+#define MIPI_CSIS_ISPSYNC_CH2 0x68
+#define MIPI_CSIS_ISPSYNC_CH3 0x78
+
+#define MIPI_CSIS_ISPSYNC_HSYNC_LINTV_OFFSET 18
+#define MIPI_CSIS_ISPSYNC_VSYNC_SINTV_OFFSET 12
+#define MIPI_CSIS_ISPSYNC_VSYNC_EINTV_OFFSET 0
+
+#define MIPI_CSIS_FRAME_COUNTER_CH0 0x0100
+#define MIPI_CSIS_FRAME_COUNTER_CH1 0x0104
+#define MIPI_CSIS_FRAME_COUNTER_CH2 0x0108
+#define MIPI_CSIS_FRAME_COUNTER_CH3 0x010C
+
+/* Non-image packet data buffers */
+#define MIPI_CSIS_PKTDATA_ODD 0x2000
+#define MIPI_CSIS_PKTDATA_EVEN 0x3000
+#define MIPI_CSIS_PKTDATA_SIZE SZ_4K
+
+#define DEFAULT_SCLK_CSIS_FREQ 166000000UL
+
+/* display_mix_clk_en_csr */
+#define DISP_MIX_GASKET_0_CTRL 0x00
+#define GASKET_0_CTRL_DATA_TYPE(x) (((x) & (0x3F)) << 8)
+#define GASKET_0_CTRL_DATA_TYPE_MASK ((0x3FUL) << (8))
+
+#define GASKET_0_CTRL_DATA_TYPE_YUV420_8 0x18
+#define GASKET_0_CTRL_DATA_TYPE_YUV420_10 0x19
+#define GASKET_0_CTRL_DATA_TYPE_LE_YUV420_8 0x1a
+#define GASKET_0_CTRL_DATA_TYPE_CS_YUV420_8 0x1c
+#define GASKET_0_CTRL_DATA_TYPE_CS_YUV420_10 0x1d
+#define GASKET_0_CTRL_DATA_TYPE_YUV422_8 0x1e
+#define GASKET_0_CTRL_DATA_TYPE_YUV422_10 0x1f
+#define GASKET_0_CTRL_DATA_TYPE_RGB565 0x22
+#define GASKET_0_CTRL_DATA_TYPE_RGB666 0x23
+#define GASKET_0_CTRL_DATA_TYPE_RGB888 0x24
+#define GASKET_0_CTRL_DATA_TYPE_RAW6 0x28
+#define GASKET_0_CTRL_DATA_TYPE_RAW7 0x29
+#define GASKET_0_CTRL_DATA_TYPE_RAW8 0x2a
+#define GASKET_0_CTRL_DATA_TYPE_RAW10 0x2b
+#define GASKET_0_CTRL_DATA_TYPE_RAW12 0x2c
+#define GASKET_0_CTRL_DATA_TYPE_RAW14 0x2d
+
+#define GASKET_0_CTRL_DUAL_COMP_ENABLE BIT(1)
+#define GASKET_0_CTRL_ENABLE BIT(0)
+
+#define DISP_MIX_GASKET_0_HSIZE 0x04
+#define DISP_MIX_GASKET_0_VSIZE 0x08
+
+struct mipi_csis_event {
+ u32 mask;
+ const char * const name;
+ unsigned int counter;
+};
+
+/**
+ * struct csis_pix_format - CSIS pixel format description
+ * @pix_width_alignment: horizontal pixel alignment, width will be
+ * multiple of 2^pix_width_alignment
+ * @code: corresponding media bus code
+ * @fmt_reg: MIPI_CSIS_CONFIG register value
+ * @data_alignment: MIPI-CSI data alignment in bits
+ */
+struct csis_pix_format {
+ unsigned int pix_width_alignment;
+ u32 code;
+ u32 fmt_reg;
+ u8 data_alignment;
+};
+
+struct csis_pktbuf {
+ u32 *data;
+ unsigned int len;
+};
+
+struct csis_hw_reset1 {
+ struct regmap *src;
+ u8 req_src;
+ u8 rst_bit;
+};
+
+enum {
+ VVCSIOC_RESET = 0x100,
+ VVCSIOC_POWERON,
+ VVCSIOC_POWEROFF,
+ VVCSIOC_STREAMON,
+ VVCSIOC_STREAMOFF,
+ VVCSIOC_S_FMT,
+ VVCSIOC_S_HDR,
+};
+
+struct csi_sam_format {
+ int64_t format;
+ __u32 width;
+ __u32 height;
+};
+
+struct csi_state;
+typedef int (*mipi_csis_phy_reset_t)(struct csi_state *state);
+
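+/*
+ * Keep the error events at the front of this table:
+ * mipi_csis_log_counters() treats the trailing entries as informational
+ * and skips them when only errors are requested.
+ */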
+static const struct mipi_csis_event mipi_csis_events[] = {
+ /* Errors */
+ { MIPI_CSIS_INTSRC_ERR_SOT_HS, "SOT Error" },
+ { MIPI_CSIS_INTSRC_ERR_LOST_FS, "Lost Frame Start Error" },
+ { MIPI_CSIS_INTSRC_ERR_LOST_FE, "Lost Frame End Error" },
+ { MIPI_CSIS_INTSRC_ERR_OVER, "FIFO Overflow Error" },
+ { MIPI_CSIS_INTSRC_ERR_ECC, "ECC Error" },
+ { MIPI_CSIS_INTSRC_ERR_CRC, "CRC Error" },
+ { MIPI_CSIS_INTSRC_ERR_UNKNOWN, "Unknown Error" },
+ /* Non-image data receive events */
+ { MIPI_CSIS_INTSRC_EVEN_BEFORE, "Non-image data before even frame" },
+ { MIPI_CSIS_INTSRC_EVEN_AFTER, "Non-image data after even frame" },
+ { MIPI_CSIS_INTSRC_ODD_BEFORE, "Non-image data before odd frame" },
+ { MIPI_CSIS_INTSRC_ODD_AFTER, "Non-image data after odd frame" },
+ /* Frame start/end */
+ { MIPI_CSIS_INTSRC_FRAME_START, "Frame Start" },
+ { MIPI_CSIS_INTSRC_FRAME_END, "Frame End" },
+};
+#define MIPI_CSIS_NUM_EVENTS ARRAY_SIZE(mipi_csis_events)
+
+/**
+ * struct csi_state - the driver's internal state data structure
+ * @lock: mutex serializing the subdev and power management operations,
+ * protecting @format and @flags members
+ * @sd: v4l2_subdev associated with CSIS device instance
+ * @index: the hardware instance index
+ * @pdev: CSIS platform device
+ * @phy: pointer to the CSIS generic PHY
+ * @regs: mmaped I/O registers memory
+ * @supplies: CSIS regulator supplies
+ * @clock: CSIS clocks
+ * @irq: requested s5p-mipi-csis irq number
+ * @flags: the state variable for power and streaming control
+ * @clock_frequency: device bus clock frequency
+ * @hs_settle: HS-RX settle time
+ * @clk_settle: Clk settle time
+ * @num_lanes: number of MIPI-CSI data lanes used
+ * @max_num_lanes: maximum number of MIPI-CSI data lanes supported
+ * @wclk_ext: CSI wrapper clock: 0 - bus clock, 1 - external SCLK_CAM
+ * @csis_fmt: current CSIS pixel format
+ * @format: common media bus format for the source and sink pad
+ * @slock: spinlock protecting structure members below
+ * @pkt_buf: the frame embedded (non-image) data buffer
+ * @events: MIPI-CSIS event (error) counters
+ */
+struct csi_state {
+ struct v4l2_subdev sd;
+ struct mutex lock;
+ struct device *dev;
+ struct v4l2_device v4l2_dev;
+
+ struct media_pad pads[MIPI_CSIS_VCX_PADS_NUM];
+
+ u8 index;
+ struct platform_device *pdev;
+ struct phy *phy;
+ void __iomem *regs;
+ struct clk *mipi_clk;
+ struct clk *disp_axi;
+ struct clk *disp_apb;
+ int irq;
+ u32 flags;
+
+ u32 clk_frequency;
+ u32 hs_settle;
+ u32 clk_settle;
+ u32 num_lanes;
+ u32 max_num_lanes;
+ u8 wclk_ext;
+
+ u8 vchannel;
+ const struct csis_pix_format *csis_fmt;
+ struct v4l2_mbus_framefmt format;
+
+ spinlock_t slock;
+ struct csis_pktbuf pkt_buf;
+ struct mipi_csis_event events[MIPI_CSIS_NUM_EVENTS];
+
+ struct v4l2_async_subdev asd;
+ struct v4l2_async_notifier subdev_notifier;
+ struct v4l2_async_subdev *async_subdevs[2];
+
+ struct csis_hw_reset1 hw_reset;
+ struct regulator *mipi_phy_regulator;
+
+ struct regmap *gasket;
+ struct regmap *mix_gpr;
+
+ struct reset_control *soft_resetn;
+ struct reset_control *clk_enable;
+ struct reset_control *mipi_reset;
+
+ mipi_csis_phy_reset_t phy_reset_fn;
+ bool hdr;
+};
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+static const struct csis_pix_format mipi_csis_formats[] = {
+ {
+ .code = MEDIA_BUS_FMT_YUYV8_2X8,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_YCBCR422_8BIT,
+ .data_alignment = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_RGB888_1X24,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RGB888,
+ .data_alignment = 24,
+ }, {
+ .code = MEDIA_BUS_FMT_UYVY8_2X8,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_YCBCR422_8BIT,
+ .data_alignment = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_VYUY8_2X8,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_YCBCR422_8BIT,
+ .data_alignment = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_SBGGR8_1X8,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW8,
+ .data_alignment = 8,
+ }, {
+ .code = MEDIA_BUS_FMT_SBGGR10_1X10,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW10,
+ .data_alignment = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_SGBRG10_1X10,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW10,
+ .data_alignment = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG10_1X10,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW10,
+ .data_alignment = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_SRGGB10_1X10,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW10,
+ .data_alignment = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_SBGGR12_1X12,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW12,
+ .data_alignment = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_SGBRG12_1X12,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW12,
+ .data_alignment = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_SGRBG12_1X12,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW12,
+ .data_alignment = 16,
+ }, {
+ .code = MEDIA_BUS_FMT_SRGGB12_1X12,
+ .fmt_reg = MIPI_CSIS_ISPCFG_FMT_RAW12,
+ .data_alignment = 16,
+ },
+};
+
+#define mipi_csis_write(__csis, __r, __v) writel(__v, __csis->regs + __r)
+#define mipi_csis_read(__csis, __r) readl(__csis->regs + __r)
+
+static void dump_csis_regs(struct csi_state *state, const char *label)
+{
+ struct {
+ u32 offset;
+ const char * const name;
+ } registers[] = {
+ { 0x00, "CSIS_VERSION" },
+ { 0x04, "CSIS_CMN_CTRL" },
+ { 0x08, "CSIS_CLK_CTRL" },
+ { 0x10, "CSIS_INTMSK" },
+ { 0x14, "CSIS_INTSRC" },
+ { 0x20, "CSIS_DPHYSTATUS" },
+ { 0x24, "CSIS_DPHYCTRL" },
+ { 0x30, "CSIS_DPHYBCTRL_L" },
+ { 0x34, "CSIS_DPHYBCTRL_H" },
+ { 0x38, "CSIS_DPHYSCTRL_L" },
+ { 0x3C, "CSIS_DPHYSCTRL_H" },
+ { 0x40, "CSIS_ISPCONFIG_CH0" },
+ { 0x50, "CSIS_ISPCONFIG_CH1" },
+ { 0x60, "CSIS_ISPCONFIG_CH2" },
+ { 0x70, "CSIS_ISPCONFIG_CH3" },
+ { 0x44, "CSIS_ISPRESOL_CH0" },
+ { 0x54, "CSIS_ISPRESOL_CH1" },
+ { 0x64, "CSIS_ISPRESOL_CH2" },
+ { 0x74, "CSIS_ISPRESOL_CH3" },
+ { 0x48, "CSIS_ISPSYNC_CH0" },
+ { 0x58, "CSIS_ISPSYNC_CH1" },
+ { 0x68, "CSIS_ISPSYNC_CH2" },
+ { 0x78, "CSIS_ISPSYNC_CH3" },
+ };
+ u32 i;
+
+ v4l2_dbg(2, debug, &state->sd, "--- %s ---\n", label);
+
+ for (i = 0; i < ARRAY_SIZE(registers); i++) {
+ u32 cfg = mipi_csis_read(state, registers[i].offset);
+ v4l2_dbg(2, debug, &state->sd, "%20s[%x]: 0x%.8x\n", registers[i].name, registers[i].offset, cfg);
+ }
+}
+
+static void dump_gasket_regs(struct csi_state *state, const char *label)
+{
+ struct {
+ u32 offset;
+ const char * const name;
+ } registers[] = {
+ { 0x60, "GPR_GASKET_0_CTRL" },
+ { 0x64, "GPR_GASKET_0_HSIZE" },
+ { 0x68, "GPR_GASKET_0_VSIZE" },
+ };
+ u32 i, cfg;
+
+ v4l2_dbg(2, debug, &state->sd, "--- %s ---\n", label);
+
+ for (i = 0; i < ARRAY_SIZE(registers); i++) {
+ regmap_read(state->gasket, registers[i].offset, &cfg);
+ v4l2_dbg(2, debug, &state->sd, "%20s[%x]: 0x%.8x\n", registers[i].name, registers[i].offset, cfg);
+ }
+}
+
+static inline struct csi_state *mipi_sd_to_csi_state(struct v4l2_subdev *sdev)
+{
+ return container_of(sdev, struct csi_state, sd);
+}
+
+static inline struct csi_state *notifier_to_mipi_dev(struct v4l2_async_notifier *n)
+{
+ return container_of(n, struct csi_state, subdev_notifier);
+}
+
+static struct media_pad *csis_get_remote_sensor_pad(struct csi_state *state)
+{
+ struct v4l2_subdev *subdev = &state->sd;
+ struct media_pad *sink_pad, *source_pad;
+ int i;
+
+	for (i = 0; i < subdev->entity.num_pads; i++) {
+		sink_pad = &subdev->entity.pads[i];
+
+		if (sink_pad->flags & MEDIA_PAD_FL_SINK) {
+			source_pad = media_entity_remote_pad(sink_pad);
+			if (source_pad)
+				return source_pad;
+		}
+	}
+
+	v4l2_err(&state->sd, "%s: no remote pad found\n", __func__);
+	return NULL;
+}
+
+static struct v4l2_subdev *csis_get_remote_subdev(struct csi_state *state,
+ const char * const label)
+{
+ struct media_pad *source_pad;
+ struct v4l2_subdev *sen_sd;
+
+ /* Get remote source pad */
+ source_pad = csis_get_remote_sensor_pad(state);
+ if (!source_pad) {
+ v4l2_err(&state->sd, "%s, No remote pad found!\n", label);
+ return NULL;
+ }
+
+ /* Get remote source pad subdev */
+ sen_sd = media_entity_to_v4l2_subdev(source_pad->entity);
+ if (!sen_sd) {
+ v4l2_err(&state->sd, "%s, No remote subdev found!\n", label);
+ return NULL;
+ }
+
+ return sen_sd;
+}
+
+static const struct csis_pix_format *find_csis_format(u32 code)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(mipi_csis_formats); i++)
+ if (code == mipi_csis_formats[i].code)
+ return &mipi_csis_formats[i];
+ return NULL;
+}
+
+static void mipi_csis_clean_irq(struct csi_state *state)
+{
+ u32 status;
+
+ status = mipi_csis_read(state, MIPI_CSIS_INTSRC);
+ mipi_csis_write(state, MIPI_CSIS_INTSRC, status);
+
+ status = mipi_csis_read(state, MIPI_CSIS_INTMSK);
+ mipi_csis_write(state, MIPI_CSIS_INTMSK, status);
+}
+
+static void mipi_csis_enable_interrupts(struct csi_state *state, bool on)
+{
+ u32 val;
+
+ mipi_csis_clean_irq(state);
+
+ val = mipi_csis_read(state, MIPI_CSIS_INTMSK);
+ if (on)
+ val |= 0x0FFFFF1F;
+ else
+ val &= ~0x0FFFFF1F;
+ mipi_csis_write(state, MIPI_CSIS_INTMSK, val);
+}
+
+static void mipi_csis_sw_reset(struct csi_state *state)
+{
+ u32 val;
+
+ val = mipi_csis_read(state, MIPI_CSIS_CMN_CTRL);
+ val |= MIPI_CSIS_CMN_CTRL_RESET;
+ mipi_csis_write(state, MIPI_CSIS_CMN_CTRL, val);
+
+ udelay(20);
+}
+
+static int mipi_csis_phy_init(struct csi_state *state)
+{
+ state->mipi_phy_regulator = devm_regulator_get(state->dev, "mipi-phy");
+ if (IS_ERR(state->mipi_phy_regulator)) {
+		dev_err(state->dev, "Failed to get mipi-phy regulator\n");
+ return PTR_ERR(state->mipi_phy_regulator);
+ }
+
+ regulator_set_voltage(state->mipi_phy_regulator, 1000000, 1000000);
+ return 0;
+}
+
+static void mipi_csis_phy_reset_mx8mn(struct csi_state *state)
+{
+ struct reset_control *reset = state->mipi_reset;
+
+ reset_control_assert(reset);
+ usleep_range(10, 20);
+
+ reset_control_deassert(reset);
+ usleep_range(10, 20);
+
+ /* temporary place */
+ if (state->mix_gpr)
+ regmap_write(state->mix_gpr, 0x138, 0x8d8360);
+}
+
+static void mipi_csis_system_enable(struct csi_state *state, int on)
+{
+ u32 val, mask;
+
+ val = mipi_csis_read(state, MIPI_CSIS_CMN_CTRL);
+ if (on)
+ val |= MIPI_CSIS_CMN_CTRL_ENABLE;
+ else
+ val &= ~MIPI_CSIS_CMN_CTRL_ENABLE;
+ mipi_csis_write(state, MIPI_CSIS_CMN_CTRL, val);
+
+ val = mipi_csis_read(state, MIPI_CSIS_DPHYCTRL);
+ val &= ~MIPI_CSIS_DPHYCTRL_ENABLE;
+ if (on) {
+ mask = (1 << (state->num_lanes + 1)) - 1;
+ val |= (mask & MIPI_CSIS_DPHYCTRL_ENABLE);
+ }
+ mipi_csis_write(state, MIPI_CSIS_DPHYCTRL, val);
+}
+
+/* Called with the state.lock mutex held */
+static void __mipi_csis_set_format(struct csi_state *state)
+{
+ struct v4l2_mbus_framefmt *mf = &state->format;
+ u32 val;
+
+ v4l2_dbg(1, debug, &state->sd, "fmt: %#x, %d x %d\n",
+ mf->code, mf->width, mf->height);
+
+ /* Color format */
+ val = mipi_csis_read(state, MIPI_CSIS_ISPCONFIG_CH0);
+ val &= ~MIPI_CSIS_ISPCFG_FMT_MASK;
+ val |= state->csis_fmt->fmt_reg;
+ mipi_csis_write(state, MIPI_CSIS_ISPCONFIG_CH0, val);
+
+ val = mipi_csis_read(state, MIPI_CSIS_ISPCONFIG_CH0);
+ val &= ~MIPI_CSIS_ISPCONFIG_CH0_PIXEL_MODE_MASK;
+ if (state->csis_fmt->fmt_reg == MIPI_CSIS_ISPCFG_FMT_YCBCR422_8BIT)
+ val |= (PIXEL_MODE_DUAL_PIXEL_MODE <<
+ MIPI_CSIS_ISPCONFIG_CH0_PIXEL_MODE_SHIFT);
+ mipi_csis_write(state, MIPI_CSIS_ISPCONFIG_CH0, val);
+
+ /* Pixel resolution */
+ val = mf->width | (mf->height << 16);
+ mipi_csis_write(state, MIPI_CSIS_ISPRESOL_CH0, val);
+ if (state->hdr) {
+ mipi_csis_write(state, MIPI_CSIS_ISPRESOL_CH1, val);
+ mipi_csis_write(state, MIPI_CSIS_ISPRESOL_CH2, val);
+ mipi_csis_write(state, MIPI_CSIS_ISPRESOL_CH3, val);
+ val = state->csis_fmt->fmt_reg;
+ mipi_csis_write(state, MIPI_CSIS_ISPCONFIG_CH1, val | 1);
+ mipi_csis_write(state, MIPI_CSIS_ISPCONFIG_CH2, val | 2);
+ mipi_csis_write(state, MIPI_CSIS_ISPCONFIG_CH3, val | 3);
+ }
+}
+
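+/*
+ * Program the D-PHY HS-SETTLE (bits 31:24) and CLK-SETTLE (bits 23:22)
+ * windows; suitable values depend on the CSI-2 link's byte clock rate.
+ */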
+static void mipi_csis_set_hsync_settle(struct csi_state *state)
+{
+ u32 val;
+
+ val = mipi_csis_read(state, MIPI_CSIS_DPHYCTRL);
+ val &= ~MIPI_CSIS_DPHYCTRL_HSS_MASK;
+ val |= (state->hs_settle << 24) | (state->clk_settle << 22);
+ mipi_csis_write(state, MIPI_CSIS_DPHYCTRL, val);
+}
+
+static void mipi_csis_set_params(struct csi_state *state)
+{
+ u32 val;
+
+ val = mipi_csis_read(state, MIPI_CSIS_CMN_CTRL);
+ val &= ~MIPI_CSIS_CMN_CTRL_LANE_NR_MASK;
+ val |= (state->num_lanes - 1) << MIPI_CSIS_CMN_CTRL_LANE_NR_OFFSET;
+ val |= MIPI_CSIS_CMN_CTRL_HDR_MODE;
+ mipi_csis_write(state, MIPI_CSIS_CMN_CTRL, val);
+
+ __mipi_csis_set_format(state);
+ mipi_csis_set_hsync_settle(state);
+
+ val = mipi_csis_read(state, MIPI_CSIS_ISPCONFIG_CH0);
+ if (state->csis_fmt->data_alignment == 32)
+ val |= MIPI_CSIS_ISPCFG_ALIGN_32BIT;
+ else /* Normal output */
+ val &= ~MIPI_CSIS_ISPCFG_ALIGN_32BIT;
+ mipi_csis_write(state, MIPI_CSIS_ISPCONFIG_CH0, val);
+
+ val = (0 << MIPI_CSIS_ISPSYNC_HSYNC_LINTV_OFFSET) |
+ (0 << MIPI_CSIS_ISPSYNC_VSYNC_SINTV_OFFSET) |
+ (0 << MIPI_CSIS_ISPSYNC_VSYNC_EINTV_OFFSET);
+ mipi_csis_write(state, MIPI_CSIS_ISPSYNC_CH0, val);
+
+ val = mipi_csis_read(state, MIPI_CSIS_CLK_CTRL);
+ val &= ~MIPI_CSIS_CLK_CTRL_WCLK_SRC;
+ if (state->wclk_ext)
+ val |= MIPI_CSIS_CLK_CTRL_WCLK_SRC;
+ val |= MIPI_CSIS_CLK_CTRL_CLKGATE_TRAIL_CH0(15);
+ val &= ~MIPI_CSIS_CLK_CTRL_CLKGATE_EN_MSK;
+ mipi_csis_write(state, MIPI_CSIS_CLK_CTRL, val);
+
+ mipi_csis_write(state, MIPI_CSIS_DPHYBCTRL_L, 0x1f4);
+ mipi_csis_write(state, MIPI_CSIS_DPHYBCTRL_H, 0);
+
+ /* Update the shadow register. */
+ val = mipi_csis_read(state, MIPI_CSIS_CMN_CTRL);
+ val |= (MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW |
+ MIPI_CSIS_CMN_CTRL_UPDATE_SHADOW_CTRL);
+ if (state->hdr) {
+ val |= MIPI_CSIS_CMN_CTRL_HDR_MODE;
+ val |= 0xE0000;
+ }
+
+ mipi_csis_write(state, MIPI_CSIS_CMN_CTRL, val);
+}
+
+static int mipi_csis_clk_enable(struct csi_state *state)
+{
+ struct device *dev = state->dev;
+ int ret;
+
+ ret = clk_prepare_enable(state->mipi_clk);
+ if (ret) {
+ dev_err(dev, "enable mipi_clk failed!\n");
+ return ret;
+ }
+
+	ret = clk_prepare_enable(state->disp_axi);
+	if (ret) {
+		dev_err(dev, "enable disp_axi clk failed!\n");
+		goto err_mipi;
+	}
+
+	ret = clk_prepare_enable(state->disp_apb);
+	if (ret) {
+		dev_err(dev, "enable disp_apb clk failed!\n");
+		goto err_axi;
+	}
+
+	return 0;
+
+err_axi:
+	clk_disable_unprepare(state->disp_axi);
+err_mipi:
+	clk_disable_unprepare(state->mipi_clk);
+	return ret;
+}
+
+static void mipi_csis_clk_disable(struct csi_state *state)
+{
+	clk_disable_unprepare(state->disp_apb);
+	clk_disable_unprepare(state->disp_axi);
+	clk_disable_unprepare(state->mipi_clk);
+}
+
+static int mipi_csis_clk_get(struct csi_state *state)
+{
+ struct device *dev = &state->pdev->dev;
+	int ret;
+
+ state->mipi_clk = devm_clk_get(dev, "mipi_clk");
+ if (IS_ERR(state->mipi_clk)) {
+ dev_err(dev, "Could not get mipi csi clock\n");
+ return -ENODEV;
+ }
+
+ state->disp_axi = devm_clk_get(dev, "disp_axi");
+ if (IS_ERR(state->disp_axi)) {
+ dev_warn(dev, "Could not get disp_axi clock\n");
+ return -ENODEV;
+ }
+
+ state->disp_apb = devm_clk_get(dev, "disp_apb");
+ if (IS_ERR(state->disp_apb)) {
+ dev_warn(dev, "Could not get disp apb clock\n");
+ return -ENODEV;
+ }
+
+ /* Set clock rate */
+ if (state->clk_frequency) {
+ ret = clk_set_rate(state->mipi_clk, state->clk_frequency);
+ if (ret < 0) {
+			dev_err(dev, "failed to set rate %d\n",
+				state->clk_frequency);
+ return -EINVAL;
+ }
+ } else {
+ dev_WARN(dev, "No clock frequency specified!\n");
+ }
+
+ return 0;
+}
+
+static int disp_mix_sft_rstn(struct reset_control *reset, bool enable)
+{
+ int ret;
+
+ if (!reset)
+ return 0;
+
+ ret = enable ? reset_control_assert(reset) :
+ reset_control_deassert(reset);
+ return ret;
+}
+
+static int disp_mix_clks_enable(struct reset_control *reset, bool enable)
+{
+ int ret;
+
+ if (!reset)
+ return 0;
+
+ ret = enable ? reset_control_assert(reset) :
+ reset_control_deassert(reset);
+ return ret;
+}
+
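+/*
+ * The "gasket" is glue logic between the CSIS pixel output and the ISI:
+ * it must be programmed with a data type matching the CSI-2 format and
+ * with the exact frame size before streaming is enabled.
+ */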
+static void disp_mix_gasket_config(struct csi_state *state)
+{
+ struct regmap *gasket = state->gasket;
+ struct csis_pix_format const *fmt = state->csis_fmt;
+ struct v4l2_mbus_framefmt *mf = &state->format;
+ s32 fmt_val = -EINVAL;
+ u32 val;
+
+ switch (fmt->code) {
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ fmt_val = GASKET_0_CTRL_DATA_TYPE_RGB888;
+ break;
+ case MEDIA_BUS_FMT_YUYV8_2X8:
+ case MEDIA_BUS_FMT_YVYU8_2X8:
+ case MEDIA_BUS_FMT_UYVY8_2X8:
+ case MEDIA_BUS_FMT_VYUY8_2X8:
+ fmt_val = GASKET_0_CTRL_DATA_TYPE_YUV422_8;
+ break;
+ case MEDIA_BUS_FMT_SBGGR8_1X8:
+ fmt_val = GASKET_0_CTRL_DATA_TYPE_RAW8;
+ break;
+ case MEDIA_BUS_FMT_SBGGR10_1X10:
+ case MEDIA_BUS_FMT_SGBRG10_1X10:
+ case MEDIA_BUS_FMT_SGRBG10_1X10:
+ case MEDIA_BUS_FMT_SRGGB10_1X10:
+ fmt_val = GASKET_0_CTRL_DATA_TYPE_RAW10;
+ break;
+ case MEDIA_BUS_FMT_SBGGR12_1X12:
+ case MEDIA_BUS_FMT_SGBRG12_1X12:
+ case MEDIA_BUS_FMT_SGRBG12_1X12:
+ case MEDIA_BUS_FMT_SRGGB12_1X12:
+ fmt_val = GASKET_0_CTRL_DATA_TYPE_RAW12;
+ break;
+ default:
+ pr_err("gasket not support format %d\n", fmt->code);
+ return;
+ }
+
+ regmap_read(gasket, DISP_MIX_GASKET_0_CTRL, &val);
+ if (fmt_val == GASKET_0_CTRL_DATA_TYPE_YUV422_8)
+ val |= GASKET_0_CTRL_DUAL_COMP_ENABLE;
+ val |= GASKET_0_CTRL_DATA_TYPE(fmt_val);
+ regmap_write(gasket, DISP_MIX_GASKET_0_CTRL, val);
+
+ if (WARN_ON(!mf->width || !mf->height))
+ return;
+
+ regmap_write(gasket, DISP_MIX_GASKET_0_HSIZE, mf->width);
+ regmap_write(gasket, DISP_MIX_GASKET_0_VSIZE, mf->height);
+}
+
+static void disp_mix_gasket_enable(struct csi_state *state, bool enable)
+{
+ struct regmap *gasket = state->gasket;
+
+ if (enable)
+ regmap_update_bits(gasket, DISP_MIX_GASKET_0_CTRL,
+ GASKET_0_CTRL_ENABLE,
+ GASKET_0_CTRL_ENABLE);
+ else
+ regmap_update_bits(gasket, DISP_MIX_GASKET_0_CTRL,
+ GASKET_0_CTRL_ENABLE,
+ 0);
+}
+
+static void mipi_csis_start_stream(struct csi_state *state)
+{
+ mipi_csis_sw_reset(state);
+
+ disp_mix_gasket_config(state);
+ mipi_csis_set_params(state);
+
+ mipi_csis_system_enable(state, true);
+ disp_mix_gasket_enable(state, true);
+ mipi_csis_enable_interrupts(state, true);
+ msleep(5);
+}
+
+static void mipi_csis_stop_stream(struct csi_state *state)
+{
+ mipi_csis_enable_interrupts(state, false);
+ mipi_csis_system_enable(state, false);
+ disp_mix_gasket_enable(state, false);
+}
+
+static void mipi_csis_clear_counters(struct csi_state *state)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&state->slock, flags);
+ for (i = 0; i < MIPI_CSIS_NUM_EVENTS; i++)
+ state->events[i].counter = 0;
+ spin_unlock_irqrestore(&state->slock, flags);
+}
+
+static void mipi_csis_log_counters(struct csi_state *state, bool non_errors)
+{
+ int i = non_errors ? MIPI_CSIS_NUM_EVENTS : MIPI_CSIS_NUM_EVENTS - 4;
+ unsigned long flags;
+
+ spin_lock_irqsave(&state->slock, flags);
+
+ for (i--; i >= 0; i--) {
+ if (state->events[i].counter > 0 || debug)
+ v4l2_info(&state->sd, "%s events: %d\n",
+ state->events[i].name,
+ state->events[i].counter);
+ }
+ spin_unlock_irqrestore(&state->slock, flags);
+}
+
+static int mipi_csi2_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ return 0;
+}
+
+static const struct media_entity_operations mipi_csi2_sd_media_ops = {
+ .link_setup = mipi_csi2_link_setup,
+};
+
+/*
+ * V4L2 subdev operations
+ */
+static int mipi_csis_s_power(struct v4l2_subdev *mipi_sd, int on)
+{
+ struct csi_state *state = mipi_sd_to_csi_state(mipi_sd);
+ struct v4l2_subdev *sen_sd;
+
+ /* Get remote source pad subdev */
+ sen_sd = csis_get_remote_subdev(state, __func__);
+ if (!sen_sd) {
+ v4l2_err(&state->sd, "%s, No remote subdev found!\n", __func__);
+ return -EINVAL;
+ }
+
+ return v4l2_subdev_call(sen_sd, core, s_power, on);
+}
+
+static int mipi_csis_s_stream(struct v4l2_subdev *mipi_sd, int enable)
+{
+ struct csi_state *state = mipi_sd_to_csi_state(mipi_sd);
+
+ v4l2_dbg(1, debug, mipi_sd, "%s: %d, state: 0x%x\n",
+ __func__, enable, state->flags);
+
+ if (enable) {
+ pm_runtime_get_sync(state->dev);
+ mipi_csis_clear_counters(state);
+ mipi_csis_start_stream(state);
+ dump_csis_regs(state, __func__);
+ dump_gasket_regs(state, __func__);
+ } else {
+ mipi_csis_stop_stream(state);
+ if (debug > 0)
+ mipi_csis_log_counters(state, true);
+ pm_runtime_put(state->dev);
+ }
+
+ return 0;
+}
+
+static int mipi_csis_set_fmt(struct v4l2_subdev *mipi_sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct csi_state *state = mipi_sd_to_csi_state(mipi_sd);
+ struct v4l2_mbus_framefmt *mf = &format->format;
+ struct csis_pix_format const *csis_fmt;
+ struct media_pad *source_pad;
+ struct v4l2_subdev *sen_sd;
+ int ret;
+
+ /* Get remote source pad */
+ source_pad = csis_get_remote_sensor_pad(state);
+ if (!source_pad) {
+ v4l2_err(&state->sd, "%s, No remote pad found!\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Get remote source pad subdev */
+ sen_sd = csis_get_remote_subdev(state, __func__);
+ if (!sen_sd) {
+ v4l2_err(&state->sd, "%s, No remote subdev found!\n", __func__);
+ return -EINVAL;
+ }
+
+ format->pad = source_pad->index;
+ mf->code = MEDIA_BUS_FMT_UYVY8_2X8;
+ ret = v4l2_subdev_call(sen_sd, pad, set_fmt, NULL, format);
+ if (ret < 0) {
+ v4l2_err(&state->sd, "%s, set sensor format fail\n", __func__);
+ return -EINVAL;
+ }
+
+ csis_fmt = find_csis_format(mf->code);
+ if (!csis_fmt) {
+ csis_fmt = &mipi_csis_formats[0];
+ mf->code = csis_fmt->code;
+ }
+
+ return 0;
+}
+
+static int mipi_csis_get_fmt(struct v4l2_subdev *mipi_sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *format)
+{
+ struct csi_state *state = mipi_sd_to_csi_state(mipi_sd);
+ struct v4l2_mbus_framefmt *mf = &state->format;
+ struct media_pad *source_pad;
+ struct v4l2_subdev *sen_sd;
+ int ret;
+
+ /* Get remote source pad */
+ source_pad = csis_get_remote_sensor_pad(state);
+ if (!source_pad) {
+ v4l2_err(&state->sd, "%s, No remote pad found!\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Get remote source pad subdev */
+ sen_sd = csis_get_remote_subdev(state, __func__);
+ if (!sen_sd) {
+ v4l2_err(&state->sd, "%s, No remote subdev found!\n", __func__);
+ return -EINVAL;
+ }
+
+ format->pad = source_pad->index;
+ ret = v4l2_subdev_call(sen_sd, pad, get_fmt, NULL, format);
+ if (ret < 0) {
+ v4l2_err(&state->sd, "%s, call get_fmt of subdev failed!\n", __func__);
+ return ret;
+ }
+
+ memcpy(mf, &format->format, sizeof(struct v4l2_mbus_framefmt));
+ return 0;
+}
+
+static int mipi_csis_s_rx_buffer(struct v4l2_subdev *mipi_sd, void *buf,
+ unsigned int *size)
+{
+ struct csi_state *state = mipi_sd_to_csi_state(mipi_sd);
+ unsigned long flags;
+
+ *size = min_t(unsigned int, *size, MIPI_CSIS_PKTDATA_SIZE);
+
+ spin_lock_irqsave(&state->slock, flags);
+ state->pkt_buf.data = buf;
+ state->pkt_buf.len = *size;
+ spin_unlock_irqrestore(&state->slock, flags);
+
+ return 0;
+}
+
+static int mipi_csis_s_frame_interval(struct v4l2_subdev *mipi_sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct csi_state *state = mipi_sd_to_csi_state(mipi_sd);
+ struct v4l2_subdev *sen_sd;
+
+ /* Get remote source pad subdev */
+ sen_sd = csis_get_remote_subdev(state, __func__);
+ if (!sen_sd) {
+ v4l2_err(&state->sd, "%s, No remote subdev found!\n", __func__);
+ return -EINVAL;
+ }
+
+ return v4l2_subdev_call(sen_sd, video, s_frame_interval, interval);
+}
+
+static int mipi_csis_g_frame_interval(struct v4l2_subdev *mipi_sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct csi_state *state = mipi_sd_to_csi_state(mipi_sd);
+ struct v4l2_subdev *sen_sd;
+
+ /* Get remote source pad subdev */
+ sen_sd = csis_get_remote_subdev(state, __func__);
+ if (!sen_sd) {
+ v4l2_err(&state->sd, "%s, No remote subdev found!\n", __func__);
+ return -EINVAL;
+ }
+
+ return v4l2_subdev_call(sen_sd, video, g_frame_interval, interval);
+}
+
+static int mipi_csis_enum_framesizes(struct v4l2_subdev *mipi_sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct csi_state *state = mipi_sd_to_csi_state(mipi_sd);
+ struct v4l2_subdev *sen_sd;
+
+ /* Get remote source pad subdev */
+ sen_sd = csis_get_remote_subdev(state, __func__);
+ if (!sen_sd) {
+ v4l2_err(&state->sd, "%s, No remote subdev found!\n", __func__);
+ return -EINVAL;
+ }
+
+ return v4l2_subdev_call(sen_sd, pad, enum_frame_size, NULL, fse);
+}
+
+static int mipi_csis_enum_frameintervals(struct v4l2_subdev *mipi_sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ struct csi_state *state = mipi_sd_to_csi_state(mipi_sd);
+ struct v4l2_subdev *sen_sd;
+
+ /* Get remote source pad subdev */
+ sen_sd = csis_get_remote_subdev(state, __func__);
+ if (!sen_sd) {
+ v4l2_err(&state->sd, "%s, No remote subdev found!\n", __func__);
+ return -EINVAL;
+ }
+
+ return v4l2_subdev_call(sen_sd, pad, enum_frame_interval, NULL, fie);
+}
+
+static int mipi_csis_log_status(struct v4l2_subdev *mipi_sd)
+{
+ struct csi_state *state = mipi_sd_to_csi_state(mipi_sd);
+
+ mutex_lock(&state->lock);
+ mipi_csis_log_counters(state, true);
+ if (debug) {
+ dump_csis_regs(state, __func__);
+ dump_gasket_regs(state, __func__);
+ }
+ mutex_unlock(&state->lock);
+ return 0;
+}
+
+static int csis_s_fmt(struct v4l2_subdev *sd, struct csi_sam_format *fmt)
+{
+ u32 code;
+ const struct csis_pix_format *csis_format;
+ struct csi_state *state = container_of(sd, struct csi_state, sd);
+
+ switch (fmt->format) {
+ case V4L2_PIX_FMT_SBGGR10:
+ code = MEDIA_BUS_FMT_SBGGR10_1X10;
+ break;
+ case V4L2_PIX_FMT_SGBRG10:
+ code = MEDIA_BUS_FMT_SGBRG10_1X10;
+ break;
+ case V4L2_PIX_FMT_SGRBG10:
+ code = MEDIA_BUS_FMT_SGRBG10_1X10;
+ break;
+ case V4L2_PIX_FMT_SRGGB10:
+ code = MEDIA_BUS_FMT_SRGGB10_1X10;
+ break;
+ case V4L2_PIX_FMT_SBGGR12:
+ code = MEDIA_BUS_FMT_SBGGR12_1X12;
+ break;
+ case V4L2_PIX_FMT_SGBRG12:
+ code = MEDIA_BUS_FMT_SGBRG12_1X12;
+ break;
+ case V4L2_PIX_FMT_SGRBG12:
+ code = MEDIA_BUS_FMT_SGRBG12_1X12;
+ break;
+ case V4L2_PIX_FMT_SRGGB12:
+ code = MEDIA_BUS_FMT_SRGGB12_1X12;
+ break;
+ default:
+ return -EINVAL;
+ }
+ csis_format = find_csis_format(code);
+ if (csis_format == NULL)
+ return -EINVAL;
+
+ state->csis_fmt = csis_format;
+ state->format.width = fmt->width;
+ state->format.height = fmt->height;
+ disp_mix_gasket_config(state);
+ mipi_csis_set_params(state);
+ return 0;
+}
+
+static int csis_s_hdr(struct v4l2_subdev *sd, bool enable)
+{
+ struct csi_state *state = container_of(sd, struct csi_state, sd);
+
+ v4l2_dbg(2, debug, &state->sd, "%s: %d\n", __func__, enable);
+ state->hdr = enable;
+ return 0;
+}
+
+static int csis_ioc_qcap(struct v4l2_subdev *dev, void *args)
+{
+ struct csi_state *state = mipi_sd_to_csi_state(dev);
+ struct v4l2_capability *cap = (struct v4l2_capability *)args;
+
+ strscpy((char *)cap->driver, "csi_sam_subdev", sizeof(cap->driver));
+ cap->bus_info[0] = state->index;
+ return 0;
+}
+
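+/*
+ * With hardened usercopy the ioctl argument is bounced through a stack
+ * copy before use. Note that the copy_from_user()/copy_to_user() return
+ * values are ignored here, so a short copy is not reported to the caller.
+ */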
+#ifdef CONFIG_HARDENED_USERCOPY
+#define USER_TO_KERNEL(TYPE) \
+ do {\
+ TYPE tmp; \
+ arg = (void *)(&tmp); \
+ copy_from_user(arg, arg_user, sizeof(TYPE));\
+ } while (0)
+
+#define KERNEL_TO_USER(TYPE) \
+ copy_to_user(arg_user, arg, sizeof(TYPE));
+#else
+#define USER_TO_KERNEL(TYPE)
+#define KERNEL_TO_USER(TYPE)
+#endif
+static long csis_priv_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg_user)
+{
+ int ret;
+ struct csi_state *state = container_of(sd, struct csi_state, sd);
+ void *arg = arg_user;
+
+ pm_runtime_get_sync(state->dev);
+
+ switch (cmd) {
+ case VVCSIOC_RESET:
+ mipi_csis_sw_reset(state);
+ ret = 0;
+ break;
+ case VVCSIOC_POWERON:
+ ret = mipi_csis_s_power(sd, 1);
+ break;
+ case VVCSIOC_POWEROFF:
+ ret = mipi_csis_s_power(sd, 0);
+ break;
+ case VVCSIOC_STREAMON:
+ ret = mipi_csis_s_stream(sd, 1);
+ break;
+ case VVCSIOC_STREAMOFF:
+ ret = mipi_csis_s_stream(sd, 0);
+ break;
+ case VVCSIOC_S_FMT: {
+ USER_TO_KERNEL(struct csi_sam_format);
+ ret = csis_s_fmt(sd, (struct csi_sam_format *)arg);
+ break;
+ }
+ case VVCSIOC_S_HDR: {
+ USER_TO_KERNEL(bool);
+ ret = csis_s_hdr(sd, *(bool *) arg);
+ break;
+ }
+ case VIDIOC_QUERYCAP:
+ ret = csis_ioc_qcap(sd, arg);
+ break;
+ default:
+ v4l2_err(&state->sd, "unsupported csi-sam command %d.", cmd);
+ ret = -EINVAL;
+ break;
+ }
+ pm_runtime_put(state->dev);
+
+ return ret;
+}
+
+static struct v4l2_subdev_core_ops mipi_csis_core_ops = {
+ .s_power = mipi_csis_s_power,
+ .log_status = mipi_csis_log_status,
+ .ioctl = csis_priv_ioctl,
+};
+
+static struct v4l2_subdev_video_ops mipi_csis_video_ops = {
+ .s_rx_buffer = mipi_csis_s_rx_buffer,
+ .s_stream = mipi_csis_s_stream,
+
+ .g_frame_interval = mipi_csis_g_frame_interval,
+ .s_frame_interval = mipi_csis_s_frame_interval,
+};
+
+static const struct v4l2_subdev_pad_ops mipi_csis_pad_ops = {
+ .enum_frame_size = mipi_csis_enum_framesizes,
+ .enum_frame_interval = mipi_csis_enum_frameintervals,
+ .get_fmt = mipi_csis_get_fmt,
+ .set_fmt = mipi_csis_set_fmt,
+};
+
+static struct v4l2_subdev_ops mipi_csis_subdev_ops = {
+ .core = &mipi_csis_core_ops,
+ .video = &mipi_csis_video_ops,
+ .pad = &mipi_csis_pad_ops,
+};
+
+static irqreturn_t mipi_csis_irq_handler(int irq, void *dev_id)
+{
+ struct csi_state *state = dev_id;
+ struct csis_pktbuf *pktbuf = &state->pkt_buf;
+ unsigned long flags;
+ u32 status;
+
+ status = mipi_csis_read(state, MIPI_CSIS_INTSRC);
+
+ spin_lock_irqsave(&state->slock, flags);
+ if ((status & MIPI_CSIS_INTSRC_NON_IMAGE_DATA) && pktbuf->data) {
+ u32 offset;
+
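+ /* Non-image payloads land in one of two ping-pong buffers; the EVEN flag selects which copy to read. */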
+ if (status & MIPI_CSIS_INTSRC_EVEN)
+ offset = MIPI_CSIS_PKTDATA_EVEN;
+ else
+ offset = MIPI_CSIS_PKTDATA_ODD;
+
+ memcpy(pktbuf->data, state->regs + offset, pktbuf->len);
+ pktbuf->data = NULL;
+ rmb();
+ }
+
+ /* Update the event/error counters */
+ if ((status & MIPI_CSIS_INTSRC_ERRORS) || debug) {
+ int i;
+ for (i = 0; i < MIPI_CSIS_NUM_EVENTS; i++) {
+ if (!(status & state->events[i].mask))
+ continue;
+ state->events[i].counter++;
+ v4l2_dbg(2, debug, &state->sd, "%s: %d\n",
+ state->events[i].name,
+ state->events[i].counter);
+ }
+ v4l2_dbg(2, debug, &state->sd, "status: %08x\n", status);
+ }
+ spin_unlock_irqrestore(&state->slock, flags);
+
+ mipi_csis_write(state, MIPI_CSIS_INTSRC, status);
+ return IRQ_HANDLED;
+}
+
+static int mipi_csis_parse_dt(struct platform_device *pdev,
+ struct csi_state *state)
+{
+ struct device_node *node = pdev->dev.of_node;
+
+ state->index = of_alias_get_id(node, "csi");
+
+ if (of_property_read_u32(node, "clock-frequency", &state->clk_frequency))
+ state->clk_frequency = DEFAULT_SCLK_CSIS_FREQ;
+
+ if (of_property_read_u32(node, "bus-width", &state->max_num_lanes))
+ return -EINVAL;
+
+ node = of_graph_get_next_endpoint(node, NULL);
+ if (!node) {
+ dev_err(&pdev->dev, "No port node at\n");
+ return -EINVAL;
+ }
+
+ /* Get MIPI CSI-2 bus configuration from the endpoint node. */
+ of_property_read_u32(node, "csis-hs-settle", &state->hs_settle);
+ of_property_read_u32(node, "csis-clk-settle", &state->clk_settle);
+ of_property_read_u32(node, "data-lanes", &state->num_lanes);
+
+ state->wclk_ext = of_property_read_bool(node, "csis-wclk");
+
+ of_node_put(node);
+ return 0;
+}
+
+static const struct of_device_id mipi_csis_of_match[];
+
+/* init subdev */
+static int mipi_csis_subdev_init(struct v4l2_subdev *mipi_sd,
+ struct platform_device *pdev,
+ const struct v4l2_subdev_ops *ops)
+{
+ struct csi_state *state = platform_get_drvdata(pdev);
+ int ret = 0;
+
+ v4l2_subdev_init(mipi_sd, ops);
+ mipi_sd->owner = THIS_MODULE;
+ snprintf(mipi_sd->name, sizeof(mipi_sd->name), "%s.%d",
+ CSIS_SUBDEV_NAME, state->index);
+ mipi_sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ mipi_sd->entity.function = MEDIA_ENT_F_IO_V4L;
+ mipi_sd->dev = &pdev->dev;
+
+ state->csis_fmt = &mipi_csis_formats[0];
+ state->format.code = mipi_csis_formats[0].code;
+ state->format.width = MIPI_CSIS_DEF_PIX_WIDTH;
+ state->format.height = MIPI_CSIS_DEF_PIX_HEIGHT;
+
+ /* This allows the host driver to retrieve the platform device id */
+ v4l2_set_subdevdata(mipi_sd, state);
+
+ return ret;
+}
+
+static int mipi_csis_of_parse_resets(struct csi_state *state)
+{
+ int ret;
+ struct device *dev = state->dev;
+ struct device_node *np = dev->of_node;
+ struct device_node *parent, *child;
+ struct of_phandle_args args;
+ struct reset_control *rstc;
+ const char *compat;
+ uint32_t len, rstc_num = 0;
+
+ ret = of_parse_phandle_with_args(np, "resets", "#reset-cells",
+ 0, &args);
+ if (ret)
+ return ret;
+
+ parent = args.np;
+ for_each_child_of_node(parent, child) {
+ compat = of_get_property(child, "compatible", NULL);
+ if (!compat)
+ continue;
+
+ rstc = of_reset_control_array_get(child, false, false, true);
+ if (IS_ERR(rstc))
+ continue;
+
+ len = strlen(compat);
+ if (!of_compat_cmp("csi,soft-resetn", compat, len)) {
+ state->soft_resetn = rstc;
+ rstc_num++;
+ } else if (!of_compat_cmp("csi,clk-enable", compat, len)) {
+ state->clk_enable = rstc;
+ rstc_num++;
+ } else if (!of_compat_cmp("csi,mipi-reset", compat, len)) {
+ state->mipi_reset = rstc;
+ rstc_num++;
+ } else {
+ dev_warn(dev, "invalid csis reset node: %s\n", compat);
+ }
+ }
+
+ of_node_put(parent);
+
+ if (!rstc_num) {
+ dev_err(dev, "no valid reset control found\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int mipi_csis_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct v4l2_subdev *mipi_sd;
+ struct resource *mem_res;
+ struct csi_state *state;
+ const struct of_device_id *of_id;
+ mipi_csis_phy_reset_t phy_reset_fn;
+ int ret = -ENOMEM;
+
+ state = devm_kzalloc(dev, sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ mutex_init(&state->lock);
+ spin_lock_init(&state->slock);
+
+ state->pdev = pdev;
+ mipi_sd = &state->sd;
+ state->dev = dev;
+
+ ret = mipi_csis_parse_dt(pdev, state);
+ if (ret < 0)
+ return ret;
+
+ if (state->num_lanes == 0 || state->num_lanes > state->max_num_lanes) {
+ dev_err(dev, "Unsupported number of data lanes: %d (max. %d)\n",
+ state->num_lanes, state->max_num_lanes);
+ return -EINVAL;
+ }
+
+ ret = mipi_csis_phy_init(state);
+ if (ret < 0)
+ return ret;
+
+ of_id = of_match_node(mipi_csis_of_match, dev->of_node);
+ if (!of_id || !of_id->data) {
+ dev_err(dev, "No match data for %s\n", dev_name(dev));
+ return -EINVAL;
+ }
+ phy_reset_fn = of_id->data;
+ state->phy_reset_fn = phy_reset_fn;
+
+ state->gasket = syscon_regmap_lookup_by_phandle(dev->of_node, "csi-gpr");
+ if (IS_ERR(state->gasket)) {
+ dev_err(dev, "failed to get csi gasket\n");
+ return PTR_ERR(state->gasket);
+ }
+
+ if (!of_property_read_bool(dev->of_node, "no-reset-control")) {
+ ret = mipi_csis_of_parse_resets(state);
+ if (ret < 0) {
+ dev_err(dev, "Can not parse reset control\n");
+ return ret;
+ }
+ }
+
+ state->mix_gpr = syscon_regmap_lookup_by_phandle(dev->of_node, "gpr");
+ if (IS_ERR(state->mix_gpr)) {
+ dev_warn(dev, "failed to get mix gpr\n");
+ state->mix_gpr = NULL;
+ }
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ state->regs = devm_ioremap_resource(dev, mem_res);
+ if (IS_ERR(state->regs))
+ return PTR_ERR(state->regs);
+ state->irq = platform_get_irq(pdev, 0);
+ if (state->irq < 0) {
+ dev_err(dev, "Failed to get irq\n");
+ return state->irq;
+ }
+ ret = mipi_csis_clk_get(state);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_csis_clk_enable(state);
+ if (ret < 0)
+ return ret;
+
+ disp_mix_clks_enable(state->clk_enable, true);
+ disp_mix_sft_rstn(state->soft_resetn, false);
+ phy_reset_fn(state);
+
+ ret = devm_request_irq(dev, state->irq, mipi_csis_irq_handler, 0,
+ dev_name(dev), state);
+ if (ret) {
+ dev_err(dev, "Interrupt request failed\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, state);
+ ret = mipi_csis_subdev_init(&state->sd, pdev, &mipi_csis_subdev_ops);
+ if (ret < 0) {
+ dev_err(dev, "mipi csi subdev init failed\n");
+ return ret;
+ }
+
+ state->pads[MIPI_CSIS_VC0_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ state->pads[MIPI_CSIS_VC1_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ state->pads[MIPI_CSIS_VC2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ state->pads[MIPI_CSIS_VC3_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ state->pads[MIPI_CSIS_VC0_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ state->pads[MIPI_CSIS_VC1_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ state->pads[MIPI_CSIS_VC2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ state->pads[MIPI_CSIS_VC3_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&state->sd.entity, MIPI_CSIS_VCX_PADS_NUM, state->pads);
+ if (ret < 0) {
+ dev_err(dev, "mipi csi entity pad init failed\n");
+ return ret;
+ }
+
+ memcpy(state->events, mipi_csis_events, sizeof(state->events));
+ state->sd.entity.ops = &mipi_csi2_sd_media_ops;
+
+ pm_runtime_enable(dev);
+
+ dev_info(&pdev->dev, "lanes: %d, hs_settle: %d, clk_settle: %d, wclk: %d, freq: %u\n",
+ state->num_lanes, state->hs_settle, state->clk_settle,
+ state->wclk_ext, state->clk_frequency);
+ return 0;
+}
+
+static int mipi_csis_system_suspend(struct device *dev)
+{
+ return pm_runtime_force_suspend(dev);
+}
+
+static int mipi_csis_system_resume(struct device *dev)
+{
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret < 0) {
+ dev_err(dev, "force resume %s failed!\n", dev_name(dev));
+ return ret;
+ }
+
+ return 0;
+}
+
+static int mipi_csis_runtime_suspend(struct device *dev)
+{
+ struct csi_state *state = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regulator_disable(state->mipi_phy_regulator);
+ if (ret < 0)
+ return ret;
+
+ disp_mix_clks_enable(state->clk_enable, false);
+ mipi_csis_clk_disable(state);
+ return 0;
+}
+
+static int mipi_csis_runtime_resume(struct device *dev)
+{
+ struct csi_state *state = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regulator_enable(state->mipi_phy_regulator);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_csis_clk_enable(state);
+ if (ret < 0)
+ return ret;
+
+ disp_mix_clks_enable(state->clk_enable, true);
+ disp_mix_sft_rstn(state->soft_resetn, false);
+
+ if (state->phy_reset_fn)
+ state->phy_reset_fn(state);
+
+ return 0;
+}
+
+static int mipi_csis_remove(struct platform_device *pdev)
+{
+ struct csi_state *state = platform_get_drvdata(pdev);
+
+ media_entity_cleanup(&state->sd.entity);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops mipi_csis_pm_ops = {
+ SET_RUNTIME_PM_OPS(mipi_csis_runtime_suspend, mipi_csis_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(mipi_csis_system_suspend, mipi_csis_system_resume)
+};
+
+static const struct of_device_id mipi_csis_of_match[] = {
+ { .compatible = "fsl,imx8mn-mipi-csi",
+ .data = (void *)&mipi_csis_phy_reset_mx8mn,
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mipi_csis_of_match);
+
+static struct platform_driver mipi_csis_driver = {
+ .driver = {
+ .name = CSIS_DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .pm = &mipi_csis_pm_ops,
+ .of_match_table = mipi_csis_of_match,
+ },
+ .probe = mipi_csis_probe,
+ .remove = mipi_csis_remove,
+};
+module_platform_driver(mipi_csis_driver);
+
+MODULE_DESCRIPTION("Freescale MIPI-CSI2 receiver driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/staging/media/imx/imx8-mipi-csi2.c b/drivers/staging/media/imx/imx8-mipi-csi2.c
new file mode 100644
index 000000000000..8881a5a3596c
--- /dev/null
+++ b/drivers/staging/media/imx/imx8-mipi-csi2.c
@@ -0,0 +1,1170 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * V4L2 Capture CSI Subdev for Freescale i.MX8QM/QXP SOC
+ *
+ * Copyright (c) 2019 NXP Semiconductor
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/memory.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/firmware/imx/sci.h>
+#include <dt-bindings/firmware/imx/rsrc.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+
+#include "imx8-common.h"
+
+#define MXC_MIPI_CSI2_DRIVER_NAME "mxc-mipi-csi2"
+#define MXC_MIPI_CSI2_SUBDEV_NAME MXC_MIPI_CSI2_DRIVER_NAME
+#define MXC_MIPI_CSI2_MAX_LANES 4
+
+/* Subsystem CSR */
+#define CSI2SS_BASE_OFFSET 0x0
+
+#define CSI2SS_PLM_CTRL (CSI2SS_BASE_OFFSET + 0x0)
+#define CSI2SS_PLM_CTRL_PL_CLK_RUN 0x80000000
+#define CSI2SS_PLM_CTRL_VSYNC_OVERRIDE 0x200
+#define CSI2SS_PLM_CTRL_HSYNC_OVERRIDE 0x400
+#define CSI2SS_PLM_CTRL_VALID_OVERRIDE 0x800
+#define CSI2SS_PLM_CTRL_POLARITY_MASK 0x1000
+#define CSI2SS_PLM_CTRL_POLARITY_HIGH 0x1000
+#define CSI2SS_PLM_CTRL_POLARITY_LOW 0x0
+#define CSI2SS_PLM_CTRL_ENABLE_PL 1
+#define CSI2SS_PLM_CTRL_ENABLE_PL_OFFSET 0
+#define CSI2SS_PLM_CTRL_ENABLE_PL_MASK 1
+
+#define CSI2SS_PHY_CTRL (CSI2SS_BASE_OFFSET + 0x4)
+#define CSI2SS_PHY_CTRL_PD 1
+#define CSI2SS_PHY_CTRL_PD_OFFSET 22
+#define CSI2SS_PHY_CTRL_PD_MASK 0x400000
+#define CSI2SS_PHY_CTRL_RTERM_SEL 1
+#define CSI2SS_PHY_CTRL_RTERM_SEL_OFFSET 21
+#define CSI2SS_PHY_CTRL_RTERM_SEL_MASK 0x200000
+#define CSI2SS_PHY_CTRL_RX_HS_SETTLE_OFFSET 4
+#define CSI2SS_PHY_CTRL_RX_HS_SETTLE_MASK 0x3F0
+#define CSI2SS_PHY_CTRL_CONT_CLK_MODE 1
+#define CSI2SS_PHY_CTRL_CONT_CLK_MODE_OFFSET 3
+#define CSI2SS_PHY_CTRL_CONT_CLK_MODE_MASK 0x8
+#define CSI2SS_PHY_CTRL_DDRCLK_EN 1
+#define CSI2SS_PHY_CTRL_DDRCLK_EN_OFFSET 2
+#define CSI2SS_PHY_CTRL_DDRCLK_EN_MASK 0x4
+#define CSI2SS_PHY_CTRL_AUTO_PD_EN 1
+#define CSI2SS_PHY_CTRL_AUTO_PD_EN_OFFSET 1
+#define CSI2SS_PHY_CTRL_AUTO_PD_EN_MASK 0x2
+#define CSI2SS_PHY_CTRL_RX_ENABLE 1
+#define CSI2SS_PHY_CTRL_RX_ENABLE_OFFSET 0
+#define CSI2SS_PHY_CTRL_RX_ENABLE_MASK 0x1
+
+#define CSI2SS_PHY_STATUS (CSI2SS_BASE_OFFSET + 0x8)
+#define CSI2SS_PHY_TEST_STATUS (CSI2SS_BASE_OFFSET + 0x10)
+#define CSI2SS_PHY_TEST_STATUS_D0 (CSI2SS_BASE_OFFSET + 0x14)
+#define CSI2SS_PHY_TEST_STATUS_D1 (CSI2SS_BASE_OFFSET + 0x18)
+#define CSI2SS_PHY_TEST_STATUS_D2 (CSI2SS_BASE_OFFSET + 0x1C)
+#define CSI2SS_PHY_TEST_STATUS_D3 (CSI2SS_BASE_OFFSET + 0x20)
+
+#define CSI2SS_VC_INTERLACED (CSI2SS_BASE_OFFSET + 0x30)
+#define CSI2SS_VC_INTERLACED_VC0 1
+#define CSI2SS_VC_INTERLACED_VC1 2
+#define CSI2SS_VC_INTERLACED_VC2 4
+#define CSI2SS_VC_INTERLACED_VC3 8
+#define CSI2SS_VC_INTERLACED_OFFSET 0
+#define CSI2SS_VC_INTERLACED_MASK 0xF
+
+#define CSI2SS_DATA_TYPE (CSI2SS_BASE_OFFSET + 0x38)
+#define CSI2SS_DATA_TYPE_LEGACY_YUV420_8BIT BIT(2)
+#define CSI2SS_DATA_TYPE_YUV422_8BIT BIT(6)
+#define CSI2SS_DATA_TYPE_YUV422_10BIT BIT(7)
+#define CSI2SS_DATA_TYPE_RGB444 BIT(8)
+#define CSI2SS_DATA_TYPE_RGB555 BIT(9)
+#define CSI2SS_DATA_TYPE_RGB565 BIT(10)
+#define CSI2SS_DATA_TYPE_RGB666 BIT(11)
+#define CSI2SS_DATA_TYPE_RGB888 BIT(12)
+#define CSI2SS_DATA_TYPE_RAW6 BIT(16)
+#define CSI2SS_DATA_TYPE_RAW8 BIT(18)
+#define CSI2SS_DATA_TYPE_RAW10 BIT(19)
+#define CSI2SS_DATA_TYPE_RAW12 BIT(20)
+#define CSI2SS_DATA_TYPE_RAW14 BIT(21)
+
+#define CSI2SS_YUV420_1ST_LINE_DATA_TYPE (CSI2SS_BASE_OFFSET + 0x40)
+#define CSI2SS_YUV420_1ST_LINE_DATA_TYPE_ODD 0
+#define CSI2SS_YUV420_1ST_LINE_DATA_TYPE_EVEN 1
+#define CSI2SS_YUV420_1ST_LINE_DATA_TYPE_OFFSET 0
+#define CSI2SS_YUV420_1ST_LINE_DATA_TYPE_MASK 1
+
+#define CSI2SS_CTRL_CLK_RESET (CSI2SS_BASE_OFFSET + 0x44)
+#define CSI2SS_CTRL_CLK_RESET_EN 1
+#define CSI2SS_CTRL_CLK_RESET_OFFSET 0
+#define CSI2SS_CTRL_CLK_RESET_MASK 1
+#define CSI2SS_CTRL_CLK_RESET_CLK_OFF 1
+#define CSI2SS_CTRL_CLK_RESET_CLK_OFFSET 1
+#define CSI2SS_CTRL_CLK_RESET_CLK_MASK 0x1
+
+#define CSI2SS_STREAM_FENCE_CTRL (CSI2SS_BASE_OFFSET + 0x48)
+#define CSI2SS_STREAM_FENCE_VC0 1
+#define CSI2SS_STREAM_FENCE_VC1 2
+#define CSI2SS_STREAM_FENCE_VC2 4
+#define CSI2SS_STREAM_FENCE_VC3 8
+#define CSI2SS_STREAM_FENCE_CTRL_OFFSET 0
+#define CSI2SS_STREAM_FENCE_CTRL_MASK 0xF
+
+#define CSI2SS_STREAM_FENCE_STATUS (CSI2SS_BASE_OFFSET + 0x4C)
+
+/* CSI-2 controller CSR */
+#define CSI2RX_BASE_OFFSET (0x100)
+
+#define CSI2RX_CFG_NUM_LANES (CSI2RX_BASE_OFFSET + 0x0)
+#define CSI2RX_CFG_NUM_LANES_OFFSET 0
+#define CSI2RX_CFG_NUM_LANES_MASK 0x3
+
+#define CSI2RX_CFG_DISABLE_DATA_LANES (CSI2RX_BASE_OFFSET + 0x4)
+#define CSI2RX_CFG_DISABLE_DATA_LANES_3 8
+#define CSI2RX_CFG_DISABLE_DATA_LANES_2 4
+#define CSI2RX_CFG_DISABLE_DATA_LANES_1 2
+#define CSI2RX_CFG_DISABLE_DATA_LANES_0 1
+#define CSI2RX_CFG_DISABLE_DATA_LANES_OFFSET 0
+#define CSI2RX_CFG_DISABLE_DATA_LANES_MASK 0xF
+
+#define CSI2RX_BIT_ERR (CSI2RX_BASE_OFFSET + 0x8)
+
+#define CSI2RX_IRQ_STATUS (CSI2RX_BASE_OFFSET + 0xC)
+#define CSI2RX_IRQ_STATUS_CRC_ERROR 0x1
+#define CSI2RX_IRQ_STATUS_1BIT_CRC_ERROR 0x2
+#define CSI2RX_IRQ_STATUS_2BIT_CRC_ERROR 0x4
+#define CSI2RX_IRQ_STATUS_ULPS_CHANGE 0x8
+#define CSI2RX_IRQ_STATUS_DPHY_ERRSOTHS 0x10
+#define CSI2RX_IRQ_STATUS_DPHY_ERRSOTSYNC_HS 0x20
+#define CSI2RX_IRQ_STATUS_DPHY_ERRESC 0x40
+#define CSI2RX_IRQ_STATUS_DPHY_ERRSYNCESC 0x80
+#define CSI2RX_IRQ_STATUS_DPHY_ERRCTRL 0x100
+
+#define CSI2RX_IRQ_MASK (CSI2RX_BASE_OFFSET + 0x10)
+#define CSI2RX_IRQ_MASK_CRC_ERROR 0x1
+#define CSI2RX_IRQ_MASK_1BIT_CRC_ERROR 0x2
+#define CSI2RX_IRQ_MASK_2BIT_CRC_ERROR 0x4
+#define CSI2RX_IRQ_MASK_ULPS_CHANGE 0x8
+#define CSI2RX_IRQ_MASK_DPHY_ERRSOTHS 0x10
+#define CSI2RX_IRQ_MASK_DPHY_ERRSOTSYNC_HS 0x20
+#define CSI2RX_IRQ_MASK_DPHY_ERRESC 0x40
+#define CSI2RX_IRQ_MASK_DPHY_ERRSYNCESC 0x80
+#define CSI2RX_IRQ_MASK_DPHY_ERRCTRL 0x100
+
+#define CSI2RX_ULPS_STATUS (CSI2RX_BASE_OFFSET + 0x14)
+#define CSI2RX_ULPS_STATUS_CLK_LANE_ULPS 0x1
+#define CSI2RX_ULPS_STATUS_DAT_LANE0_ULPS 0x2
+#define CSI2RX_ULPS_STATUS_DAT_LANE1_ULPS 0x4
+#define CSI2RX_ULPS_STATUS_DAT_LANE2_ULPS 0x8
+#define CSI2RX_ULPS_STATUS_DAT_LANE3_ULPS 0x10
+#define CSI2RX_ULPS_STATUS_CLK_LANE_MARK 0x20
+#define CSI2RX_ULPS_STATUS_DAT_LANE0_MARK 0x40
+#define CSI2RX_ULPS_STATUS_DAT_LANE1_MARK 0x80
+#define CSI2RX_ULPS_STATUS_DAT_LANE2_MARK 0x100
+#define CSI2RX_ULPS_STATUS_DAT_LANE3_MARK 0x200
+
+#define CSI2RX_PPI_ERRSOT_HS (CSI2RX_BASE_OFFSET + 0x18)
+#define CSI2RX_PPI_ERRSOT_HS_DAT_LANE0 0x1
+#define CSI2RX_PPI_ERRSOT_HS_DAT_LANE1 0x2
+#define CSI2RX_PPI_ERRSOT_HS_DAT_LANE2 0x4
+#define CSI2RX_PPI_ERRSOT_HS_DAT_LANE3 0x8
+
+#define CSI2RX_PPI_ERRSOTSYNC_HS (CSI2RX_BASE_OFFSET + 0x1C)
+#define CSI2RX_PPI_ERRSOTSYNC_HS_DAT_LANE0 0x1
+#define CSI2RX_PPI_ERRSOTSYNC_HS_DAT_LANE1 0x2
+#define CSI2RX_PPI_ERRSOTSYNC_HS_DAT_LANE2 0x4
+#define CSI2RX_PPI_ERRSOTSYNC_HS_DAT_LANE3 0x8
+
+#define CSI2RX_PPI_ERRESC (CSI2RX_BASE_OFFSET + 0x20)
+#define CSI2RX_PPI_ERRESC_DAT_LANE0 0x1
+#define CSI2RX_PPI_ERRESC_DAT_LANE1 0x2
+#define CSI2RX_PPI_ERRESC_DAT_LANE2 0x4
+#define CSI2RX_PPI_ERRESC_DAT_LANE3 0x8
+
+#define CSI2RX_PPI_ERRSYNCESC (CSI2RX_BASE_OFFSET + 0x24)
+#define CSI2RX_PPI_ERRSYNCESC_DAT_LANE0 0x1
+#define CSI2RX_PPI_ERRSYNCESC_DAT_LANE1 0x2
+#define CSI2RX_PPI_ERRSYNCESC_DAT_LANE2 0x4
+#define CSI2RX_PPI_ERRSYNCESC_DAT_LANE3 0x8
+
+#define CSI2RX_PPI_ERRCONTROL (CSI2RX_BASE_OFFSET + 0x28)
+#define CSI2RX_PPI_ERRCONTROL_DAT_LANE0 0x1
+#define CSI2RX_PPI_ERRCONTROL_DAT_LANE1 0x2
+#define CSI2RX_PPI_ERRCONTROL_DAT_LANE2 0x4
+#define CSI2RX_PPI_ERRCONTROL_DAT_LANE3 0x8
+
+#define CSI2RX_CFG_DISABLE_PAYLOAD_0 (CSI2RX_BASE_OFFSET + 0x2C)
+#define CSI2RX_CFG_DISABLE_PAYLOAD_TYPE_LEGACY_YUV420_8BIT BIT(10)
+#define CSI2RX_CFG_DISABLE_PAYLOAD_TYPE_YUV422_8BIT BIT(14)
+#define CSI2RX_CFG_DISABLE_PAYLOAD_TYPE_YUV422_10BIT BIT(15)
+#define CSI2RX_CFG_DISABLE_PAYLOAD_TYPE_RGB444 BIT(16)
+#define CSI2RX_CFG_DISABLE_PAYLOAD_TYPE_RGB555 BIT(17)
+#define CSI2RX_CFG_DISABLE_PAYLOAD_TYPE_RGB565 BIT(18)
+#define CSI2RX_CFG_DISABLE_PAYLOAD_TYPE_RGB666 BIT(19)
+#define CSI2RX_CFG_DISABLE_PAYLOAD_TYPE_RGB888 BIT(20)
+#define CSI2RX_CFG_DISABLE_PAYLOAD_TYPE_RAW6 BIT(24)
+#define CSI2RX_CFG_DISABLE_PAYLOAD_TYPE_RAW7 BIT(25)
+#define CSI2RX_CFG_DISABLE_PAYLOAD_TYPE_RAW8 BIT(26)
+#define CSI2RX_CFG_DISABLE_PAYLOAD_TYPE_RAW10 BIT(27)
+#define CSI2RX_CFG_DISABLE_PAYLOAD_TYPE_RAW12 BIT(28)
+#define CSI2RX_CFG_DISABLE_PAYLOAD_TYPE_RAW14 BIT(29)
+
+#define CSI2RX_CFG_DISABLE_PAYLOAD_1 (CSI2RX_BASE_OFFSET + 0x30)
+
+struct csis_hw_reset {
+ struct regmap *src;
+ u8 req_src;
+ u8 rst_val;
+};
+
+struct csis_phy_gpr {
+ struct regmap *gpr;
+ u8 req_src;
+};
+
+struct mxc_mipi_csi2_dev {
+ struct v4l2_subdev sd;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_subdev *sensor_sd;
+
+ struct media_pad pads[MXC_MIPI_CSI2_VCX_PADS_NUM];
+ struct v4l2_mbus_framefmt format;
+
+ void __iomem *csr_regs;
+ void __iomem *base_regs;
+ struct platform_device *pdev;
+ u32 flags;
+ int irq;
+
+ struct clk *clk_core;
+ struct clk *clk_esc;
+ struct clk *clk_pxl;
+
+ struct csis_hw_reset hw_reset;
+ struct csis_phy_gpr phy_gpr;
+
+ struct v4l2_async_subdev asd;
+ struct v4l2_async_notifier subdev_notifier;
+ struct v4l2_async_subdev *async_subdevs[2];
+
+ struct device *pd_csi;
+ struct device *pd_isi;
+ struct device_link *pd_csi_link;
+ struct device_link *pd_isi_link;
+
+ struct mutex lock;
+
+ int id;
+ u32 hs_settle;
+ u32 send_level;
+ u32 num_lanes;
+ u8 data_lanes[4];
+ u8 vchannel;
+ u8 running;
+};
+
+struct mxc_hs_info {
+ u32 width;
+ u32 height;
+ u32 frame_rate;
+ u32 val;
+};
+
+enum mxc_mipi_csi2_pm_state {
+ MXC_MIPI_CSI2_PM_POWERED = 0x1,
+ MXC_MIPI_CSI2_PM_SUSPENDED = 0x2,
+ MXC_MIPI_CSI2_RUNTIME_SUSPENDED = 0x4,
+};
+
+/* PRG_RXHS_SETTLE by D-PHY data rate:
+ * 0 ~ 80Mbps: 0xD
+ * 80 ~ 250Mbps: 0xA
+ * 250Mbps ~ 1.5Gbps: 0x7
+ */
+static u8 rxhs_settle[3] = {0xD, 0xA, 0x7};
+
+static struct mxc_hs_info hs_setting[] = {
+ {2592, 1944, 30, 0x0B},
+ {2592, 1944, 15, 0x10},
+
+ {1920, 1080, 30, 0x0B},
+ {1920, 1080, 15, 0x10},
+
+ {1280, 720, 30, 0x11},
+ {1280, 720, 15, 0x16},
+
+ {1024, 768, 30, 0x11},
+ {1024, 768, 15, 0x23},
+
+ {720, 576, 30, 0x1E},
+ {720, 576, 15, 0x23},
+
+ {720, 480, 30, 0x1E},
+ {720, 480, 15, 0x23},
+
+ {640, 480, 30, 0x1E},
+ {640, 480, 15, 0x23},
+
+ {320, 240, 30, 0x1E},
+ {320, 240, 15, 0x23},
+
+ {176, 144, 30, 0x1E},
+ {176, 144, 15, 0x23},
+};
+
+static struct imx_sc_ipc *pm_ipc_handle;
+
+static inline struct mxc_mipi_csi2_dev *sd_to_mxc_mipi_csi2_dev(struct v4l2_subdev *sdev)
+{
+ return container_of(sdev, struct mxc_mipi_csi2_dev, sd);
+}
+
+/****************************************
+ * rxhs-settle calculation:
+ * UI = 1000 / mipi csi phy clock (MHz)
+ * THS-SETTLE_min = 85ns + 6 * UI
+ * THS-SETTLE_max = 145ns + 10 * UI
+ * THS-SETTLE = (THS-SETTLE_min + THS-SETTLE_max) / 2
+ * PRG_RXHS_SETTLE = THS-SETTLE / (Tperiod of RxClk_ESC) - 1
+ ****************************************/
+static int calc_hs_settle(struct mxc_mipi_csi2_dev *csi2dev, u32 dphy_clk)
+{
+ u32 esc_rate;
+ u32 hs_settle;
+ u32 rxhs_settle;
+ u32 hs_settle_min;
+ u32 hs_settle_max;
+
+ esc_rate = clk_get_rate(csi2dev->clk_esc) / 1000000;
+ hs_settle_min = 85 + 6 * 1000 / dphy_clk;
+ hs_settle_max = 145 + 10 * 1000 / dphy_clk;
+ hs_settle = (hs_settle_min + hs_settle_max) >> 1;
+ rxhs_settle = hs_settle / (1000 / esc_rate) - 1;
+ return rxhs_settle;
+}
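+
+/*
+ * Worked example (integer arithmetic as above; values chosen for
+ * illustration only): dphy_clk = 500 MHz gives UI = 2 ns, so THS-SETTLE
+ * spans 97..165 ns with midpoint 131 ns. With a 60 MHz esc clock,
+ * 1000 / 60 = 16 ns per cycle and 131 / 16 - 1 = 7, i.e.
+ * PRG_RXHS_SETTLE = 7.
+ */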
+
+static void mxc_mipi_csi2_reg_dump(struct mxc_mipi_csi2_dev *csi2dev)
+{
+ struct device *dev = &csi2dev->pdev->dev;
+ struct {
+ u32 offset;
+ const char name[32];
+ } registers[] = {
+ { 0x100, "MIPI CSI2 HC num of lanes" },
+ { 0x104, "MIPI CSI2 HC dis lanes" },
+ { 0x108, "MIPI CSI2 HC BIT ERR" },
+ { 0x10C, "MIPI CSI2 HC IRQ STATUS" },
+ { 0x110, "MIPI CSI2 HC IRQ MASK" },
+ { 0x114, "MIPI CSI2 HC ULPS STATUS" },
+ { 0x118, "MIPI CSI2 HC DPHY ErrSotHS" },
+ { 0x11c, "MIPI CSI2 HC DPHY ErrSotSync" },
+ { 0x120, "MIPI CSI2 HC DPHY ErrEsc" },
+ { 0x124, "MIPI CSI2 HC DPHY ErrSyncEsc" },
+ { 0x128, "MIPI CSI2 HC DPHY ErrControl" },
+ { 0x12C, "MIPI CSI2 HC DISABLE_PAYLOAD" },
+ { 0x130, "MIPI CSI2 HC DISABLE_PAYLOAD" },
+ { 0x180, "MIPI CSI2 HC IGNORE_VC" },
+ { 0x184, "MIPI CSI2 HC VID_VC" },
+ { 0x188, "MIPI CSI2 HC FIFO_SEND_LEVEL" },
+ { 0x18C, "MIPI CSI2 HC VID_VSYNC" },
+ { 0x190, "MIPI CSI2 HC VID_SYNC_FP" },
+ { 0x194, "MIPI CSI2 HC VID_HSYNC" },
+ { 0x198, "MIPI CSI2 HC VID_HSYNC_BP" },
+ { 0x000, "MIPI CSI2 CSR PLM_CTRL" },
+ { 0x004, "MIPI CSI2 CSR PHY_CTRL" },
+ { 0x008, "MIPI CSI2 CSR PHY_Status" },
+ { 0x010, "MIPI CSI2 CSR PHY_Test_Status" },
+ { 0x014, "MIPI CSI2 CSR PHY_Test_Status" },
+ { 0x018, "MIPI CSI2 CSR PHY_Test_Status" },
+ { 0x01C, "MIPI CSI2 CSR PHY_Test_Status" },
+ { 0x020, "MIPI CSI2 CSR PHY_Test_Status" },
+ { 0x030, "MIPI CSI2 CSR VC Interlaced" },
+ { 0x038, "MIPI CSI2 CSR Data Type Dis" },
+ { 0x040, "MIPI CSI2 CSR 420 1st type" },
+ { 0x044, "MIPI CSI2 CSR Ctr_Ck_Rst_Ctr" },
+ { 0x048, "MIPI CSI2 CSR Stream Fencing" },
+ { 0x04C, "MIPI CSI2 CSR Stream Fencing" },
+ };
+ u32 i;
+
+ dev_dbg(dev, "MIPI CSI2 CSR and HC register dump, mipi csi%d\n", csi2dev->id);
+ for (i = 0; i < ARRAY_SIZE(registers); i++) {
+ u32 reg = readl(csi2dev->base_regs + registers[i].offset);
+ dev_dbg(dev, "%20s[0x%.3x]: 0x%.3x\n",
+ registers[i].name, registers[i].offset, reg);
+ }
+}
+
+static int mipi_sc_fw_init(struct mxc_mipi_csi2_dev *csi2dev, char enable)
+{
+ struct device *dev = &csi2dev->pdev->dev;
+ u32 rsrc_id;
+ int ret;
+
+ ret = imx_scu_get_handle(&pm_ipc_handle);
+ if (ret) {
+ dev_err(dev, "sc_misc_MIPI get ipc handle failed! ret = (%d)\n", ret);
+ return ret;
+ }
+
+ if (csi2dev->id == 1)
+ rsrc_id = IMX_SC_R_CSI_1;
+ else
+ rsrc_id = IMX_SC_R_CSI_0;
+
+ ret = imx_sc_misc_set_control(pm_ipc_handle,
+ rsrc_id, IMX_SC_C_MIPI_RESET, enable);
+ if (ret < 0) {
+ dev_err(dev, "sc_misc_MIPI reset failed! ret = (%d)\n", ret);
+ return ret;
+ }
+
+ msleep(10);
+ return 0;
+}
+
+static int find_hs_configure(struct v4l2_subdev_format *sd_fmt)
+{
+ struct v4l2_mbus_framefmt *fmt = &sd_fmt->format;
+ u32 frame_rate = fmt->reserved[1];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hs_setting); i++) {
+ if (hs_setting[i].width == fmt->width &&
+ hs_setting[i].height == fmt->height &&
+ hs_setting[i].frame_rate == frame_rate)
+ return hs_setting[i].val;
+ }
+
+ pr_err("cannot find HS setting for w/h@fps=(%d, %d)@%d\n",
+ fmt->width, fmt->height, frame_rate);
+
+ return -EINVAL;
+}
+
+static void mxc_mipi_csi2_reset(struct mxc_mipi_csi2_dev *csi2dev)
+{
+ u32 val;
+
+ /* Reset MIPI CSI */
+ val = CSI2SS_CTRL_CLK_RESET_EN | CSI2SS_CTRL_CLK_RESET_CLK_OFF;
+ writel(val, csi2dev->csr_regs + CSI2SS_CTRL_CLK_RESET);
+}
+
+static void mxc_mipi_csi2_enable(struct mxc_mipi_csi2_dev *csi2dev)
+{
+ struct device *dev = &csi2dev->pdev->dev;
+ u32 val = 0;
+
+ val = readl(csi2dev->csr_regs + CSI2SS_PLM_CTRL);
+ while (val & CSI2SS_PLM_CTRL_PL_CLK_RUN) {
+ msleep(10);
+ val = readl(csi2dev->csr_regs + CSI2SS_PLM_CTRL);
+ dev_dbg(dev, "Waiting pl clk running, val=0x%x\n", val);
+ }
+
+ /* Enable Pixel link Master*/
+ val = readl(csi2dev->csr_regs + CSI2SS_PLM_CTRL);
+ val |= CSI2SS_PLM_CTRL_ENABLE_PL;
+ writel(val, csi2dev->csr_regs + CSI2SS_PLM_CTRL);
+
+ val |= CSI2SS_PLM_CTRL_VALID_OVERRIDE;
+ writel(val, csi2dev->csr_regs + CSI2SS_PLM_CTRL);
+
+ /* PHY Enable */
+ val = readl(csi2dev->csr_regs + CSI2SS_PHY_CTRL);
+ val &= ~(CSI2SS_PHY_CTRL_PD_MASK | CSI2SS_PLM_CTRL_POLARITY_MASK);
+ writel(val, csi2dev->csr_regs + CSI2SS_PHY_CTRL);
+
+ /* Deassert reset */
+ writel(1, csi2dev->csr_regs + CSI2SS_CTRL_CLK_RESET);
+}
+
+static void mxc_mipi_csi2_disable(struct mxc_mipi_csi2_dev *csi2dev)
+{
+ /* Disable Data lanes */
+ writel(0xf, csi2dev->base_regs + CSI2RX_CFG_DISABLE_DATA_LANES);
+
+ /* Disable Pixel Link */
+ writel(0, csi2dev->csr_regs + CSI2SS_PLM_CTRL);
+
+ /* Disable PHY */
+ writel(0, csi2dev->csr_regs + CSI2SS_PHY_CTRL);
+
+ /* Reset */
+ writel(2, csi2dev->csr_regs + CSI2SS_CTRL_CLK_RESET);
+}
+
+static void mxc_mipi_csi2_csr_config(struct mxc_mipi_csi2_dev *csi2dev)
+{
+ u32 val;
+
+ /* format */
+ val = 0;
+ writel(val, csi2dev->csr_regs + CSI2SS_DATA_TYPE);
+
+ /* polarity */
+ val = readl(csi2dev->csr_regs + CSI2SS_PLM_CTRL);
+ val &= ~(CSI2SS_PLM_CTRL_VSYNC_OVERRIDE |
+ CSI2SS_PLM_CTRL_HSYNC_OVERRIDE |
+ CSI2SS_PLM_CTRL_VALID_OVERRIDE |
+ CSI2SS_PLM_CTRL_POLARITY_MASK);
+
+ writel(val, csi2dev->csr_regs + CSI2SS_PLM_CTRL);
+
+ val = CSI2SS_PHY_CTRL_RX_ENABLE |
+ CSI2SS_PHY_CTRL_DDRCLK_EN << CSI2SS_PHY_CTRL_DDRCLK_EN_OFFSET |
+ CSI2SS_PHY_CTRL_CONT_CLK_MODE << CSI2SS_PHY_CTRL_CONT_CLK_MODE_OFFSET |
+ csi2dev->hs_settle << CSI2SS_PHY_CTRL_RX_HS_SETTLE_OFFSET |
+ CSI2SS_PHY_CTRL_PD << CSI2SS_PHY_CTRL_PD_OFFSET |
+ CSI2SS_PHY_CTRL_RTERM_SEL << CSI2SS_PHY_CTRL_RTERM_SEL_OFFSET |
+ CSI2SS_PHY_CTRL_AUTO_PD_EN << CSI2SS_PHY_CTRL_AUTO_PD_EN_OFFSET;
+
+ writel(val, csi2dev->csr_regs + CSI2SS_PHY_CTRL);
+}
+
+static void mxc_mipi_csi2_hc_config(struct mxc_mipi_csi2_dev *csi2dev)
+{
+ u32 val0, val1;
+ u32 i;
+
+ val0 = 0;
+
+ /* Lanes */
+ writel(csi2dev->num_lanes - 1, csi2dev->base_regs + CSI2RX_CFG_NUM_LANES);
+
+ for (i = 0; i < csi2dev->num_lanes; i++)
+ val0 |= (1 << (csi2dev->data_lanes[i] - 1));
+
+ val1 = 0xF & ~val0;
+ writel(val1, csi2dev->base_regs + CSI2RX_CFG_DISABLE_DATA_LANES);
+
+ /* Mask interrupt */
+ writel(0x1FF, csi2dev->base_regs + CSI2RX_IRQ_MASK);
+
+ /* vid_vc */
+ writel(3, csi2dev->base_regs + 0x184);
+}
+
+static struct media_pad *mxc_csi2_get_remote_sensor_pad(struct mxc_mipi_csi2_dev *csi2dev)
+{
+ struct v4l2_subdev *subdev = &csi2dev->sd;
+ struct media_pad *sink_pad, *source_pad;
+ int i;
+
+ for (i = 0; i < subdev->entity.num_pads; i++) {
+ sink_pad = &subdev->entity.pads[i];
+
+ if (sink_pad->flags & MEDIA_PAD_FL_SINK) {
+ source_pad = media_entity_remote_pad(sink_pad);
+ if (source_pad)
+ return source_pad;
+ }
+ }
+
+ v4l2_err(&csi2dev->sd, "%s, No remote pad found!\n", __func__);
+
+ return NULL;
+}
+
+static struct v4l2_subdev *mxc_get_remote_subdev(struct mxc_mipi_csi2_dev *csi2dev,
+ const char * const label)
+{
+ struct media_pad *source_pad;
+ struct v4l2_subdev *sen_sd;
+
+ /* Get remote source pad */
+ source_pad = mxc_csi2_get_remote_sensor_pad(csi2dev);
+ if (!source_pad) {
+ v4l2_err(&csi2dev->sd, "%s, No remote pad found!\n", label);
+ return NULL;
+ }
+
+ /* Get remote source pad subdev */
+ sen_sd = media_entity_to_v4l2_subdev(source_pad->entity);
+ if (!sen_sd) {
+ v4l2_err(&csi2dev->sd, "%s, No remote subdev found!\n", label);
+ return NULL;
+ }
+
+ return sen_sd;
+}
+
+static int mxc_csi2_get_sensor_fmt(struct mxc_mipi_csi2_dev *csi2dev)
+{
+ struct v4l2_mbus_framefmt *mf = &csi2dev->format;
+ struct v4l2_subdev *sen_sd;
+ struct v4l2_subdev_format src_fmt;
+ struct media_pad *source_pad;
+ int ret;
+
+ /* Get remote source pad */
+ source_pad = mxc_csi2_get_remote_sensor_pad(csi2dev);
+ if (!source_pad) {
+ v4l2_err(&csi2dev->sd, "%s, No remote pad found!\n", __func__);
+ return -EINVAL;
+ }
+
+ sen_sd = mxc_get_remote_subdev(csi2dev, __func__);
+ if (!sen_sd)
+ return -EINVAL;
+
+ memset(&src_fmt, 0, sizeof(src_fmt));
+ src_fmt.pad = source_pad->index;
+ src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sen_sd, pad, get_fmt, NULL, &src_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EINVAL;
+
+ /* Update input frame size and format */
+ memcpy(mf, &src_fmt.format, sizeof(struct v4l2_mbus_framefmt));
+
+ dev_dbg(&csi2dev->pdev->dev, "width=%d, height=%d, fmt.code=0x%x\n",
+ mf->width, mf->height, mf->code);
+
+ /* Get rxhs settle */
+ if (src_fmt.format.reserved[0] != 0) {
+ csi2dev->hs_settle =
+ calc_hs_settle(csi2dev, src_fmt.format.reserved[0]);
+ } else if (src_fmt.format.reserved[1] != 0) {
+ csi2dev->hs_settle = find_hs_configure(&src_fmt);
+ } else {
+ if (src_fmt.format.height * src_fmt.format.width > 1024 * 768)
+ csi2dev->hs_settle = rxhs_settle[2];
+ else if (src_fmt.format.height * src_fmt.format.width < 480 * 320)
+ csi2dev->hs_settle = rxhs_settle[0];
+ else
+ csi2dev->hs_settle = rxhs_settle[1];
+ }
+
+ return 0;
+}
+
+static int mipi_csi2_clk_init(struct mxc_mipi_csi2_dev *csi2dev)
+{
+ struct device *dev = &csi2dev->pdev->dev;
+
+ csi2dev->clk_core = devm_clk_get(dev, "clk_core");
+ if (IS_ERR(csi2dev->clk_core)) {
+ dev_err(dev, "failed to get csi core clk\n");
+ return PTR_ERR(csi2dev->clk_core);
+ }
+
+ csi2dev->clk_esc = devm_clk_get(dev, "clk_esc");
+ if (IS_ERR(csi2dev->clk_esc)) {
+ dev_err(dev, "failed to get csi esc clk\n");
+ return PTR_ERR(csi2dev->clk_esc);
+ }
+
+ csi2dev->clk_pxl = devm_clk_get(dev, "clk_pxl");
+ if (IS_ERR(csi2dev->clk_pxl)) {
+ dev_err(dev, "failed to get csi pixel link clk\n");
+ return PTR_ERR(csi2dev->clk_pxl);
+ }
+
+ return 0;
+}
+
+static int mipi_csi2_attach_pd(struct mxc_mipi_csi2_dev *csi2dev)
+{
+ struct device *dev = &csi2dev->pdev->dev;
+ struct device_link *link;
+
+ csi2dev->pd_csi = dev_pm_domain_attach_by_name(dev, "pd_csi");
+ if (IS_ERR(csi2dev->pd_csi)) {
+ if (PTR_ERR(csi2dev->pd_csi) != -EPROBE_DEFER)
+ dev_err(dev, "attach pd_csi domain for csi fail\n");
+ return PTR_ERR(csi2dev->pd_csi);
+ }
+ link = device_link_add(dev, csi2dev->pd_csi,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME);
+ if (IS_ERR(link))
+ return PTR_ERR(link);
+ csi2dev->pd_csi_link = link;
+
+ csi2dev->pd_isi = dev_pm_domain_attach_by_name(dev, "pd_isi_ch0");
+ if (IS_ERR(csi2dev->pd_isi)) {
+ if (PTR_ERR(csi2dev->pd_isi) != -EPROBE_DEFER)
+ dev_err(dev, "attach pd_isi_ch0 domain for csi fail\n");
+ return PTR_ERR(csi2dev->pd_isi);
+ }
+ link = device_link_add(dev, csi2dev->pd_isi,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME);
+ if (IS_ERR(link))
+ return PTR_ERR(link);
+ csi2dev->pd_isi_link = link;
+
+ return 0;
+}
+
+static void mipi_csi2_detach_pd(struct mxc_mipi_csi2_dev *csi2dev)
+{
+ device_link_del(csi2dev->pd_csi_link);
+ device_link_del(csi2dev->pd_isi_link);
+ dev_pm_domain_detach(csi2dev->pd_csi, true);
+ dev_pm_domain_detach(csi2dev->pd_isi, true);
+}
+
+static int mipi_csi2_clk_enable(struct mxc_mipi_csi2_dev *csi2dev)
+{
+ struct device *dev = &csi2dev->pdev->dev;
+ int ret;
+
+ ret = clk_prepare_enable(csi2dev->clk_core);
+ if (ret < 0) {
+ dev_err(dev, "%s, prepare clk_core error\n", __func__);
+ return ret;
+ }
+ ret = clk_prepare_enable(csi2dev->clk_esc);
+ if (ret < 0) {
+ dev_err(dev, "%s, prepare clk_esc error\n", __func__);
+ goto err_disable_core;
+ }
+ ret = clk_prepare_enable(csi2dev->clk_pxl);
+ if (ret < 0) {
+ dev_err(dev, "%s, prepare clk_pxl error\n", __func__);
+ goto err_disable_esc;
+ }
+
+ return 0;
+
+err_disable_esc:
+ clk_disable_unprepare(csi2dev->clk_esc);
+err_disable_core:
+ clk_disable_unprepare(csi2dev->clk_core);
+ return ret;
+}
+
+static void mipi_csi2_clk_disable(struct mxc_mipi_csi2_dev *csi2dev)
+{
+ clk_disable_unprepare(csi2dev->clk_core);
+ clk_disable_unprepare(csi2dev->clk_esc);
+ clk_disable_unprepare(csi2dev->clk_pxl);
+}
+
+static int mipi_csi2_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
+{
+ return 0;
+}
+
+/* mipi csi2 subdev media entity operations */
+static int mipi_csi2_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ /* TODO */
+ /* Add MIPI source and sink pad link configuration */
+ if (local->flags & MEDIA_PAD_FL_SOURCE) {
+ switch (local->index) {
+ case MXC_MIPI_CSI2_VC0_PAD_SOURCE:
+ case MXC_MIPI_CSI2_VC1_PAD_SOURCE:
+ case MXC_MIPI_CSI2_VC2_PAD_SOURCE:
+ case MXC_MIPI_CSI2_VC3_PAD_SOURCE:
+ break;
+ default:
+ return 0;
+ }
+ } else if (local->flags & MEDIA_PAD_FL_SINK) {
+ switch (local->index) {
+ case MXC_MIPI_CSI2_VC0_PAD_SINK:
+ case MXC_MIPI_CSI2_VC1_PAD_SINK:
+ case MXC_MIPI_CSI2_VC2_PAD_SINK:
+ case MXC_MIPI_CSI2_VC3_PAD_SINK:
+ break;
+ default:
+ return 0;
+ }
+ }
+ return 0;
+}
+
+static const struct media_entity_operations mipi_csi2_sd_media_ops = {
+ .link_setup = mipi_csi2_link_setup,
+};
+
+/*
+ * V4L2 subdev operations
+ */
+static int mipi_csi2_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct mxc_mipi_csi2_dev *csi2dev = sd_to_mxc_mipi_csi2_dev(sd);
+ struct v4l2_subdev *sen_sd;
+
+ sen_sd = mxc_get_remote_subdev(csi2dev, __func__);
+ if (!sen_sd)
+ return -EINVAL;
+
+ return v4l2_subdev_call(sen_sd, core, s_power, on);
+}
+
+static int mipi_csi2_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct mxc_mipi_csi2_dev *csi2dev = sd_to_mxc_mipi_csi2_dev(sd);
+ struct v4l2_subdev *sen_sd;
+
+ sen_sd = mxc_get_remote_subdev(csi2dev, __func__);
+ if (!sen_sd)
+ return -EINVAL;
+
+ return v4l2_subdev_call(sen_sd, video, g_frame_interval, interval);
+}
+
+static int mipi_csi2_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct mxc_mipi_csi2_dev *csi2dev = sd_to_mxc_mipi_csi2_dev(sd);
+ struct v4l2_subdev *sen_sd;
+
+ sen_sd = mxc_get_remote_subdev(csi2dev, __func__);
+ if (!sen_sd)
+ return -EINVAL;
+
+ return v4l2_subdev_call(sen_sd, video, s_frame_interval, interval);
+}
+
+static int mipi_csi2_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct mxc_mipi_csi2_dev *csi2dev = sd_to_mxc_mipi_csi2_dev(sd);
+ struct device *dev = &csi2dev->pdev->dev;
+ int ret = 0;
+
+ dev_dbg(&csi2dev->pdev->dev, "%s: %d, csi2dev: 0x%x\n",
+ __func__, enable, csi2dev->flags);
+
+ if (enable) {
+ pm_runtime_get_sync(dev);
+ if (!csi2dev->running++) {
+ mxc_csi2_get_sensor_fmt(csi2dev);
+ mxc_mipi_csi2_hc_config(csi2dev);
+ mxc_mipi_csi2_reset(csi2dev);
+ mxc_mipi_csi2_csr_config(csi2dev);
+ mxc_mipi_csi2_enable(csi2dev);
+ mxc_mipi_csi2_reg_dump(csi2dev);
+ }
+ } else {
+ if (!--csi2dev->running)
+ mxc_mipi_csi2_disable(csi2dev);
+
+ pm_runtime_put(dev);
+ }
+
+ return ret;
+}
+
+static int mipi_csi2_enum_framesizes(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct mxc_mipi_csi2_dev *csi2dev = sd_to_mxc_mipi_csi2_dev(sd);
+ struct v4l2_subdev *sen_sd;
+
+ sen_sd = mxc_get_remote_subdev(csi2dev, __func__);
+ if (!sen_sd)
+ return -EINVAL;
+
+ return v4l2_subdev_call(sen_sd, pad, enum_frame_size, NULL, fse);
+}
+
+static int mipi_csi2_enum_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ struct mxc_mipi_csi2_dev *csi2dev = sd_to_mxc_mipi_csi2_dev(sd);
+ struct v4l2_subdev *sen_sd;
+
+ sen_sd = mxc_get_remote_subdev(csi2dev, __func__);
+ if (!sen_sd)
+ return -EINVAL;
+
+ return v4l2_subdev_call(sen_sd, pad, enum_frame_interval, NULL, fie);
+}
+
+static int mipi_csi2_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct mxc_mipi_csi2_dev *csi2dev = sd_to_mxc_mipi_csi2_dev(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+
+ mxc_csi2_get_sensor_fmt(csi2dev);
+
+ memcpy(mf, &csi2dev->format, sizeof(struct v4l2_mbus_framefmt));
+ /* Source/Sink pads crop rectangle size */
+
+ return 0;
+}
+
+static int mipi_csi2_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct mxc_mipi_csi2_dev *csi2dev = sd_to_mxc_mipi_csi2_dev(sd);
+ struct v4l2_subdev *sen_sd;
+ struct media_pad *source_pad;
+ int ret;
+
+ /* Get remote source pad */
+ source_pad = mxc_csi2_get_remote_sensor_pad(csi2dev);
+ if (!source_pad) {
+ v4l2_err(&csi2dev->sd, "%s, No remote pad found!\n", __func__);
+ return -EINVAL;
+ }
+
+ sen_sd = mxc_get_remote_subdev(csi2dev, __func__);
+ if (!sen_sd)
+ return -EINVAL;
+
+ fmt->pad = source_pad->index;
+ ret = v4l2_subdev_call(sen_sd, pad, set_fmt, NULL, fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ return 0;
+}
+
+static const struct v4l2_subdev_internal_ops mipi_csi2_sd_internal_ops = {
+ .open = mipi_csi2_open,
+};
+
+static struct v4l2_subdev_pad_ops mipi_csi2_pad_ops = {
+ .enum_frame_size = mipi_csi2_enum_framesizes,
+ .enum_frame_interval = mipi_csi2_enum_frame_interval,
+ .get_fmt = mipi_csi2_get_fmt,
+ .set_fmt = mipi_csi2_set_fmt,
+};
+
+static struct v4l2_subdev_core_ops mipi_csi2_core_ops = {
+ .s_power = mipi_csi2_s_power,
+};
+
+static struct v4l2_subdev_video_ops mipi_csi2_video_ops = {
+ .g_frame_interval = mipi_csi2_g_frame_interval,
+ .s_frame_interval = mipi_csi2_s_frame_interval,
+ .s_stream = mipi_csi2_s_stream,
+};
+
+static struct v4l2_subdev_ops mipi_csi2_subdev_ops = {
+ .core = &mipi_csi2_core_ops,
+ .video = &mipi_csi2_video_ops,
+ .pad = &mipi_csi2_pad_ops,
+};
+
+static int mipi_csi2_parse_dt(struct mxc_mipi_csi2_dev *csi2dev)
+{
+ struct device *dev = &csi2dev->pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct v4l2_fwnode_endpoint endpoint;
+ u32 i;
+
+ csi2dev->id = of_alias_get_id(node, "csi");
+
+ csi2dev->vchannel = of_property_read_bool(node, "virtual-channel");
+
+ node = of_graph_get_next_endpoint(node, NULL);
+ if (!node) {
+ dev_err(dev, "No port node at %s\n", node->full_name);
+ return -EINVAL;
+ }
+
+ /* Parse the endpoint's MIPI CSI-2 bus properties */
+ memset(&endpoint, 0x0, sizeof(endpoint));
+ v4l2_fwnode_endpoint_parse(of_fwnode_handle(node), &endpoint);
+
+ csi2dev->num_lanes = endpoint.bus.mipi_csi2.num_data_lanes;
+ for (i = 0; i < 4; i++)
+ csi2dev->data_lanes[i] = endpoint.bus.mipi_csi2.data_lanes[i];
+
+ of_node_put(node);
+ return 0;
+}
+
+static int mipi_csi2_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *mem_res;
+ struct mxc_mipi_csi2_dev *csi2dev;
+ int ret = -ENOMEM;
+
+ csi2dev = devm_kzalloc(dev, sizeof(*csi2dev), GFP_KERNEL);
+ if (!csi2dev)
+ return -ENOMEM;
+
+ csi2dev->pdev = pdev;
+ mutex_init(&csi2dev->lock);
+
+ ret = mipi_csi2_parse_dt(csi2dev);
+ if (ret < 0)
+ return ret;
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ csi2dev->base_regs = devm_ioremap_resource(dev, mem_res);
+ if (IS_ERR(csi2dev->base_regs)) {
+ dev_err(dev, "Failed to get mipi csi2 HC register\n");
+ return PTR_ERR(csi2dev->base_regs);
+ }
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ csi2dev->csr_regs = devm_ioremap_resource(dev, mem_res);
+ if (IS_ERR(csi2dev->csr_regs)) {
+ dev_err(dev, "Failed to get mipi CSR register\n");
+ return PTR_ERR(csi2dev->csr_regs);
+ }
+
+ ret = mipi_csi2_clk_init(csi2dev);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_csi2_attach_pd(csi2dev);
+ if (ret < 0)
+ return ret;
+
+ v4l2_subdev_init(&csi2dev->sd, &mipi_csi2_subdev_ops);
+
+ csi2dev->sd.owner = THIS_MODULE;
+ snprintf(csi2dev->sd.name, sizeof(csi2dev->sd.name), "%s.%d",
+ MXC_MIPI_CSI2_SUBDEV_NAME, csi2dev->id);
+
+ csi2dev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ csi2dev->sd.entity.function = MEDIA_ENT_F_IO_V4L;
+ csi2dev->sd.dev = dev;
+
+ csi2dev->pads[MXC_MIPI_CSI2_VC0_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ csi2dev->pads[MXC_MIPI_CSI2_VC1_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ csi2dev->pads[MXC_MIPI_CSI2_VC2_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ csi2dev->pads[MXC_MIPI_CSI2_VC3_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ csi2dev->pads[MXC_MIPI_CSI2_VC0_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ csi2dev->pads[MXC_MIPI_CSI2_VC1_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ csi2dev->pads[MXC_MIPI_CSI2_VC2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ csi2dev->pads[MXC_MIPI_CSI2_VC3_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+ ret = media_entity_pads_init(&csi2dev->sd.entity,
+ MXC_MIPI_CSI2_VCX_PADS_NUM, csi2dev->pads);
+ if (ret < 0)
+ goto e_clkdis;
+
+ csi2dev->sd.entity.ops = &mipi_csi2_sd_media_ops;
+
+ v4l2_set_subdevdata(&csi2dev->sd, pdev);
+ platform_set_drvdata(pdev, csi2dev);
+
+ mipi_sc_fw_init(csi2dev, 1);
+
+ csi2dev->running = 0;
+ pm_runtime_enable(dev);
+
+ dev_info(&pdev->dev, "lanes: %d, name: %s\n",
+ csi2dev->num_lanes, csi2dev->sd.name);
+
+ return 0;
+
+e_clkdis:
+ media_entity_cleanup(&csi2dev->sd.entity);
+ return ret;
+}
+
+static int mipi_csi2_remove(struct platform_device *pdev)
+{
+ struct v4l2_subdev *sd = platform_get_drvdata(pdev);
+ struct mxc_mipi_csi2_dev *csi2dev = sd_to_mxc_mipi_csi2_dev(sd);
+
+ mipi_sc_fw_init(csi2dev, 0);
+ mipi_csi2_detach_pd(csi2dev);
+ media_entity_cleanup(&csi2dev->sd.entity);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int mipi_csi2_pm_suspend(struct device *dev)
+{
+ struct v4l2_subdev *sd = dev_get_drvdata(dev);
+ struct mxc_mipi_csi2_dev *csi2dev = sd_to_mxc_mipi_csi2_dev(sd);
+
+ if (csi2dev->running > 0) {
+ dev_warn(dev, "running, prevent entering suspend.\n");
+ return -EAGAIN;
+ }
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int mipi_csi2_pm_resume(struct device *dev)
+{
+ return pm_runtime_force_resume(dev);
+}
+
+static int mipi_csi2_runtime_suspend(struct device *dev)
+{
+ struct mxc_mipi_csi2_dev *csi2dev = dev_get_drvdata(dev);
+
+ mipi_csi2_clk_disable(csi2dev);
+ return 0;
+}
+
+static int mipi_csi2_runtime_resume(struct device *dev)
+{
+ struct mxc_mipi_csi2_dev *csi2dev = dev_get_drvdata(dev);
+ int ret;
+
+ ret = mipi_csi2_clk_enable(csi2dev);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static const struct dev_pm_ops mipi_csi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mipi_csi2_pm_suspend, mipi_csi2_pm_resume)
+ SET_RUNTIME_PM_OPS(mipi_csi2_runtime_suspend, mipi_csi2_runtime_resume, NULL)
+};
+
+static const struct of_device_id mipi_csi2_of_match[] = {
+ { .compatible = "fsl,mxc-mipi-csi2", },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, mipi_csi2_of_match);
+
+static struct platform_driver mipi_csi2_driver = {
+ .driver = {
+ .name = MXC_MIPI_CSI2_DRIVER_NAME,
+ .of_match_table = mipi_csi2_of_match,
+ .pm = &mipi_csi_pm_ops,
+ },
+ .probe = mipi_csi2_probe,
+ .remove = mipi_csi2_remove,
+};
+
+module_platform_driver(mipi_csi2_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("MXC MIPI CSI2 driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" MXC_MIPI_CSI2_DRIVER_NAME);
diff --git a/drivers/staging/media/imx/imx8-parallel-csi.c b/drivers/staging/media/imx/imx8-parallel-csi.c
new file mode 100644
index 000000000000..408082a29192
--- /dev/null
+++ b/drivers/staging/media/imx/imx8-parallel-csi.c
@@ -0,0 +1,837 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * V4L2 Capture CSI Subdev for Freescale i.MX8QM/QXP SOC
+ *
+ * Copyright (c) 2019 NXP Semiconductor
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/memory.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/videodev2.h>
+#include <media/v4l2-fwnode.h>
+#include <media/v4l2-subdev.h>
+#include <media/v4l2-device.h>
+#include <linux/firmware/imx/sci.h>
+#include <dt-bindings/pinctrl/pads-imx8qxp.h>
+#include <linux/init.h>
+#include <linux/pm_domain.h>
+
+#include "imx8-common.h"
+
+#define MXC_PARALLEL_CSI_DRIVER_NAME "mxc-parallel-csi"
+#define MXC_PARALLEL_CSI_SUBDEV_NAME MXC_PARALLEL_CSI_DRIVER_NAME
+
+#define BIT_U(nr) (1U << (nr))
+#define CI_PI_BASE_OFFSET 0x0U
+
+/* CI_PI INTERFACE Control */
+#define IF_CTRL_REG (CI_PI_BASE_OFFSET + 0x00)
+#define IF_CTRL_REG_PL_ENABLE BIT_U(0)
+#define IF_CTRL_REG_PL_VALID BIT_U(1)
+#define IF_CTRL_REG_PL_ADDR(x) (((x) & 0x7U) << 2)
+#define IF_CTRL_REG_IF_FORCE(x) (((x) & 0x7U) << 5)
+#define IF_CTRL_REG_DATA_TYPE_SEL BIT_U(8)
+#define IF_CTRL_REG_DATA_TYPE(x) (((x) & 0x1FU) << 9)
+
+#define DATA_TYPE_OUT_NULL (0x00)
+#define DATA_TYPE_OUT_RGB (0x04)
+#define DATA_TYPE_OUT_YUV444 (0x08)
+#define DATA_TYPE_OUT_YYU420_ODD (0x10)
+#define DATA_TYPE_OUT_YYU420_EVEN (0x12)
+#define DATA_TYPE_OUT_YYY_ODD (0x18)
+#define DATA_TYPE_OUT_UYVY_EVEN (0x1A)
+#define DATA_TYPE_OUT_RAW (0x1C)
+
+#define IF_CTRL_REG_IF_FORCE_HSYNV_OVERRIDE 0x4
+#define IF_CTRL_REG_IF_FORCE_VSYNV_OVERRIDE 0x2
+#define IF_CTRL_REG_IF_FORCE_DATA_ENABLE_OVERRIDE 0x1
+
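+/*
+ * Each control register has SET/CLR/TOG aliases at offsets +0x4/+0x8/
+ * +0xC: writing a mask there sets, clears or toggles the corresponding
+ * bits, avoiding a read-modify-write cycle (see mxc_pcsi_sw_reset()).
+ */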
+#define IF_CTRL_REG_SET (CI_PI_BASE_OFFSET + 0x04)
+#define IF_CTRL_REG_CLR (CI_PI_BASE_OFFSET + 0x08)
+#define IF_CTRL_REG_TOG (CI_PI_BASE_OFFSET + 0x0C)
+
+/* CSI INTERFACE CONTROL REG */
+#define CSI_CTRL_REG (CI_PI_BASE_OFFSET + 0x10)
+#define CSI_CTRL_REG_CSI_EN BIT_U(0)
+#define CSI_CTRL_REG_PIXEL_CLK_POL BIT_U(1)
+#define CSI_CTRL_REG_HSYNC_POL BIT_U(2)
+#define CSI_CTRL_REG_VSYNC_POL BIT_U(3)
+#define CSI_CTRL_REG_DE_POL BIT_U(4)
+#define CSI_CTRL_REG_PIXEL_DATA_POL BIT_U(5)
+#define CSI_CTRL_REG_CCIR_EXT_VSYNC_EN BIT_U(6)
+#define CSI_CTRL_REG_CCIR_EN BIT_U(7)
+#define CSI_CTRL_REG_CCIR_VIDEO_MODE BIT_U(8)
+#define CSI_CTRL_REG_CCIR_NTSC_EN BIT_U(9)
+#define CSI_CTRL_REG_CCIR_VSYNC_RESET_EN BIT_U(10)
+#define CSI_CTRL_REG_CCIR_ECC_ERR_CORRECT_EN BIT_U(11)
+#define CSI_CTRL_REG_HSYNC_FORCE_EN BIT_U(12)
+#define CSI_CTRL_REG_VSYNC_FORCE_EN BIT_U(13)
+#define CSI_CTRL_REG_GCLK_MODE_EN BIT_U(14)
+#define CSI_CTRL_REG_VALID_SEL BIT_U(15)
+#define CSI_CTRL_REG_RAW_OUT_SEL BIT_U(16)
+#define CSI_CTRL_REG_HSYNC_OUT_SEL BIT_U(17)
+#define CSI_CTRL_REG_HSYNC_PULSE(x) (((x) & 0x7U) << 19)
+#define CSI_CTRL_REG_UV_SWAP_EN BIT_U(22)
+#define CSI_CTRL_REG_DATA_TYPE_IN(x) (((x) & 0xFU) << 23)
+#define CSI_CTRL_REG_MASK_VSYNC_COUNTER(x) (((x) & 0x3U) << 27)
+#define CSI_CTRL_REG_SOFTRST BIT_U(31)
+
+#define DATA_TYPE_IN_UYVY_BT656_8BITS 0x0
+#define DATA_TYPE_IN_UYVY_BT656_10BITS 0x1
+#define DATA_TYPE_IN_RGB_8BITS 0x2
+#define DATA_TYPE_IN_BGR_8BITS 0x3
+#define DATA_TYPE_IN_RGB_24BITS 0x4
+#define DATA_TYPE_IN_YVYU_8BITS 0x5
+#define DATA_TYPE_IN_YUV_8BITS 0x6
+#define DATA_TYPE_IN_YVYU_16BITS 0x7
+#define DATA_TYPE_IN_YUV_24BITS 0x8
+#define DATA_TYPE_IN_BAYER_8BITS 0x9
+#define DATA_TYPE_IN_BAYER_10BITS 0xA
+#define DATA_TYPE_IN_BAYER_12BITS 0xB
+#define DATA_TYPE_IN_BAYER_16BITS 0xC
+
+#define CSI_CTRL_REG_SET (CI_PI_BASE_OFFSET + 0x14)
+#define CSI_CTRL_REG_CLR (CI_PI_BASE_OFFSET + 0x18)
+#define CSI_CTRL_REG_TOG (CI_PI_BASE_OFFSET + 0x1C)
+
+/* CSI interface Status */
+#define CSI_STATUS (CI_PI_BASE_OFFSET + 0x20)
+#define CSI_STATUS_FIELD_TOGGLE BIT_U(0)
+#define CSI_STATUS_ECC_ERROR BIT_U(1)
+
+#define CSI_STATUS_SET (CI_PI_BASE_OFFSET + 0x24)
+#define CSI_STATUS_CLR (CI_PI_BASE_OFFSET + 0x28)
+#define CSI_STATUS_TOG (CI_PI_BASE_OFFSET + 0x2C)
+
+/* CSI INTERFACE CONTROL REG1 */
+#define CSI_CTRL_REG1 (CI_PI_BASE_OFFSET + 0x30)
+#define CSI_CTRL_REG1_PIXEL_WIDTH(v) (((v) & 0xFFFFU) << 0)
+#define CSI_CTRL_REG1_VSYNC_PULSE(v) (((v) & 0xFFFFU) << 16)
+
+#define CSI_CTRL_REG1_SET (CI_PI_BASE_OFFSET + 0x34)
+#define CSI_CTRL_REG1_CLR (CI_PI_BASE_OFFSET + 0x38)
+#define CSI_CTRL_REG1_TOG (CI_PI_BASE_OFFSET + 0x3C)
+
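+/*
+ * Interface timing modes: PI_GATE_CLOCK_MODE qualifies pixel data with
+ * external HSYNC/VSYNC gating, while PI_CCIR_MODE recovers sync from
+ * embedded BT.656-style (CCIR) codes; see mxc_pcsi_csr_config().
+ */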
+enum {
+ PI_MODE_INIT,
+ PI_GATE_CLOCK_MODE,
+ PI_CCIR_MODE,
+};
+struct mxc_parallel_csi_dev {
+ struct v4l2_subdev sd;
+ struct v4l2_device v4l2_dev;
+ struct v4l2_subdev *sensor_sd;
+
+ struct media_pad pads[MXC_PARALLEL_CSI_PADS_NUM];
+
+ void __iomem *csr_regs;
+ void __iomem *lpcg_regs;
+ struct platform_device *pdev;
+ u32 flags;
+ int irq;
+
+ struct clk *clk_ipg;
+ struct clk *clk_pixel;
+ bool clk_enable;
+
+ struct v4l2_async_subdev asd;
+ struct v4l2_async_notifier subdev_notifier;
+ struct v4l2_async_subdev *async_subdevs[2];
+ struct v4l2_mbus_framefmt format;
+
+ struct device *pd_pi;
+ struct device *pd_isi;
+ struct device_link *pd_pi_link;
+ struct device_link *pd_isi_link;
+
+ struct mutex lock;
+
+ u8 running;
+ u8 mode;
+ u8 uv_swap;
+ u8 tvdec;
+};
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug level (0-2)");
+
+static int format;
+module_param(format, int, 0644);
+MODULE_PARM_DESC(format, "Format level (0-2)");
+
+#ifdef DEBUG
+static void mxc_pcsi_regs_dump(struct mxc_parallel_csi_dev *pcsidev)
+{
+ struct device *dev = &pcsidev->pdev->dev;
+ struct {
+ u32 offset;
+ const char * const name;
+ } registers[] = {
+ { 0x00, "HW_IF_CTRL_REG" },
+ { 0x10, "HW_CSI_CTRL_REG" },
+ { 0x20, "HW_CSI_STATUS" },
+ { 0x30, "HW_CSI_CTRL_REG1" },
+ };
+ u32 i;
+
+ for (i = 0; i < ARRAY_SIZE(registers); i++) {
+ u32 reg = readl(pcsidev->csr_regs + registers[i].offset);
+ dev_dbg(dev, "%20s[0x%.2x]: 0x%.8x\n",
+ registers[i].name, registers[i].offset, reg);
+ }
+}
+#else
+static void mxc_pcsi_regs_dump(struct mxc_parallel_csi_dev *pcsidev) { }
+#endif
+
+static struct mxc_parallel_csi_dev *sd_to_mxc_pcsi_dev(struct v4l2_subdev *sdev)
+{
+ return container_of(sdev, struct mxc_parallel_csi_dev, sd);
+}
+
+static int mxc_pcsi_clk_get(struct mxc_parallel_csi_dev *pcsidev)
+{
+ struct device *dev = &pcsidev->pdev->dev;
+
+ pcsidev->clk_pixel = devm_clk_get(dev, "pixel");
+ if (IS_ERR(pcsidev->clk_pixel)) {
+ dev_info(dev, "failed to get parallel csi pixel clk\n");
+ return PTR_ERR(pcsidev->clk_pixel);
+ }
+
+ pcsidev->clk_ipg = devm_clk_get(dev, "ipg");
+ if (IS_ERR(pcsidev->clk_ipg)) {
+ dev_info(dev, "failed to get parallel ipg pixel clk\n");
+ return PTR_ERR(pcsidev->clk_ipg);
+ }
+
+ return 0;
+}
+
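+/*
+ * The block sits in two SCU power domains; attach both by name and add
+ * runtime-PM device links so the domains power up with this device.
+ */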
+static int mxc_pcsi_attach_pd(struct mxc_parallel_csi_dev *pcsidev)
+{
+ struct device *dev = &pcsidev->pdev->dev;
+ struct device_link *link;
+
+ pcsidev->pd_pi = dev_pm_domain_attach_by_name(dev, "pd_pi");
+ if (IS_ERR(pcsidev->pd_pi)) {
+ if (PTR_ERR(pcsidev->pd_pi) != -EPROBE_DEFER)
+ dev_err(dev, "attach pd_pi domain for pi fail\n");
+ return PTR_ERR(pcsidev->pd_pi);
+ }
+ link = device_link_add(dev, pcsidev->pd_pi,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME);
+ if (IS_ERR(link))
+ return PTR_ERR(link);
+ pcsidev->pd_pi_link = link;
+
+ pcsidev->pd_isi = dev_pm_domain_attach_by_name(dev, "pd_isi_ch0");
+ if (IS_ERR(pcsidev->pd_isi)) {
+ if (PTR_ERR(pcsidev->pd_isi) != -EPROBE_DEFER)
+ dev_err(dev, "attach pd_isi_ch0 domain for pi fail\n");
+ return PTR_ERR(pcsidev->pd_isi);
+ }
+ link = device_link_add(dev, pcsidev->pd_isi,
+ DL_FLAG_STATELESS |
+ DL_FLAG_PM_RUNTIME);
+ if (IS_ERR(link))
+ return PTR_ERR(link);
+ pcsidev->pd_isi_link = link;
+
+ return 0;
+}
+
+static void mxc_pcsi_detach_pd(struct mxc_parallel_csi_dev *pcsidev)
+{
+ device_link_del(pcsidev->pd_pi_link);
+ device_link_del(pcsidev->pd_isi_link);
+ dev_pm_domain_detach(pcsidev->pd_pi, true);
+ dev_pm_domain_detach(pcsidev->pd_isi, true);
+}
+
+static int mxc_pcsi_clk_enable(struct mxc_parallel_csi_dev *pcsidev)
+{
+ struct device *dev = &pcsidev->pdev->dev;
+ int ret;
+
+ if (pcsidev->clk_enable)
+ return 0;
+
+ ret = clk_prepare_enable(pcsidev->clk_pixel);
+ if (ret < 0) {
+ dev_err(dev, "enable pixel clk error (%d)\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(pcsidev->clk_ipg);
+ if (ret < 0) {
+ dev_err(dev, "enable ipg clk error (%d)\n", ret);
+ clk_disable_unprepare(pcsidev->clk_pixel);
+ return ret;
+ }
+ pcsidev->clk_enable = true;
+
+ return 0;
+}
+
+static void mxc_pcsi_clk_disable(struct mxc_parallel_csi_dev *pcsidev)
+{
+ if (!pcsidev->clk_enable)
+ return;
+
+ clk_disable_unprepare(pcsidev->clk_pixel);
+ clk_disable_unprepare(pcsidev->clk_ipg);
+
+ pcsidev->clk_enable = false;
+}
+
+static void mxc_pcsi_sw_reset(struct mxc_parallel_csi_dev *pcsidev)
+{
+ u32 val;
+
+ /* Pulse the software reset bit via the SET/CLR aliases */
+ val = CSI_CTRL_REG_SOFTRST;
+ writel(val, pcsidev->csr_regs + CSI_CTRL_REG_SET);
+
+ usleep_range(1000, 2000);
+ writel(val, pcsidev->csr_regs + CSI_CTRL_REG_CLR);
+}
+
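+/*
+ * CSR bring-up sequence: soft reset, select the pixel-link payload
+ * type, force the sync signals while configuring, enable and validate
+ * the pixel link, then program the input format and timing controls.
+ */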
+static void mxc_pcsi_csr_config(struct mxc_parallel_csi_dev *pcsidev)
+{
+ u32 val;
+
+ /* Software Reset */
+ mxc_pcsi_sw_reset(pcsidev);
+
+ /* Config PL Data Type */
+ val = IF_CTRL_REG_DATA_TYPE(DATA_TYPE_OUT_YUV444);
+ writel(val, pcsidev->csr_regs + IF_CTRL_REG_SET);
+
+ /* Enable sync Force */
+ val = (CSI_CTRL_REG_HSYNC_FORCE_EN | CSI_CTRL_REG_VSYNC_FORCE_EN);
+ writel(val, pcsidev->csr_regs + CSI_CTRL_REG_SET);
+
+ /* Enable Pixel Link */
+ val = IF_CTRL_REG_PL_ENABLE;
+ writel(val, pcsidev->csr_regs + IF_CTRL_REG_SET);
+
+ /* Mark the pixel link data as valid */
+ val = IF_CTRL_REG_PL_VALID;
+ writel(val, pcsidev->csr_regs + IF_CTRL_REG_SET);
+
+ /* Config CTRL REG */
+ val = readl(pcsidev->csr_regs + CSI_CTRL_REG);
+ val |= (CSI_CTRL_REG_DATA_TYPE_IN(DATA_TYPE_IN_UYVY_BT656_8BITS) |
+ CSI_CTRL_REG_HSYNC_POL |
+ CSI_CTRL_REG_MASK_VSYNC_COUNTER(3) |
+ CSI_CTRL_REG_HSYNC_PULSE(2));
+
+ if (pcsidev->uv_swap)
+ val |= CSI_CTRL_REG_UV_SWAP_EN;
+
+ if (pcsidev->mode == PI_GATE_CLOCK_MODE) {
+ val |= CSI_CTRL_REG_GCLK_MODE_EN;
+ } else if (pcsidev->mode == PI_CCIR_MODE) {
+ val |= (CSI_CTRL_REG_CCIR_EN |
+ CSI_CTRL_REG_CCIR_VSYNC_RESET_EN |
+ CSI_CTRL_REG_CCIR_EXT_VSYNC_EN |
+ CSI_CTRL_REG_CCIR_ECC_ERR_CORRECT_EN);
+ }
+
+ writel(val, pcsidev->csr_regs + CSI_CTRL_REG);
+}
+
+static void mxc_pcsi_config_ctrl_reg1(struct mxc_parallel_csi_dev *pcsidev)
+{
+ struct device *dev = &pcsidev->pdev->dev;
+ u32 val;
+
+ if (!pcsidev->format.width || !pcsidev->format.height) {
+ dev_dbg(dev, "%s width/height invalid\n", __func__);
+ return;
+ }
+
+ /* Program the (zero-based) pixel width and a VSYNC pulse of two line widths */
+ val = (CSI_CTRL_REG1_PIXEL_WIDTH(pcsidev->format.width - 1) |
+ CSI_CTRL_REG1_VSYNC_PULSE(pcsidev->format.width << 1));
+ writel(val, pcsidev->csr_regs + CSI_CTRL_REG1);
+}
+
+static void mxc_pcsi_enable_csi(struct mxc_parallel_csi_dev *pcsidev)
+{
+ u32 val;
+
+ /* Enable CSI */
+ val = CSI_CTRL_REG_CSI_EN;
+ writel(val, pcsidev->csr_regs + CSI_CTRL_REG_SET);
+
+ /* Disable SYNC Force */
+ val = (CSI_CTRL_REG_HSYNC_FORCE_EN | CSI_CTRL_REG_VSYNC_FORCE_EN);
+ writel(val, pcsidev->csr_regs + CSI_CTRL_REG_CLR);
+}
+
+static void mxc_pcsi_disable_csi(struct mxc_parallel_csi_dev *pcsidev)
+{
+ u32 val;
+
+ /* Enable Sync Force */
+ val = (CSI_CTRL_REG_HSYNC_FORCE_EN | CSI_CTRL_REG_VSYNC_FORCE_EN);
+ writel(val, pcsidev->csr_regs + CSI_CTRL_REG_SET);
+
+ /* Disable CSI */
+ val = CSI_CTRL_REG_CSI_EN;
+ writel(val, pcsidev->csr_regs + CSI_CTRL_REG_CLR);
+
+ /* Disable Pixel Link */
+ val = IF_CTRL_REG_PL_VALID | IF_CTRL_REG_PL_ENABLE;
+ writel(val, pcsidev->csr_regs + IF_CTRL_REG_CLR);
+}
+
+static struct media_pad *
+mxc_pcsi_get_remote_sensor_pad(struct mxc_parallel_csi_dev *pcsidev)
+{
+ struct v4l2_subdev *subdev = &pcsidev->sd;
+ struct media_pad *sink_pad, *source_pad;
+ int i;
+
+ /* Return the remote pad of the first connected sink pad */
+ for (i = 0; i < subdev->entity.num_pads; i++) {
+ sink_pad = &subdev->entity.pads[i];
+
+ if (sink_pad->flags & MEDIA_PAD_FL_SINK) {
+ source_pad = media_entity_remote_pad(sink_pad);
+ if (source_pad)
+ return source_pad;
+ }
+ }
+
+ v4l2_err(&pcsidev->v4l2_dev,
+ "%s, No remote pad found!\n", __func__);
+
+ return NULL;
+}
+
+static struct v4l2_subdev *mxc_get_remote_subdev(struct mxc_parallel_csi_dev *pcsidev,
+ const char * const label)
+{
+ struct media_pad *source_pad;
+ struct v4l2_subdev *sen_sd;
+
+ /* Get remote source pad */
+ source_pad = mxc_pcsi_get_remote_sensor_pad(pcsidev);
+ if (!source_pad) {
+ v4l2_err(&pcsidev->sd, "%s, No remote pad found!\n", label);
+ return NULL;
+ }
+
+ /* Get remote source pad subdev */
+ sen_sd = media_entity_to_v4l2_subdev(source_pad->entity);
+ if (!sen_sd) {
+ v4l2_err(&pcsidev->sd, "%s, No remote subdev found!\n", label);
+ return NULL;
+ }
+
+ return sen_sd;
+}
+
+static int mxc_pcsi_get_sensor_fmt(struct mxc_parallel_csi_dev *pcsidev)
+{
+ struct v4l2_mbus_framefmt *mf = &pcsidev->format;
+ struct v4l2_subdev *sen_sd;
+ struct media_pad *source_pad;
+ struct v4l2_subdev_format src_fmt;
+ int ret;
+
+ /* Get remote source pad */
+ source_pad = mxc_pcsi_get_remote_sensor_pad(pcsidev);
+ if (!source_pad) {
+ v4l2_err(&pcsidev->v4l2_dev, "%s, No remote pad found!\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Get remote source pad subdev */
+ sen_sd = media_entity_to_v4l2_subdev(source_pad->entity);
+ if (!sen_sd) {
+ v4l2_err(&pcsidev->v4l2_dev, "%s, No remote subdev found!\n", __func__);
+ return -EINVAL;
+ }
+
+ src_fmt.pad = source_pad->index;
+ src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+ ret = v4l2_subdev_call(sen_sd, pad, get_fmt, NULL, &src_fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return -EINVAL;
+
+ /* Update input frame size and format */
+ memcpy(mf, &src_fmt.format, sizeof(struct v4l2_mbus_framefmt));
+
+ if (mf->code == MEDIA_BUS_FMT_YUYV8_2X8 ||
+ mf->code == MEDIA_BUS_FMT_UYVY8_2X8)
+ pcsidev->uv_swap = 1;
+
+ dev_dbg(&pcsidev->pdev->dev,
+ "width=%d, height=%d, fmt.code=0x%x\n",
+ mf->width, mf->height, mf->code);
+
+ return 0;
+}
+
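+/*
+ * The subdev ops below are thin proxies: frame size/interval
+ * enumeration, format and power control are forwarded to the remote
+ * sensor subdev found through the media graph.
+ */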
+static int mxc_pcsi_enum_framesizes(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_size_enum *fse)
+{
+ struct mxc_parallel_csi_dev *pcsidev = sd_to_mxc_pcsi_dev(sd);
+ struct v4l2_subdev *sen_sd;
+
+ sen_sd = mxc_get_remote_subdev(pcsidev, __func__);
+ if (!sen_sd)
+ return -EINVAL;
+
+ return v4l2_subdev_call(sen_sd, pad, enum_frame_size, NULL, fse);
+}
+
+static int mxc_pcsi_enum_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_frame_interval_enum *fie)
+{
+ struct mxc_parallel_csi_dev *pcsidev = sd_to_mxc_pcsi_dev(sd);
+ struct v4l2_subdev *sen_sd;
+
+ sen_sd = mxc_get_remote_subdev(pcsidev, __func__);
+ if (!sen_sd)
+ return -EINVAL;
+
+ return v4l2_subdev_call(sen_sd, pad, enum_frame_interval, NULL, fie);
+}
+
+static int mxc_pcsi_get_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct mxc_parallel_csi_dev *pcsidev = sd_to_mxc_pcsi_dev(sd);
+ struct v4l2_mbus_framefmt *mf = &fmt->format;
+
+ mxc_pcsi_get_sensor_fmt(pcsidev);
+
+ memcpy(mf, &pcsidev->format, sizeof(struct v4l2_mbus_framefmt));
+
+ return 0;
+}
+
+static int mxc_pcsi_set_fmt(struct v4l2_subdev *sd,
+ struct v4l2_subdev_pad_config *cfg,
+ struct v4l2_subdev_format *fmt)
+{
+ struct mxc_parallel_csi_dev *pcsidev = sd_to_mxc_pcsi_dev(sd);
+ struct v4l2_subdev *sen_sd;
+ struct media_pad *source_pad;
+ int ret;
+
+ /* Get remote source pad */
+ source_pad = mxc_pcsi_get_remote_sensor_pad(pcsidev);
+ if (!source_pad) {
+ v4l2_err(&pcsidev->v4l2_dev, "%s, No remote pad found!\n", __func__);
+ return -EINVAL;
+ }
+
+ /* Get remote source pad subdev */
+ sen_sd = media_entity_to_v4l2_subdev(source_pad->entity);
+ if (!sen_sd) {
+ v4l2_err(&pcsidev->v4l2_dev, "%s, No remote subdev found!\n", __func__);
+ return -EINVAL;
+ }
+
+ fmt->pad = source_pad->index;
+ ret = v4l2_subdev_call(sen_sd, pad, set_fmt, NULL, fmt);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ return 0;
+}
+
+static int mxc_pcsi_s_power(struct v4l2_subdev *sd, int on)
+{
+ struct mxc_parallel_csi_dev *pcsidev = sd_to_mxc_pcsi_dev(sd);
+ struct v4l2_subdev *sen_sd;
+
+ sen_sd = mxc_get_remote_subdev(pcsidev, __func__);
+ if (!sen_sd)
+ return -EINVAL;
+
+ return v4l2_subdev_call(sen_sd, core, s_power, on);
+}
+
+static int mxc_pcsi_g_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct mxc_parallel_csi_dev *pcsidev = sd_to_mxc_pcsi_dev(sd);
+ struct v4l2_subdev *sen_sd;
+
+ sen_sd = mxc_get_remote_subdev(pcsidev, __func__);
+ if (!sen_sd)
+ return -EINVAL;
+
+ return v4l2_subdev_call(sen_sd, video, g_frame_interval, interval);
+}
+
+static int mxc_pcsi_s_frame_interval(struct v4l2_subdev *sd,
+ struct v4l2_subdev_frame_interval *interval)
+{
+ struct mxc_parallel_csi_dev *pcsidev = sd_to_mxc_pcsi_dev(sd);
+ struct v4l2_subdev *sen_sd;
+
+ sen_sd = mxc_get_remote_subdev(pcsidev, __func__);
+ if (!sen_sd)
+ return -EINVAL;
+
+ return v4l2_subdev_call(sen_sd, video, s_frame_interval, interval);
+}
+
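+/*
+ * s_stream keeps a simple enable count: the interface is programmed on
+ * the first enable and shut off again on disable, with each call
+ * balanced by a runtime-PM get/put.
+ */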
+static int mxc_pcsi_s_stream(struct v4l2_subdev *sd, int enable)
+{
+ struct mxc_parallel_csi_dev *pcsidev = sd_to_mxc_pcsi_dev(sd);
+ struct device *dev = &pcsidev->pdev->dev;
+
+ dev_dbg(dev, "%s: enable = %d\n", __func__, enable);
+
+ if (enable) {
+ pm_runtime_get_sync(dev);
+ if (!pcsidev->running) {
+ mxc_pcsi_get_sensor_fmt(pcsidev);
+ mxc_pcsi_csr_config(pcsidev);
+ mxc_pcsi_config_ctrl_reg1(pcsidev);
+ mxc_pcsi_enable_csi(pcsidev);
+ mxc_pcsi_regs_dump(pcsidev);
+ }
+ pcsidev->running++;
+ } else {
+ if (pcsidev->running) {
+ mxc_pcsi_disable_csi(pcsidev);
+ pcsidev->running--;
+ }
+ pm_runtime_put(dev);
+ }
+
+ return 0;
+}
+
+static int mxc_pcsi_link_setup(struct media_entity *entity,
+ const struct media_pad *local,
+ const struct media_pad *remote, u32 flags)
+{
+ struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity);
+ struct platform_device *pdev = v4l2_get_subdevdata(sd);
+
+ if (local->flags & MEDIA_PAD_FL_SOURCE) {
+ switch (local->index) {
+ case MXC_PARALLEL_CSI_PAD_SOURCE:
+ break;
+ default:
+ dev_err(&pdev->dev, "%s invalid source pad\n", __func__);
+ return -EINVAL;
+ }
+ } else if (local->flags & MEDIA_PAD_FL_SINK) {
+ switch (local->index) {
+ case MXC_PARALLEL_CSI_PAD_SINK:
+ break;
+ default:
+ dev_err(&pdev->dev, "%s invalid sink pad\n", __func__);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static const struct v4l2_subdev_pad_ops pcsi_pad_ops = {
+ .enum_frame_size = mxc_pcsi_enum_framesizes,
+ .enum_frame_interval = mxc_pcsi_enum_frame_interval,
+ .get_fmt = mxc_pcsi_get_fmt,
+ .set_fmt = mxc_pcsi_set_fmt,
+};
+
+static const struct v4l2_subdev_core_ops pcsi_core_ops = {
+ .s_power = mxc_pcsi_s_power,
+};
+
+static const struct v4l2_subdev_video_ops pcsi_video_ops = {
+ .g_frame_interval = mxc_pcsi_g_frame_interval,
+ .s_frame_interval = mxc_pcsi_s_frame_interval,
+ .s_stream = mxc_pcsi_s_stream,
+};
+
+static const struct v4l2_subdev_ops pcsi_subdev_ops = {
+ .core = &pcsi_core_ops,
+ .video = &pcsi_video_ops,
+ .pad = &pcsi_pad_ops,
+};
+
+static const struct media_entity_operations mxc_pcsi_sd_media_ops = {
+ .link_setup = mxc_pcsi_link_setup,
+};
+
+static int mxc_parallel_csi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct resource *mem_res;
+ struct mxc_parallel_csi_dev *pcsidev;
+ int ret;
+
+ pcsidev = devm_kzalloc(dev, sizeof(*pcsidev), GFP_KERNEL);
+ if (!pcsidev)
+ return -ENOMEM;
+
+ pcsidev->pdev = pdev;
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pcsidev->csr_regs = devm_ioremap_resource(dev, mem_res);
+ if (IS_ERR(pcsidev->csr_regs)) {
+ dev_dbg(dev, "Failed to get parallel CSI CSR register\n");
+ return PTR_ERR(pcsidev->csr_regs);
+ }
+
+ ret = mxc_pcsi_clk_get(pcsidev);
+ if (ret < 0)
+ return ret;
+
+ ret = mxc_pcsi_attach_pd(pcsidev);
+ if (ret < 0)
+ return ret;
+
+ v4l2_subdev_init(&pcsidev->sd, &pcsi_subdev_ops);
+
+ pcsidev->mode = PI_GATE_CLOCK_MODE;
+
+ pcsidev->sd.owner = THIS_MODULE;
+ snprintf(pcsidev->sd.name, sizeof(pcsidev->sd.name), "%s",
+ MXC_PARALLEL_CSI_SUBDEV_NAME);
+
+ pcsidev->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
+ pcsidev->sd.entity.function = MEDIA_ENT_F_IO_V4L;
+
+ pcsidev->sd.dev = dev;
+
+ pcsidev->pads[MXC_PARALLEL_CSI_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
+ pcsidev->pads[MXC_PARALLEL_CSI_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
+
+ ret = media_entity_pads_init(&pcsidev->sd.entity,
+ MXC_PARALLEL_CSI_PADS_NUM,
+ pcsidev->pads);
+ if (ret < 0)
+ goto e_clkdis;
+
+ pcsidev->sd.entity.ops = &mxc_pcsi_sd_media_ops;
+
+ v4l2_set_subdevdata(&pcsidev->sd, pdev);
+ platform_set_drvdata(pdev, pcsidev);
+
+ pcsidev->running = 0;
+ pm_runtime_enable(dev);
+
+ dev_info(dev, "%s probe successfully\n", __func__);
+ return 0;
+
+e_clkdis:
+ media_entity_cleanup(&pcsidev->sd.entity);
+ return ret;
+}
+
+static int mxc_parallel_csi_remove(struct platform_device *pdev)
+{
+ struct mxc_parallel_csi_dev *pcsidev = platform_get_drvdata(pdev);
+
+ mxc_pcsi_detach_pd(pcsidev);
+ media_entity_cleanup(&pcsidev->sd.entity);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static int parallel_csi_pm_suspend(struct device *dev)
+{
+ return pm_runtime_force_suspend(dev);
+}
+
+static int parallel_csi_pm_resume(struct device *dev)
+{
+ return pm_runtime_force_resume(dev);
+}
+
+static int parallel_csi_runtime_suspend(struct device *dev)
+{
+ struct mxc_parallel_csi_dev *pcsidev = dev_get_drvdata(dev);
+
+ mxc_pcsi_clk_disable(pcsidev);
+
+ return 0;
+}
+
+static int parallel_csi_runtime_resume(struct device *dev)
+{
+ struct mxc_parallel_csi_dev *pcsidev = dev_get_drvdata(dev);
+
+ return mxc_pcsi_clk_enable(pcsidev);
+}
+
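+/*
+ * System sleep is layered on runtime PM: runtime suspend gates the
+ * pixel and ipg clocks and runtime resume turns them back on.
+ */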
+static const struct dev_pm_ops parallel_csi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(parallel_csi_pm_suspend, parallel_csi_pm_resume)
+ SET_RUNTIME_PM_OPS(parallel_csi_runtime_suspend,
+ parallel_csi_runtime_resume,
+ NULL)
+};
+
+static const struct of_device_id parallel_csi_of_match[] = {
+ { .compatible = "fsl,mxc-parallel-csi",},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, parallel_csi_of_match);
+
+static struct platform_driver parallel_csi_driver = {
+ .driver = {
+ .name = MXC_PARALLEL_CSI_DRIVER_NAME,
+ .of_match_table = parallel_csi_of_match,
+ .pm = &parallel_csi_pm_ops,
+ },
+ .probe = mxc_parallel_csi_probe,
+ .remove = mxc_parallel_csi_remove,
+};
+
+module_platform_driver(parallel_csi_driver);
+
+MODULE_AUTHOR("Freescale Semiconductor, Inc.");
+MODULE_DESCRIPTION("MXC PARALLEL CSI driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" MXC_PARALLEL_CSI_DRIVER_NAME);