Diffstat (limited to 'drivers/soc')
-rw-r--r--  drivers/soc/fsl/Kconfig                     |  19
-rw-r--r--  drivers/soc/fsl/Makefile                    |   2
-rw-r--r--  drivers/soc/fsl/dpio/dpio-cmd.h             |   3
-rw-r--r--  drivers/soc/fsl/dpio/dpio-driver.c          |  18
-rw-r--r--  drivers/soc/fsl/dpio/dpio-service.c         | 234
-rw-r--r--  drivers/soc/fsl/dpio/dpio.c                 |   1
-rw-r--r--  drivers/soc/fsl/dpio/dpio.h                 |   2
-rw-r--r--  drivers/soc/fsl/dpio/qbman-portal.c         | 227
-rw-r--r--  drivers/soc/fsl/dpio/qbman-portal.h         |  19
-rw-r--r--  drivers/soc/fsl/qbman/bman_ccsr.c           |  11
-rw-r--r--  drivers/soc/fsl/qbman/bman_portal.c         |  11
-rw-r--r--  drivers/soc/fsl/qbman/dpaa_sys.c            | 185
-rw-r--r--  drivers/soc/fsl/qbman/dpaa_sys.h            |   5
-rw-r--r--  drivers/soc/fsl/qbman/qman_ccsr.c           |  31
-rw-r--r--  drivers/soc/fsl/qbman/qman_portal.c         |  17
-rw-r--r--  drivers/soc/fsl/qixis_ctrl.c                | 105
-rw-r--r--  drivers/soc/fsl/sleep_fsm.c                 | 279
-rw-r--r--  drivers/soc/fsl/sleep_fsm.h                 | 130
-rw-r--r--  drivers/soc/imx/Kconfig                     |  67
-rw-r--r--  drivers/soc/imx/Makefile                    |   8
-rw-r--r--  drivers/soc/imx/busfreq-imx8mq.c            | 670
-rw-r--r--  drivers/soc/imx/gpc.c                       |  34
-rw-r--r--  drivers/soc/imx/gpcv2.c                     | 637
-rw-r--r--  drivers/soc/imx/imx8m_pm_domains.c          | 243
-rw-r--r--  drivers/soc/imx/imx8ulp_lpm.c               | 276
-rw-r--r--  drivers/soc/imx/imx93-blk-ctrl.c            | 458
-rw-r--r--  drivers/soc/imx/imx93-pd.c                  | 277
-rw-r--r--  drivers/soc/imx/mu/Kconfig                  |   4
-rw-r--r--  drivers/soc/imx/mu/Makefile                 |   1
-rw-r--r--  drivers/soc/imx/mu/mx8_mu.c                 | 195
-rw-r--r--  drivers/soc/imx/rpmsg_life_cycle.c          | 197
-rw-r--r--  drivers/soc/imx/secvio/Makefile             |   3
-rw-r--r--  drivers/soc/imx/secvio/imx-secvio-audit.c   |  31
-rw-r--r--  drivers/soc/imx/secvio/imx-secvio-debugfs.c | 283
-rw-r--r--  drivers/soc/imx/secvio/imx-secvio-sc-int.h  |  83
-rw-r--r--  drivers/soc/imx/secvio/imx-secvio-sc.c      | 675
-rw-r--r--  drivers/soc/imx/soc-imx.c                   |   5
-rw-r--r--  drivers/soc/imx/soc-imx8m.c                 |  64
-rw-r--r--  drivers/soc/imx/soc-imx9.c                  | 123
39 files changed, 5023 insertions, 610 deletions
diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig
index c5d46152d468..f760e03a2887 100644
--- a/drivers/soc/fsl/Kconfig
+++ b/drivers/soc/fsl/Kconfig
@@ -24,6 +24,7 @@ config FSL_MC_DPIO
tristate "QorIQ DPAA2 DPIO driver"
depends on FSL_MC_BUS
select SOC_BUS
+ select DIMLIB
select FSL_GUTS
help
Driver for the DPAA2 DPIO object. A DPIO provides queue and
@@ -32,6 +33,13 @@ config FSL_MC_DPIO
objects individually, but groups them under a service layer
API.
+config FSL_SLEEP_FSM
+ bool
+ help
+	  This driver configures a hardware FSM (Finite State Machine) for
+	  deep sleep. The FSM performs the final clean-up steps as the system
+	  enters deep sleep, and wakes the system up when a wake-up event
+	  occurs.
+
config DPAA2_CONSOLE
tristate "QorIQ DPAA2 console driver"
depends on OF && (ARCH_LAYERSCAPE || COMPILE_TEST)
@@ -51,4 +59,15 @@ config FSL_RCPM
tasks associated with power management, such as wakeup source control.
Note that currently this driver will not support PowerPC based
QorIQ processor.
+
+config FSL_QIXIS
+ tristate "QIXIS system controller driver"
+ depends on OF
+ select REGMAP_I2C
+ select REGMAP_MMIO
+ default n
+ help
+	  Say y here to enable the QIXIS system controller API. The QIXIS
+	  driver provides FPGA functions to control the system.
+
endmenu
diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile
index 906f1cd8af01..4a0ee1bc7037 100644
--- a/drivers/soc/fsl/Makefile
+++ b/drivers/soc/fsl/Makefile
@@ -7,6 +7,8 @@ obj-$(CONFIG_FSL_DPAA) += qbman/
obj-$(CONFIG_QUICC_ENGINE) += qe/
obj-$(CONFIG_CPM) += qe/
obj-$(CONFIG_FSL_RCPM) += rcpm.o
+obj-$(CONFIG_FSL_QIXIS) += qixis_ctrl.o
obj-$(CONFIG_FSL_GUTS) += guts.o
obj-$(CONFIG_FSL_MC_DPIO) += dpio/
obj-$(CONFIG_DPAA2_CONSOLE) += dpaa2-console.o
+obj-$(CONFIG_FSL_SLEEP_FSM) += sleep_fsm.o
diff --git a/drivers/soc/fsl/dpio/dpio-cmd.h b/drivers/soc/fsl/dpio/dpio-cmd.h
index e13fd3ac1939..2fbcb78cdaaf 100644
--- a/drivers/soc/fsl/dpio/dpio-cmd.h
+++ b/drivers/soc/fsl/dpio/dpio-cmd.h
@@ -46,6 +46,9 @@ struct dpio_rsp_get_attr {
__le64 qbman_portal_ci_addr;
/* cmd word 3 */
__le32 qbman_version;
+ __le32 pad1;
+ /* cmd word 4 */
+ __le32 clk;
};
struct dpio_stashing_dest {
diff --git a/drivers/soc/fsl/dpio/dpio-driver.c b/drivers/soc/fsl/dpio/dpio-driver.c
index 7f397b4ad878..1e22abd6f0e2 100644
--- a/drivers/soc/fsl/dpio/dpio-driver.c
+++ b/drivers/soc/fsl/dpio/dpio-driver.c
@@ -27,6 +27,11 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("DPIO Driver");
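+/*
+ * Editorial note, hedged sketch of intent: PROT_NORMAL_NS below maps the
+ * cache-enabled (CENA) portal area as arm64 Normal memory without PTE_SHARED,
+ * i.e. non-shareable. Coherency with QBMan is then maintained explicitly in
+ * software via the dc cvac/ivac maintenance ops in qbman-portal.c.
+ */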
+#define PROT_NORMAL_NS (PTE_TYPE_PAGE | PTE_AF | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
+
+#define ioremap_cache_ns(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NS))
+
+
struct dpio_priv {
struct dpaa2_io *io;
};
@@ -162,6 +167,7 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
goto err_get_attr;
}
desc.qman_version = dpio_attrs.qbman_version;
+ desc.qman_clk = dpio_attrs.clk;
err = dpio_enable(dpio_dev->mc_io, 0, dpio_dev->mc_handle);
if (err) {
@@ -197,13 +203,11 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
if (dpio_dev->obj_desc.region_count < 3) {
/* No support for DDR backed portals, use classic mapping */
/*
- * Set the CENA regs to be the cache inhibited area of the
- * portal to avoid coherency issues if a user migrates to
- * another core.
+ * Set the CENA regs to be the cache enabled area of the portal to
+ * achieve the best performance.
*/
- desc.regs_cena = devm_memremap(dev, dpio_dev->regions[1].start,
- resource_size(&dpio_dev->regions[1]),
- MEMREMAP_WC);
+ desc.regs_cena = ioremap_cache_ns(dpio_dev->regions[0].start,
+ resource_size(&dpio_dev->regions[0]));
} else {
desc.regs_cena = devm_memremap(dev, dpio_dev->regions[2].start,
resource_size(&dpio_dev->regions[2]),
@@ -211,7 +215,7 @@ static int dpaa2_dpio_probe(struct fsl_mc_device *dpio_dev)
}
if (IS_ERR(desc.regs_cena)) {
- dev_err(dev, "devm_memremap failed\n");
+ dev_err(dev, "ioremap_cache_ns failed\n");
err = PTR_ERR(desc.regs_cena);
goto err_allocate_irqs;
}
diff --git a/drivers/soc/fsl/dpio/dpio-service.c b/drivers/soc/fsl/dpio/dpio-service.c
index 779c319a4b82..14197069ca1f 100644
--- a/drivers/soc/fsl/dpio/dpio-service.c
+++ b/drivers/soc/fsl/dpio/dpio-service.c
@@ -12,6 +12,7 @@
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
+#include <linux/dim.h>
#include <linux/slab.h>
#include "dpio.h"
@@ -28,6 +29,14 @@ struct dpaa2_io {
spinlock_t lock_notifications;
struct list_head notifications;
struct device *dev;
+
+ /* Net DIM */
+ struct dim rx_dim;
+ /* protect against concurrent Net DIM updates */
+ spinlock_t dim_lock;
+ u16 event_ctr;
+ u64 bytes;
+ u64 frames;
};
struct dpaa2_io_store {
@@ -100,6 +109,17 @@ struct dpaa2_io *dpaa2_io_service_select(int cpu)
}
EXPORT_SYMBOL_GPL(dpaa2_io_service_select);
+static void dpaa2_io_dim_work(struct work_struct *w)
+{
+ struct dim *dim = container_of(w, struct dim, work);
+ struct dim_cq_moder moder =
+ net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
+ struct dpaa2_io *d = container_of(dim, struct dpaa2_io, rx_dim);
+
+ dpaa2_io_set_irq_coalescing(d, moder.usec);
+ dim->state = DIM_START_MEASURE;
+}
+
/**
* dpaa2_io_create() - create a dpaa2_io object.
* @desc: the dpaa2_io descriptor
@@ -114,6 +134,7 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
struct device *dev)
{
struct dpaa2_io *obj = kmalloc(sizeof(*obj), GFP_KERNEL);
+ u32 qman_256_cycles_per_ns;
if (!obj)
return NULL;
@@ -127,7 +148,15 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
obj->dpio_desc = *desc;
obj->swp_desc.cena_bar = obj->dpio_desc.regs_cena;
obj->swp_desc.cinh_bar = obj->dpio_desc.regs_cinh;
+ obj->swp_desc.qman_clk = obj->dpio_desc.qman_clk;
obj->swp_desc.qman_version = obj->dpio_desc.qman_version;
+
+	/* Compute the duration of a block of 256 QBMAN clock cycles in ns.
+	 * This is needed because the interrupt timeout period register is
+	 * specified in QBMAN clock cycles, in increments of 256.
+	 */
+ qman_256_cycles_per_ns = 256000 / (obj->swp_desc.qman_clk / 1000000);
+ obj->swp_desc.qman_256_cycles_per_ns = qman_256_cycles_per_ns;
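+	/*
+	 * Example (hypothetical clock): with qman_clk = 500 MHz,
+	 * 256000 / 500 = 512, i.e. a block of 256 QBMAN cycles spans 512 ns.
+	 */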
obj->swp = qbman_swp_init(&obj->swp_desc);
if (!obj->swp) {
@@ -138,6 +167,7 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
INIT_LIST_HEAD(&obj->node);
spin_lock_init(&obj->lock_mgmt_cmd);
spin_lock_init(&obj->lock_notifications);
+ spin_lock_init(&obj->dim_lock);
INIT_LIST_HEAD(&obj->notifications);
/* For now only enable DQRR interrupts */
@@ -155,6 +185,12 @@ struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc,
obj->dev = dev;
+ memset(&obj->rx_dim, 0, sizeof(obj->rx_dim));
+ INIT_WORK(&obj->rx_dim.work, dpaa2_io_dim_work);
+ obj->event_ctr = 0;
+ obj->bytes = 0;
+ obj->frames = 0;
+
return obj;
}
@@ -194,6 +230,8 @@ irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj)
struct qbman_swp *swp;
u32 status;
+ obj->event_ctr++;
+
swp = obj->swp;
status = qbman_swp_interrupt_read_status(swp);
if (!status)
@@ -462,7 +500,7 @@ int dpaa2_io_service_enqueue_multiple_fq(struct dpaa2_io *d,
qbman_eq_desc_set_no_orp(&ed, 0);
qbman_eq_desc_set_fq(&ed, fqid);
- return qbman_swp_enqueue_multiple(d->swp, &ed, fd, 0, nb);
+ return qbman_swp_enqueue_multiple(d->swp, &ed, fd, NULL, nb);
}
EXPORT_SYMBOL(dpaa2_io_service_enqueue_multiple_fq);
@@ -779,3 +817,197 @@ int dpaa2_io_query_bp_count(struct dpaa2_io *d, u16 bpid, u32 *num)
return 0;
}
EXPORT_SYMBOL_GPL(dpaa2_io_query_bp_count);
+
+/**
+ * dpaa2_io_service_enqueue_orp_fq() - Enqueue a frame to a frame queue with
+ * order restoration
+ * @d: the given DPIO service.
+ * @fqid: the given frame queue id.
+ * @fd: the frame descriptor which is enqueued.
+ * @orpid: the order restoration point ID
+ * @seqnum: the order sequence number
+ * @last: must be set for the final frame if seqnum is shared (split frame)
+ *
+ * Performs an enqueue to a frame queue using the specified order restoration
+ * point. The QMan device will ensure that frames placed on the queue are
+ * ordered as per the sequence number.
+ *
+ * When a frame is split it is possible to enqueue using the same sequence
+ * number more than once. The final frame of a shared sequence number must be
+ * indicated by setting last = 1. For non-shared sequence numbers last = 1
+ * must always be set.
+ *
+ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
+ * or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_orp_fq(struct dpaa2_io *d, u32 fqid,
+ const struct dpaa2_fd *fd, u16 orpid,
+ u16 seqnum, int last)
+{
+ struct qbman_eq_desc ed;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+ qbman_eq_desc_clear(&ed);
+ qbman_eq_desc_set_orp(&ed, 0, orpid, seqnum, !last);
+ qbman_eq_desc_set_fq(&ed, fqid);
+ return qbman_swp_enqueue(d->swp, &ed, fd);
+}
+EXPORT_SYMBOL(dpaa2_io_service_enqueue_orp_fq);
+
+/**
+ * dpaa2_io_service_enqueue_orp_qd() - Enqueue a frame to a queueing destination
+ * with order restoration
+ * @d: the given DPIO service.
+ * @qdid: the given queuing destination id.
+ * @prio: the given queuing priority.
+ * @qdbin: the given queuing destination bin.
+ * @fd: the frame descriptor which is enqueued.
+ * @orpid: the order restoration point ID
+ * @seqnum: the order sequence number
+ * @last: must be set for the final frame if seqnum is shared (split frame)
+ *
+ * Performs an enqueue to a queuing destination using the specified order
+ * restoration point. The QMan device will ensure that frames placed on the
+ * queue are ordered as per the sequence number.
+ *
+ * When a frame is split it is possible to enqueue using the same sequence
+ * number more than once. The final frame of a shared sequence number must be
+ * indicated by setting last = 1. For non-shared sequence numbers last = 1
+ * must always be set.
+ *
+ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready,
+ * or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_enqueue_orp_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
+ u16 qdbin, const struct dpaa2_fd *fd,
+ u16 orpid, u16 seqnum, int last)
+{
+ struct qbman_eq_desc ed;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+ qbman_eq_desc_clear(&ed);
+ qbman_eq_desc_set_orp(&ed, 0, orpid, seqnum, !last);
+ qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio);
+ return qbman_swp_enqueue(d->swp, &ed, fd);
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_enqueue_orp_qd);
+
+/**
+ * dpaa2_io_service_orp_seqnum_drop() - Remove a sequence number from
+ * an order restoration list
+ * @d: the given DPIO service.
+ * @orpid: Order restoration point to remove a sequence number from
+ * @seqnum: Sequence number to remove
+ *
+ * Removes a frame's sequence number from an order restoration point without
+ * enqueueing the frame. Used to indicate that the order restoration hardware
+ * should not expect to see this sequence number. Typically used to indicate
+ * that a frame was terminated or dropped from a flow.
+ *
+ * Return 0 for success, or -EBUSY if the enqueue ring is not ready,
+ * or -ENODEV if there is no dpio service.
+ */
+int dpaa2_io_service_orp_seqnum_drop(struct dpaa2_io *d, u16 orpid, u16 seqnum)
+{
+ struct qbman_eq_desc ed;
+ struct dpaa2_fd fd;
+ unsigned long irqflags;
+ int ret;
+
+ d = service_select(d);
+ if (!d)
+ return -ENODEV;
+
+ if ((d->swp->desc->qman_version & QMAN_REV_MASK) >= QMAN_REV_5000) {
+ spin_lock_irqsave(&d->lock_mgmt_cmd, irqflags);
+ ret = qbman_orp_drop(d->swp, orpid, seqnum);
+ spin_unlock_irqrestore(&d->lock_mgmt_cmd, irqflags);
+ return ret;
+ }
+
+ qbman_eq_desc_clear(&ed);
+ qbman_eq_desc_set_orp_hole(&ed, orpid, seqnum);
+ return qbman_swp_enqueue(d->swp, &ed, &fd);
+}
+EXPORT_SYMBOL_GPL(dpaa2_io_service_orp_seqnum_drop);
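+
+/*
+ * Illustrative usage (hypothetical fqid/orpid/seq values, not part of the
+ * patch): enqueue two fragments of a split frame under one sequence number,
+ * then drop a sequence number that will never be enqueued:
+ *
+ *	dpaa2_io_service_enqueue_orp_fq(d, fqid, &frag0, orpid, seq, 0);
+ *	dpaa2_io_service_enqueue_orp_fq(d, fqid, &frag1, orpid, seq, 1);
+ *	dpaa2_io_service_orp_seqnum_drop(d, orpid, seq + 1);
+ */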
+
+/**
+ * dpaa2_io_set_irq_coalescing() - Set new IRQ coalescing values
+ * @d: the given DPIO object
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ *
+ * Return 0 for success, or negative error code on error.
+ */
+int dpaa2_io_set_irq_coalescing(struct dpaa2_io *d, u32 irq_holdoff)
+{
+ struct qbman_swp *swp = d->swp;
+
+ return qbman_swp_set_irq_coalescing(swp, swp->dqrr.dqrr_size - 1,
+ irq_holdoff);
+}
+EXPORT_SYMBOL(dpaa2_io_set_irq_coalescing);
+
+/**
+ * dpaa2_io_get_irq_coalescing() - Get the current IRQ coalescing parameters
+ * @d: the given DPIO object
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ */
+void dpaa2_io_get_irq_coalescing(struct dpaa2_io *d, u32 *irq_holdoff)
+{
+ struct qbman_swp *swp = d->swp;
+
+ qbman_swp_get_irq_coalescing(swp, NULL, irq_holdoff);
+}
+EXPORT_SYMBOL(dpaa2_io_get_irq_coalescing);
+
+/**
+ * dpaa2_io_set_adaptive_coalescing() - Enable/disable adaptive coalescing
+ * @d: the given DPIO object
+ * @use_adaptive_rx_coalesce: adaptive coalescing state
+ */
+void dpaa2_io_set_adaptive_coalescing(struct dpaa2_io *d,
+ int use_adaptive_rx_coalesce)
+{
+ d->swp->use_adaptive_rx_coalesce = use_adaptive_rx_coalesce;
+}
+EXPORT_SYMBOL(dpaa2_io_set_adaptive_coalescing);
+
+/**
+ * dpaa2_io_get_adaptive_coalescing() - Query adaptive coalescing state
+ * @d: the given DPIO object
+ *
+ * Return 1 when adaptive coalescing is enabled on the DPIO object and 0
+ * otherwise.
+ */
+int dpaa2_io_get_adaptive_coalescing(struct dpaa2_io *d)
+{
+ return d->swp->use_adaptive_rx_coalesce;
+}
+EXPORT_SYMBOL(dpaa2_io_get_adaptive_coalescing);
+
+/**
+ * dpaa2_io_update_net_dim() - Update Net DIM
+ * @d: the given DPIO object
+ * @frames: how many frames have been dequeued by the user since the last call
+ * @bytes: how many bytes have been dequeued by the user since the last call
+ */
+void dpaa2_io_update_net_dim(struct dpaa2_io *d, __u64 frames, __u64 bytes)
+{
+ struct dim_sample dim_sample = {};
+
+ if (!d->swp->use_adaptive_rx_coalesce)
+ return;
+
+ spin_lock(&d->dim_lock);
+
+ d->bytes += bytes;
+ d->frames += frames;
+
+ dim_update_sample(d->event_ctr, d->frames, d->bytes, &dim_sample);
+ net_dim(&d->rx_dim, dim_sample);
+
+ spin_unlock(&d->dim_lock);
+}
+EXPORT_SYMBOL(dpaa2_io_update_net_dim);
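+
+/*
+ * Illustrative usage (sketch, assuming a NAPI-style poll loop): enable
+ * adaptive coalescing once, then feed dequeue statistics to Net DIM after
+ * each poll so it can retune the holdoff via dpaa2_io_set_irq_coalescing():
+ *
+ *	dpaa2_io_set_adaptive_coalescing(d, 1);
+ *
+ *	(per poll, after processing 'frames' frames and 'bytes' bytes)
+ *	dpaa2_io_update_net_dim(d, frames, bytes);
+ */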
diff --git a/drivers/soc/fsl/dpio/dpio.c b/drivers/soc/fsl/dpio/dpio.c
index af74c597a675..8ed606ffaac5 100644
--- a/drivers/soc/fsl/dpio/dpio.c
+++ b/drivers/soc/fsl/dpio/dpio.c
@@ -162,6 +162,7 @@ int dpio_get_attributes(struct fsl_mc_io *mc_io,
attr->qbman_portal_ci_offset =
le64_to_cpu(dpio_rsp->qbman_portal_ci_addr);
attr->qbman_version = le32_to_cpu(dpio_rsp->qbman_version);
+ attr->clk = le32_to_cpu(dpio_rsp->clk);
return 0;
}
diff --git a/drivers/soc/fsl/dpio/dpio.h b/drivers/soc/fsl/dpio/dpio.h
index da06f7258098..7fda44f0d7f4 100644
--- a/drivers/soc/fsl/dpio/dpio.h
+++ b/drivers/soc/fsl/dpio/dpio.h
@@ -59,6 +59,7 @@ int dpio_disable(struct fsl_mc_io *mc_io,
* @num_priorities: Number of priorities for the notification channel (1-8);
* relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL'
* @qbman_version: QBMAN version
+ * @clk: QBMAN clock frequency value in Hz
*/
struct dpio_attr {
int id;
@@ -68,6 +69,7 @@ struct dpio_attr {
enum dpio_channel_mode channel_mode;
u8 num_priorities;
u32 qbman_version;
+ u32 clk;
};
int dpio_get_attributes(struct fsl_mc_io *mc_io,
diff --git a/drivers/soc/fsl/dpio/qbman-portal.c b/drivers/soc/fsl/dpio/qbman-portal.c
index 3ec8ab08b988..74de9f4aa1e5 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.c
+++ b/drivers/soc/fsl/dpio/qbman-portal.c
@@ -19,6 +19,7 @@
/* QBMan portal management command codes */
#define QBMAN_MC_ACQUIRE 0x30
#define QBMAN_WQCHAN_CONFIGURE 0x46
+#define QBMAN_MC_ORP 0x63
/* CINH register offsets */
#define QBMAN_CINH_SWP_EQCR_PI 0x800
@@ -29,6 +30,7 @@
#define QBMAN_CINH_SWP_EQCR_AM_RT 0x980
#define QBMAN_CINH_SWP_RCR_AM_RT 0x9c0
#define QBMAN_CINH_SWP_DQPI 0xa00
+#define QBMAN_CINH_SWP_DQRR_ITR 0xa80
#define QBMAN_CINH_SWP_DCAP 0xac0
#define QBMAN_CINH_SWP_SDQCR 0xb00
#define QBMAN_CINH_SWP_EQCR_AM_RT2 0xb40
@@ -38,6 +40,7 @@
#define QBMAN_CINH_SWP_IER 0xe40
#define QBMAN_CINH_SWP_ISDR 0xe80
#define QBMAN_CINH_SWP_IIR 0xec0
+#define QBMAN_CINH_SWP_ITPR 0xf40
/* CENA register offsets */
#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((u32)(n) << 6))
@@ -169,6 +172,14 @@ int (*qbman_swp_release_ptr)(struct qbman_swp *s,
unsigned int num_buffers)
= qbman_swp_release_direct;
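+/*
+ * Clean (dc cvac) or invalidate (dc ivac) a cacheline by VA to the point of
+ * coherency. Needed because the CENA area is mapped cacheable but
+ * non-shareable, so software must push/pull portal cachelines explicitly.
+ */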
+#define dccvac(p) { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); }
+#define dcivac(p) { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); }
+static inline void qbman_inval_prefetch(struct qbman_swp *p, u32 offset)
+{
+ dcivac(p->addr_cena + offset);
+ prefetch(p->addr_cena + offset);
+}
+
/* Portal Access */
static inline u32 qbman_read_register(struct qbman_swp *p, u32 offset)
@@ -282,7 +293,7 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
- 1, /* Writes Non-cacheable */
+ 0, /* Writes Non-cacheable */
0, /* EQCR_CI stashing threshold */
3, /* RPM: RCR in array mode */
2, /* DCM: Discrete consumption ack */
@@ -291,12 +302,12 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
1, /* mem stashing priority enable */
1, /* mem stashing enable */
1, /* dequeue stashing priority enable */
- 0, /* dequeue stashing enable enable */
+ 1, /* dequeue stashing enable */
0); /* EQCR_CI stashing priority enable */
} else {
memset(p->addr_cena, 0, 64 * 1024);
reg = qbman_set_swp_cfg(p->dqrr.dqrr_size,
- 1, /* Writes Non-cacheable */
+ 0, /* Writes Non-cacheable */
1, /* EQCR_CI stashing threshold */
3, /* RPM: RCR in array mode */
2, /* DCM: Discrete consumption ack */
@@ -305,7 +316,7 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
1, /* mem stashing priority enable */
1, /* mem stashing enable */
1, /* dequeue stashing priority enable */
- 0, /* dequeue stashing enable */
+ 1, /* dequeue stashing enable */
0); /* EQCR_CI stashing priority enable */
reg |= 1 << SWP_CFG_CPBS_SHIFT | /* memory-backed mode */
1 << SWP_CFG_VPM_SHIFT | /* VDQCR read triggered mode */
@@ -355,6 +366,9 @@ struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d)
& p->eqcr.pi_ci_mask;
p->eqcr.available = p->eqcr.pi_ring_size;
+ /* Initialize the software portal with an IRQ timeout period of 0 us */
+ qbman_swp_set_irq_coalescing(p, p->dqrr.dqrr_size - 1, 0);
+
return p;
}
@@ -459,6 +473,7 @@ void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, u8 cmd_verb)
if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
dma_wmb();
*v = cmd_verb | p->mc.valid_bit;
+ dccvac(cmd);
} else {
*v = cmd_verb | p->mc.valid_bit;
dma_wmb();
@@ -475,6 +490,7 @@ void *qbman_swp_mc_result(struct qbman_swp *p)
u32 *ret, verb;
if ((p->desc->qman_version & QMAN_REV_MASK) < QMAN_REV_5000) {
+ qbman_inval_prefetch(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
ret = qbman_get_cmd(p, QBMAN_CENA_SWP_RR(p->mc.valid_bit));
/* Remove the valid-bit - command completed if the rest
* is non-zero.
@@ -534,6 +550,43 @@ void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success)
d->verb |= enqueue_rejects_to_fq;
}
+/**
+ * qbman_eq_desc_set_orp() - Set order-restoration in the enqueue descriptor
+ * @d: the enqueue descriptor.
+ * @respond_success: 1 = enqueue with response always; 0 = enqueue with
+ * rejections returned on a FQ.
+ * @oprid: the order point record id.
+ * @seqnum: the order restoration sequence number.
+ * @incomplete: indicates that this is not the last fragment using the same
+ * sequence number.
+ */
+void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
+ u16 oprid, u16 seqnum, int incomplete)
+{
+ d->verb |= (1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT);
+ if (respond_success)
+ d->verb |= enqueue_response_always;
+ else
+ d->verb |= enqueue_rejects_to_fq;
+ d->orpid = cpu_to_le16(oprid);
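+	/* bit 14 of the sequence word flags an incomplete (not-last) fragment */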
+ d->seqnum = cpu_to_le16((!!incomplete << 14) | seqnum);
+}
+
+/**
+ * qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence
+ * without any enqueue
+ * @d: the enqueue descriptor.
+ * @oprid: the order point record id.
+ * @seqnum: the order restoration sequence number.
+ */
+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, u16 oprid,
+ u16 seqnum)
+{
+ d->verb |= (1 << QB_ENQUEUE_CMD_ORP_ENABLE_SHIFT) | enqueue_empty;
+ d->orpid = cpu_to_le16(oprid);
+ d->seqnum = cpu_to_le16(seqnum);
+}
+
/*
* Exactly one of the following descriptor "targets" should be set. (Calling any
* one of these will replace the effect of any prior call to one of these.)
@@ -647,6 +700,7 @@ int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
const uint32_t *cl = (uint32_t *)d;
uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
int i, num_enqueued = 0;
+ uint64_t addr_cena;
spin_lock(&s->access_spinlock);
half_mask = (s->eqcr.pi_ci_mask>>1);
@@ -674,8 +728,8 @@ int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
for (i = 0; i < num_enqueued; i++) {
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
/* Skip copying the verb */
- memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
- memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
+ memcpy_toio(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy_toio(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
&fd[i], sizeof(*fd));
eqcr_pi++;
}
@@ -688,9 +742,9 @@ int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
p[0] = cl[0] | s->eqcr.pi_vb;
if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
- struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+ struct qbman_eq_desc *eq_desc = (struct qbman_eq_desc *)p;
- d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
}
eqcr_pi++;
@@ -701,7 +755,11 @@ int qbman_swp_enqueue_multiple_direct(struct qbman_swp *s,
/* Flush all the cacheline without load/store in between */
eqcr_pi = s->eqcr.pi;
- for (i = 0; i < num_enqueued; i++)
+ addr_cena = (size_t)s->addr_cena;
+ for (i = 0; i < num_enqueued; i++) {
+ dccvac((addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
eqcr_pi++;
+ }
s->eqcr.pi = eqcr_pi & full_mask;
spin_unlock(&s->access_spinlock);
@@ -738,8 +796,8 @@ int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
full_mask = s->eqcr.pi_ci_mask;
if (!s->eqcr.available) {
eqcr_ci = s->eqcr.ci;
- p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
- s->eqcr.ci = *p & full_mask;
+ s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
+ s->eqcr.ci &= full_mask;
s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
eqcr_ci, s->eqcr.ci);
if (!s->eqcr.available) {
@@ -756,8 +814,8 @@ int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
for (i = 0; i < num_enqueued; i++) {
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
/* Skip copying the verb */
- memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
- memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
+ memcpy_toio(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy_toio(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
&fd[i], sizeof(*fd));
eqcr_pi++;
}
@@ -768,9 +826,9 @@ int qbman_swp_enqueue_multiple_mem_back(struct qbman_swp *s,
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
p[0] = cl[0] | s->eqcr.pi_vb;
if (flags && (flags[i] & QBMAN_ENQUEUE_FLAG_DCA)) {
- struct qbman_eq_desc *d = (struct qbman_eq_desc *)p;
+ struct qbman_eq_desc *eq_desc = (struct qbman_eq_desc *)p;
- d->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
+ eq_desc->dca = (1 << QB_ENQUEUE_CMD_DCA_EN_SHIFT) |
((flags[i]) & QBMAN_EQCR_DCA_IDXMASK);
}
eqcr_pi++;
@@ -807,6 +865,7 @@ int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
const uint32_t *cl;
uint32_t eqcr_ci, eqcr_pi, half_mask, full_mask;
int i, num_enqueued = 0;
+ uint64_t addr_cena;
half_mask = (s->eqcr.pi_ci_mask>>1);
full_mask = s->eqcr.pi_ci_mask;
@@ -829,8 +888,8 @@ int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
cl = (uint32_t *)(&d[i]);
/* Skip copying the verb */
- memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
- memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
+ memcpy_toio(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy_toio(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
&fd[i], sizeof(*fd));
eqcr_pi++;
}
@@ -850,8 +909,12 @@ int qbman_swp_enqueue_multiple_desc_direct(struct qbman_swp *s,
/* Flush all the cacheline without load/store in between */
eqcr_pi = s->eqcr.pi;
- for (i = 0; i < num_enqueued; i++)
+ addr_cena = (uint64_t)s->addr_cena;
+ for (i = 0; i < num_enqueued; i++) {
+ dccvac((uint64_t *)(addr_cena +
+ QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask)));
eqcr_pi++;
+ }
s->eqcr.pi = eqcr_pi & full_mask;
return num_enqueued;
@@ -882,8 +945,8 @@ int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
full_mask = s->eqcr.pi_ci_mask;
if (!s->eqcr.available) {
eqcr_ci = s->eqcr.ci;
- p = s->addr_cena + QBMAN_CENA_SWP_EQCR_CI_MEMBACK;
- s->eqcr.ci = *p & full_mask;
+ s->eqcr.ci = qbman_read_register(s, QBMAN_CINH_SWP_EQCR_CI);
+ s->eqcr.ci &= full_mask;
s->eqcr.available = qm_cyc_diff(s->eqcr.pi_ring_size,
eqcr_ci, s->eqcr.ci);
if (!s->eqcr.available)
@@ -899,8 +962,8 @@ int qbman_swp_enqueue_multiple_desc_mem_back(struct qbman_swp *s,
p = (s->addr_cena + QBMAN_CENA_SWP_EQCR(eqcr_pi & half_mask));
cl = (uint32_t *)(&d[i]);
/* Skip copying the verb */
- memcpy(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
- memcpy(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
+ memcpy_toio(&p[1], &cl[1], EQ_DESC_SIZE_WITHOUT_FD - 1);
+ memcpy_toio(&p[EQ_DESC_SIZE_FD_START/sizeof(uint32_t)],
&fd[i], sizeof(*fd));
eqcr_pi++;
}
@@ -1114,6 +1177,7 @@ int qbman_swp_pull_direct(struct qbman_swp *s, struct qbman_pull_desc *d)
/* Set the verb byte, have to substitute in the valid-bit */
p->verb = d->verb | s->vdq.valid_bit;
s->vdq.valid_bit ^= QB_VALID_BIT;
+ dccvac(p);
return 0;
}
@@ -1205,8 +1269,7 @@ const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
s->dqrr.next_idx, pi);
s->dqrr.reset_bug = 0;
}
- prefetch(qbman_get_cmd(s,
- QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+ qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
}
p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
@@ -1221,8 +1284,7 @@ const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
* knew from reading PI.
*/
if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
- prefetch(qbman_get_cmd(s,
- QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+ qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
return NULL;
}
/*
@@ -1245,7 +1307,7 @@ const struct dpaa2_dq *qbman_swp_dqrr_next_direct(struct qbman_swp *s)
(flags & DPAA2_DQ_STAT_EXPIRED))
atomic_inc(&s->vdq.available);
- prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+ qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
return p;
}
@@ -1297,8 +1359,7 @@ const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
s->dqrr.next_idx, pi);
s->dqrr.reset_bug = 0;
}
- prefetch(qbman_get_cmd(s,
- QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+ qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
}
p = qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR_MEM(s->dqrr.next_idx));
@@ -1313,8 +1374,7 @@ const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
* knew from reading PI.
*/
if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) {
- prefetch(qbman_get_cmd(s,
- QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+ qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
return NULL;
}
/*
@@ -1337,7 +1397,7 @@ const struct dpaa2_dq *qbman_swp_dqrr_next_mem_back(struct qbman_swp *s)
(flags & DPAA2_DQ_STAT_EXPIRED))
atomic_inc(&s->vdq.available);
- prefetch(qbman_get_cmd(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)));
+ qbman_inval_prefetch(s, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx));
return p;
}
@@ -1472,6 +1532,7 @@ int qbman_swp_release_direct(struct qbman_swp *s,
*/
dma_wmb();
p->verb = d->verb | RAR_VB(rar) | num_buffers;
+ dccvac(p);
return 0;
}
@@ -1793,3 +1854,107 @@ u32 qbman_bp_info_num_free_bufs(struct qbman_bp_query_rslt *a)
{
return le32_to_cpu(a->fill);
}
+
+struct qbman_orp_cmd_desc {
+ u8 verb;
+ u8 reserved;
+ u8 cid;
+ u8 reserved2;
+ u16 orpid;
+ u16 seqnum;
+ u8 reserved3[56];
+};
+
+struct qbman_orp_cmd_rslt {
+ u8 verb;
+ u8 rslt;
+ u8 cid;
+ u8 reserved1[61];
+};
+
+int qbman_orp_drop(struct qbman_swp *s, u16 orpid, u16 seqnum)
+{
+ struct qbman_orp_cmd_desc *p;
+ struct qbman_orp_cmd_rslt *r;
+ void *resp;
+
+ p = (struct qbman_orp_cmd_desc *)qbman_swp_mc_start(s);
+ if (!p)
+ return -EBUSY;
+
+ p->cid = 0x7;
+ p->orpid = cpu_to_le16(orpid);
+ p->seqnum = cpu_to_le16(seqnum);
+
+ resp = qbman_swp_mc_complete(s, p, QBMAN_MC_ORP);
+ if (!resp) {
+ pr_err("qbman: Drop sequence num %d orpid 0x%x failed, no response\n",
+ seqnum, orpid);
+ return -EIO;
+ }
+ r = (struct qbman_orp_cmd_rslt *)resp;
+ /* Decode the outcome */
+ WARN_ON((r->verb & QBMAN_RESPONSE_VERB_MASK) != QBMAN_MC_ORP);
+
+ /* Determine success or failure */
+ if (r->rslt != QBMAN_MC_RSLT_OK) {
+ pr_err("Drop seqnum %d of prpid 0x%x failed, code=0x%02x\n",
+ seqnum, orpid, r->rslt);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+/**
+ * qbman_swp_set_irq_coalescing() - Set new IRQ coalescing values
+ * @p: the software portal object
+ * @irq_threshold: interrupt threshold
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ *
+ * Return 0 for success, or negative error code on error.
+ */
+int qbman_swp_set_irq_coalescing(struct qbman_swp *p, u32 irq_threshold,
+ u32 irq_holdoff)
+{
+ u32 itp, max_holdoff;
+
+	/* Convert the irq_holdoff value from microseconds to increments of
+	 * 256 QBMAN clock cycles. This depends on the QBMAN internal
+	 * frequency.
+	 */
+ itp = (irq_holdoff * 1000) / p->desc->qman_256_cycles_per_ns;
+ if (itp > 4096) {
+ max_holdoff = (p->desc->qman_256_cycles_per_ns * 4096) / 1000;
+ pr_err("irq_holdoff must be <= %uus\n", max_holdoff);
+ return -EINVAL;
+ }
+
+ if (irq_threshold >= p->dqrr.dqrr_size) {
+ pr_err("irq_threshold must be < %u\n", p->dqrr.dqrr_size - 1);
+ return -EINVAL;
+ }
+
+ p->irq_threshold = irq_threshold;
+ p->irq_holdoff = irq_holdoff;
+
+ qbman_write_register(p, QBMAN_CINH_SWP_DQRR_ITR, irq_threshold);
+ qbman_write_register(p, QBMAN_CINH_SWP_ITPR, itp);
+
+ return 0;
+}
+
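+/*
+ * Worked example (hypothetical 500 MHz QBMAN clock, so 256 cycles = 512 ns):
+ * irq_holdoff = 100 us gives itp = 100000 / 512 = 195 increments, and the
+ * largest supported holdoff is 512 * 4096 / 1000 = 2097 us.
+ */
+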
+/**
+ * qbman_swp_get_irq_coalescing() - Get the current IRQ coalescing parameters
+ * @p: the software portal object
+ * @irq_threshold: interrupt threshold (an IRQ is generated when there are more
+ * DQRR entries in the portal than the threshold)
+ * @irq_holdoff: interrupt holdoff (timeout) period in us
+ */
+void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold,
+ u32 *irq_holdoff)
+{
+ if (irq_threshold)
+ *irq_threshold = p->irq_threshold;
+ if (irq_holdoff)
+ *irq_holdoff = p->irq_holdoff;
+}
diff --git a/drivers/soc/fsl/dpio/qbman-portal.h b/drivers/soc/fsl/dpio/qbman-portal.h
index c7c2225b7d91..e40b3fa58f40 100644
--- a/drivers/soc/fsl/dpio/qbman-portal.h
+++ b/drivers/soc/fsl/dpio/qbman-portal.h
@@ -24,6 +24,8 @@ struct qbman_swp_desc {
void *cena_bar; /* Cache-enabled portal base address */
void __iomem *cinh_bar; /* Cache-inhibited portal base address */
u32 qman_version;
+ u32 qman_clk;
+ u32 qman_256_cycles_per_ns;
};
#define QBMAN_SWP_INTERRUPT_EQRI 0x01
@@ -156,6 +158,11 @@ struct qbman_swp {
} eqcr;
spinlock_t access_spinlock;
+
+ /* Interrupt coalescing */
+ u32 irq_threshold;
+ u32 irq_holdoff;
+ int use_adaptive_rx_coalesce;
};
/* Function pointers */
@@ -215,11 +222,15 @@ int qbman_result_has_new_result(struct qbman_swp *p, const struct dpaa2_dq *dq);
void qbman_eq_desc_clear(struct qbman_eq_desc *d);
void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success);
+void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success,
+ u16 oprid, u16 seqnum, int incomplete);
+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, u16 oprid, u16 seqnum);
void qbman_eq_desc_set_token(struct qbman_eq_desc *d, u8 token);
void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, u32 fqid);
void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, u32 qdid,
u32 qd_bin, u32 qd_prio);
+int qbman_orp_drop(struct qbman_swp *s, u16 orpid, u16 seqnum);
void qbman_release_desc_clear(struct qbman_release_desc *d);
void qbman_release_desc_set_bpid(struct qbman_release_desc *d, u16 bpid);
@@ -532,7 +543,7 @@ static inline int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s,
static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd,
u8 cmd_verb)
{
- int loopvar = 2000;
+ int loopvar = 10000;
qbman_swp_mc_submit(swp, cmd, cmd_verb);
@@ -648,4 +659,10 @@ static inline const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s)
return qbman_swp_dqrr_next_ptr(s);
}
+int qbman_swp_set_irq_coalescing(struct qbman_swp *p, u32 irq_threshold,
+ u32 irq_holdoff);
+
+void qbman_swp_get_irq_coalescing(struct qbman_swp *p, u32 *irq_threshold,
+ u32 *irq_holdoff);
+
#endif /* __FSL_QBMAN_PORTAL_H */
diff --git a/drivers/soc/fsl/qbman/bman_ccsr.c b/drivers/soc/fsl/qbman/bman_ccsr.c
index cb24a08be084..0177d4cba4d6 100644
--- a/drivers/soc/fsl/qbman/bman_ccsr.c
+++ b/drivers/soc/fsl/qbman/bman_ccsr.c
@@ -1,4 +1,5 @@
/* Copyright (c) 2009 - 2016 Freescale Semiconductor, Inc.
+ * Copyright 2020 Puresoftware Ltd.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -29,6 +30,7 @@
*/
#include "bman_priv.h"
+#include <linux/acpi.h>
u16 bman_ip_rev;
EXPORT_SYMBOL(bman_ip_rev);
@@ -247,7 +249,8 @@ static int fsl_bman_probe(struct platform_device *pdev)
* try using the of_reserved_mem_device method
*/
if (!fbpr_a) {
- ret = qbman_init_private_mem(dev, 0, &fbpr_a, &fbpr_sz);
+ ret = qbman_init_private_mem(dev, 0, &fbpr_a, &fbpr_sz,
+ DPAA_BMAN_DEV);
if (ret) {
dev_err(dev, "qbman_init_private_mem() failed 0x%x\n",
ret);
@@ -298,6 +301,7 @@ static int fsl_bman_probe(struct platform_device *pdev)
__bman_probed = 1;
+ dev_dbg(dev, "Bman probed successfully [%d]\n", __bman_probed);
return 0;
};
@@ -308,10 +312,15 @@ static const struct of_device_id fsl_bman_ids[] = {
{}
};
+static const struct acpi_device_id fsl_bman_acpi_ids[] = {
+ {"NXP0021", 0}
+};
+
static struct platform_driver fsl_bman_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = fsl_bman_ids,
+ .acpi_match_table = ACPI_PTR(fsl_bman_acpi_ids),
.suppress_bind_attrs = true,
},
.probe = fsl_bman_probe,
diff --git a/drivers/soc/fsl/qbman/bman_portal.c b/drivers/soc/fsl/qbman/bman_portal.c
index acda8a5637c5..7622afbc3ce8 100644
--- a/drivers/soc/fsl/qbman/bman_portal.c
+++ b/drivers/soc/fsl/qbman/bman_portal.c
@@ -1,4 +1,5 @@
/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ * Copyright 2020 Puresoftware Ltd.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -29,6 +30,7 @@
*/
#include "bman_priv.h"
+#include <linux/acpi.h>
static struct bman_portal *affine_bportals[NR_CPUS];
static struct cpumask portal_cpus;
@@ -192,7 +194,8 @@ check_cleanup:
}
bman_done_cleanup();
}
-
+ dev_dbg(dev, "Bman : Portal[%d] probed successfully [%d]\n",
+ cpu, __bman_portals_probed);
return 0;
err_portal_init:
@@ -213,10 +216,16 @@ static const struct of_device_id bman_portal_ids[] = {
};
MODULE_DEVICE_TABLE(of, bman_portal_ids);
+static const struct acpi_device_id bman_portal_acpi_ids[] = {
+ {"NXP0023", 0}
+};
+MODULE_DEVICE_TABLE(acpi, bman_portal_acpi_ids);
+
static struct platform_driver bman_portal_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = bman_portal_ids,
+ .acpi_match_table = ACPI_PTR(bman_portal_acpi_ids),
},
.probe = bman_portal_probe,
};
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.c b/drivers/soc/fsl/qbman/dpaa_sys.c
index 9dd8bb571dbc..7bc1d78e74bc 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.c
+++ b/drivers/soc/fsl/qbman/dpaa_sys.c
@@ -1,4 +1,5 @@
/* Copyright 2017 NXP Semiconductor, Inc.
+ * Copyright 2020 Puresoftware Ltd.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -28,32 +29,129 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <linux/acpi.h>
+#include <linux/dma-map-ops.h>
#include <linux/dma-mapping.h>
#include "dpaa_sys.h"
+/* QMan needs global memory areas initialized at boot time */
+static dma_addr_t qman_base_addr;
+
/*
* Initialize a devices private memory region
*/
int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
- size_t *size)
+ size_t *size, int dev_id)
{
- struct device_node *mem_node;
+ struct property_entry properties[2];
+ struct device_node *mem_node = NULL;
+ struct reserved_mem fw_mem;
struct reserved_mem *rmem;
struct property *prop;
- int len, err;
__be32 *res_array;
+ u32 qbman_vals[4];
+ u32 *pr_value;
+ int len, err;
+ int val_cnt;
+ u32 val[2];
- mem_node = of_parse_phandle(dev->of_node, "memory-region", idx);
- if (!mem_node) {
- dev_err(dev, "No memory-region found for index %d\n", idx);
- return -ENODEV;
- }
+ if (is_of_node(dev->fwnode)) {
+ mem_node = of_parse_phandle(dev->of_node, "memory-region", idx);
+ if (!mem_node) {
+ dev_err(dev, "No memory-region found for index %d\n",
+ idx);
+ return -ENODEV;
+ }
+
+ rmem = of_reserved_mem_lookup(mem_node);
+ if (!rmem) {
+ dev_err(dev, "of_reserved_mem_lookup() returned NULL\n");
+ return -ENODEV;
+ }
+ } else {
+		/*
+		 * Fetch the reserved memory size by scanning the ACPI tables.
+		 * As part of the DPAA architecture, the QMAN and BMAN h/w
+		 * nodes need large contiguous memory allocations to store
+		 * private data while the data path is running.
+		 * Request CMA for each h/w node so that the drivers can
+		 * fetch and set up the h/w in order while probing.
+		 */
+ struct page *page = NULL;
+ size_t page_sz_count = 0;
+ unsigned long pool_size_order = 0;
+
+ switch (dev_id) {
+ case DPAA_BMAN_DEV:
+ val_cnt = 1;
+ break;
+ case DPAA_QMAN_DEV:
+ val_cnt = 2;
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ err = fwnode_property_read_u32_array(dev->fwnode,
+ "size", val,
+ val_cnt);
+ if (err < 0)
+ return err;
- rmem = of_reserved_mem_lookup(mem_node);
- if (!rmem) {
- dev_err(dev, "of_reserved_mem_lookup() returned NULL\n");
- return -ENODEV;
+ fw_mem.size = val[idx];
+
+ if (dev_id == DPAA_BMAN_DEV) {
+ /* In case of Bman, calculate page count and order.
+ * Try allocating this 16MB chunk in one go.
+ */
+ page_sz_count = ((fw_mem.size >> PAGE_SHIFT) +
+ ((fw_mem.size & 0xFFF) ? 1 : 0));
+ pool_size_order = get_order(fw_mem.size);
+ } else {
+ if (!idx) {
+				/* In case of Qman, allocate 48 MB
+				 * (8MB + 8MB + 32MB); ideally we need only
+				 * (8MB + 32MB). The extra 8MB is there just
+				 * to get the correct alignment order.
+				 */
+ fw_mem.size = ((2 * val[idx]) + val[idx + 1]);
+ page_sz_count = ((fw_mem.size >> PAGE_SHIFT) +
+ ((fw_mem.size & 0xFFF) ? 1 : 0));
+ pool_size_order = get_order(fw_mem.size);
+				/* Once the large chunk (48MB) is allocated,
+				 * reset the size to the actual 8MB for the
+				 * h/w node at index 0.
+				 */
+ fw_mem.size = val[idx];
+ } else {
+ /* From the large chunk of 48MB, slice it
+ * at base_address + 16MB, to get the aligned
+ * 32MB chunk.
+ */
+ fw_mem.base =
+ (qman_base_addr + (2 * val[idx - 1]));
+ fw_mem.size = val[idx];
+ }
+ }
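+		/*
+		 * Worked example (hypothetical sizes): with a QMan "size"
+		 * property of {8 MB, 32 MB}, idx 0 allocates 2*8 + 32 = 48 MB
+		 * and reports an 8 MB FQD area at the base; idx 1 then
+		 * returns the 32 MB PFDR area at base + 16 MB.
+		 */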
+ if (!qman_base_addr) {
+ page = dma_alloc_from_contiguous(dev, page_sz_count,
+ pool_size_order,
+ false);
+ if (!page) {
+				dev_err(dev, "dma_alloc_from_contiguous failed\n");
+ return -ENOMEM;
+ }
+ fw_mem.base = page_to_phys(page);
+ if (dev_id == DPAA_QMAN_DEV)
+ qman_base_addr = fw_mem.base;
+ }
+ /* Set the resource buffer */
+ rmem = &fw_mem;
+
+ dev_info(dev, "QBman : dev [%d] index [%d] mem-base [%llx] size [%llx]\n",
+ dev_id, idx, rmem->base, rmem->size);
}
+
*addr = rmem->base;
*size = rmem->size;
@@ -63,26 +161,49 @@ int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
* This is needed because QBMan HW does not allow the base address/
* size to be modified once set.
*/
- prop = of_find_property(mem_node, "reg", &len);
- if (!prop) {
- prop = devm_kzalloc(dev, sizeof(*prop), GFP_KERNEL);
- if (!prop)
- return -ENOMEM;
- prop->value = res_array = devm_kzalloc(dev, sizeof(__be32) * 4,
- GFP_KERNEL);
- if (!prop->value)
- return -ENOMEM;
- res_array[0] = cpu_to_be32(upper_32_bits(*addr));
- res_array[1] = cpu_to_be32(lower_32_bits(*addr));
- res_array[2] = cpu_to_be32(upper_32_bits(*size));
- res_array[3] = cpu_to_be32(lower_32_bits(*size));
- prop->length = sizeof(__be32) * 4;
- prop->name = devm_kstrdup(dev, "reg", GFP_KERNEL);
- if (!prop->name)
- return -ENOMEM;
- err = of_add_property(mem_node, prop);
- if (err)
- return err;
+ if (is_of_node(dev->fwnode)) {
+ prop = of_find_property(mem_node, "reg", &len);
+ if (!prop) {
+ prop = devm_kzalloc(dev, sizeof(*prop), GFP_KERNEL);
+ if (!prop)
+ return -ENOMEM;
+ prop->value = devm_kzalloc(dev, sizeof(__be32) * 4,
+ GFP_KERNEL);
+ if (!prop->value)
+ return -ENOMEM;
+ res_array = prop->value;
+ res_array[0] = cpu_to_be32(upper_32_bits(*addr));
+ res_array[1] = cpu_to_be32(lower_32_bits(*addr));
+ res_array[2] = cpu_to_be32(upper_32_bits(*size));
+ res_array[3] = cpu_to_be32(lower_32_bits(*size));
+ prop->length = sizeof(__be32) * 4;
+ prop->name = devm_kstrdup(dev, "reg", GFP_KERNEL);
+ if (!prop->name)
+ return -ENOMEM;
+ err = of_add_property(mem_node, prop);
+ if (err)
+ return err;
+ }
+ } else {
+ if (!device_property_present(dev, "reg")) {
+ /* Fill properties here */
+			pr_value = devm_kzalloc(dev, sizeof(u32) * 4,
+						GFP_KERNEL);
+			if (!pr_value)
+				return -ENOMEM;
+ pr_value[0] = upper_32_bits(*addr);
+ pr_value[1] = lower_32_bits(*addr);
+ pr_value[2] = upper_32_bits(*size);
+ pr_value[3] = lower_32_bits(*size);
+
+ qbman_vals[0] = pr_value[0];
+ qbman_vals[1] = pr_value[1];
+ qbman_vals[2] = pr_value[2];
+ qbman_vals[3] = pr_value[3];
+
+ properties[0] =
+ PROPERTY_ENTRY_U32_ARRAY("reg", qbman_vals);
+
+ device_create_managed_software_node(dev, properties, NULL);
+ }
}
return 0;
diff --git a/drivers/soc/fsl/qbman/dpaa_sys.h b/drivers/soc/fsl/qbman/dpaa_sys.h
index ae8afa552b1e..8c1614f80935 100644
--- a/drivers/soc/fsl/qbman/dpaa_sys.h
+++ b/drivers/soc/fsl/qbman/dpaa_sys.h
@@ -47,6 +47,9 @@
#include <linux/io.h>
#include <linux/delay.h>
+#define DPAA_QMAN_DEV 0
+#define DPAA_BMAN_DEV 1
+
/* For 2-element tables related to cache-inhibited and cache-enabled mappings */
#define DPAA_PORTAL_CE 0
#define DPAA_PORTAL_CI 1
@@ -102,7 +105,7 @@ static inline u8 dpaa_cyc_diff(u8 ringsize, u8 first, u8 last)
/* Initialize the devices private memory region */
int qbman_init_private_mem(struct device *dev, int idx, dma_addr_t *addr,
- size_t *size);
+ size_t *size, int dev_id);
/* memremap() attributes for different platforms */
#ifdef CONFIG_PPC
diff --git a/drivers/soc/fsl/qbman/qman_ccsr.c b/drivers/soc/fsl/qbman/qman_ccsr.c
index 157659fd033a..e545bcf7bc2e 100644
--- a/drivers/soc/fsl/qbman/qman_ccsr.c
+++ b/drivers/soc/fsl/qbman/qman_ccsr.c
@@ -1,4 +1,5 @@
/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ * Copyright 2020 Puresoftware Ltd.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -28,6 +29,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <linux/acpi.h>
#include "qman_priv.h"
u16 qman_ip_rev;
@@ -252,10 +254,18 @@ static const struct qman_error_info_mdata error_mdata[] = {
{ 0x01FF, 24, "FQD cache tag memory 3" },
{ 0x0FFF, 512, "FQD cache memory" },
{ 0x07FF, 128, "SFDR memory" },
- { 0x01FF, 72, "WQ context memory" },
+ { 0x01FF, 84, "WQ context memory" },
{ 0x00FF, 240, "CGR memory" },
{ 0x00FF, 302, "Internal Order Restoration List memory" },
- { 0x01FF, 256, "SW portal ring memory" },
+ { 0x7FFF, 256, "SW portal ring memory" },
+ { 0x07FF, 181, "CEETM class queue descriptor memory" },
+ { 0x0FFF, 140, "CEETM extended SFDR memory" },
+ { 0x0FFF, 25, "CEETM logical FQ mapping memory" },
+ { 0x0FFF, 96, "CEETM dequeue context memory" },
+ { 0x07FF, 396, "CEETM ccgr memory" },
+ { 0x00FF, 146, "CEETM CQ channel shaping memory" },
+ { 0x007F, 256, "CEETM CQ channel scheduling memory" },
+ { 0x01FF, 88, "CEETM dequeue statistics memory" },
};
#define QMAN_ERRS_TO_DISABLE (QM_EIRQ_PLWI | QM_EIRQ_PEBI)
@@ -766,6 +776,9 @@ static int fsl_qman_probe(struct platform_device *pdev)
node);
return -ENXIO;
}
+ dev_dbg(dev, "Qman IORESOURCE [%llx] of size [%llx]\n",
+ res->start, resource_size(res));
+
qm_ccsr_start = devm_ioremap(dev, res->start, resource_size(res));
if (!qm_ccsr_start)
return -ENXIO;
@@ -795,6 +808,7 @@ static int fsl_qman_probe(struct platform_device *pdev)
qm_channel_pool1 = QMAN_CHANNEL_POOL1_REV3;
qm_channel_caam = QMAN_CHANNEL_CAAM_REV3;
}
+ dev_dbg(dev, "Qman version:%04x,%02x,%02x\n", id, major, minor);
if (fqd_a) {
#ifdef CONFIG_PPC
@@ -812,7 +826,8 @@ static int fsl_qman_probe(struct platform_device *pdev)
* in order to ensure allocations from the correct regions the
* driver initializes then allocates each piece in order
*/
- ret = qbman_init_private_mem(dev, 0, &fqd_a, &fqd_sz);
+ ret = qbman_init_private_mem(dev, 0, &fqd_a, &fqd_sz,
+ DPAA_QMAN_DEV);
if (ret) {
dev_err(dev, "qbman_init_private_mem() for FQD failed 0x%x\n",
ret);
@@ -823,7 +838,8 @@ static int fsl_qman_probe(struct platform_device *pdev)
if (!pfdr_a) {
/* Setup PFDR memory */
- ret = qbman_init_private_mem(dev, 1, &pfdr_a, &pfdr_sz);
+ ret = qbman_init_private_mem(dev, 1, &pfdr_a, &pfdr_sz,
+ DPAA_QMAN_DEV);
if (ret) {
dev_err(dev, "qbman_init_private_mem() for PFDR failed 0x%x\n",
ret);
@@ -894,6 +910,7 @@ static int fsl_qman_probe(struct platform_device *pdev)
return ret;
__qman_probed = 1;
+ dev_dbg(dev, "Qman probed successfully [%d]\n", __qman_probed);
return 0;
}
@@ -905,10 +922,16 @@ static const struct of_device_id fsl_qman_ids[] = {
{}
};
+static const struct acpi_device_id fsl_qman_acpi_ids[] = {
+ {"NXP0028", 0}
+};
+MODULE_DEVICE_TABLE(acpi, fsl_qman_acpi_ids);
+
static struct platform_driver fsl_qman_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = fsl_qman_ids,
+ .acpi_match_table = ACPI_PTR(fsl_qman_acpi_ids),
.suppress_bind_attrs = true,
},
.probe = fsl_qman_probe,
diff --git a/drivers/soc/fsl/qbman/qman_portal.c b/drivers/soc/fsl/qbman/qman_portal.c
index 96f74a1dc603..2fca9694d19e 100644
--- a/drivers/soc/fsl/qbman/qman_portal.c
+++ b/drivers/soc/fsl/qbman/qman_portal.c
@@ -1,4 +1,5 @@
/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
+ * Copyright 2020 Puresoftware Ltd.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -28,6 +29,7 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include <linux/acpi.h>
#include "qman_priv.h"
struct qman_portal *qman_dma_portal;
@@ -217,7 +219,11 @@ static int qman_portal_probe(struct platform_device *pdev)
goto err_ioremap1;
}
- err = of_property_read_u32(node, "cell-index", &val);
+ if (is_of_node(pdev->dev.fwnode))
+ err = of_property_read_u32(node, "cell-index", &val);
+ else
+ err = device_property_read_u32(&pdev->dev, "cell-index", &val);
+
if (err) {
dev_err(dev, "Can't get %pOF property 'cell-index'\n", node);
__qman_portals_probed = -1;
@@ -290,7 +296,8 @@ check_cleanup:
}
qman_done_cleanup();
}
-
+ dev_dbg(dev, "Qman : Portal[%d] probed successfully [%d]\n",
+ cpu, __qman_portals_probed);
return 0;
err_portal_init:
@@ -311,10 +318,16 @@ static const struct of_device_id qman_portal_ids[] = {
};
MODULE_DEVICE_TABLE(of, qman_portal_ids);
+static const struct acpi_device_id qman_portal_acpi_ids[] = {
+ {"NXP0022", 0}
+};
+MODULE_DEVICE_TABLE(acpi, qman_portal_acpi_ids);
+
static struct platform_driver qman_portal_driver = {
.driver = {
.name = KBUILD_MODNAME,
.of_match_table = qman_portal_ids,
+ .acpi_match_table = ACPI_PTR(qman_portal_acpi_ids),
},
.probe = qman_portal_probe,
};
diff --git a/drivers/soc/fsl/qixis_ctrl.c b/drivers/soc/fsl/qixis_ctrl.c
new file mode 100644
index 000000000000..cc4696cf9d50
--- /dev/null
+++ b/drivers/soc/fsl/qixis_ctrl.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/* Freescale QIXIS system controller driver.
+ *
+ * Copyright 2015 Freescale Semiconductor, Inc.
+ * Copyright 2018-2019 NXP
+ */
+
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/mfd/core.h>
+#include <linux/of.h>
+#include <linux/regmap.h>
+
+/* QIXIS MAP */
+struct fsl_qixis_regs {
+ u8 id; /* Identification Registers */
+ u8 version; /* Version Register */
+ u8 qixis_ver; /* QIXIS Version Register */
+ u8 reserved1[0x1f];
+};
+
+struct qixis_priv {
+ struct regmap *regmap;
+};
+
+static struct regmap_config qixis_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
+static const struct mfd_cell fsl_qixis_devs[] = {
+ {
+ .name = "reg-mux",
+ .of_compatible = "reg-mux",
+ },
+};
+
+static int fsl_qixis_i2c_probe(struct i2c_client *client)
+{
+ struct qixis_priv *priv;
+ int ret = 0;
+ u32 qver;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -EOPNOTSUPP;
+
+ priv = devm_kzalloc(&client->dev, sizeof(struct qixis_priv),
+ GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+	priv->regmap = regmap_init_i2c(client, &qixis_regmap_config);
+	if (IS_ERR(priv->regmap))
+		return PTR_ERR(priv->regmap);
+
+	regmap_read(priv->regmap, offsetof(struct fsl_qixis_regs, qixis_ver),
+		    &qver);
+	pr_info("Freescale QIXIS Version: 0x%08x\n", qver);
+
+ i2c_set_clientdata(client, priv);
+
+ if (of_device_is_compatible(client->dev.of_node, "simple-mfd"))
+ ret = devm_mfd_add_devices(&client->dev, -1, fsl_qixis_devs,
+ ARRAY_SIZE(fsl_qixis_devs), NULL, 0,
+ NULL);
+ if (ret)
+ goto error;
+
+ return ret;
+error:
+ regmap_exit(priv->regmap);
+
+ return ret;
+}
+
+static int fsl_qixis_i2c_remove(struct i2c_client *client)
+{
+ struct qixis_priv *priv;
+
+ priv = i2c_get_clientdata(client);
+ regmap_exit(priv->regmap);
+
+ return 0;
+}
+
+static const struct of_device_id fsl_qixis_i2c_of_match[] = {
+ { .compatible = "fsl,fpga-qixis-i2c" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, fsl_qixis_i2c_of_match);
+
+static struct i2c_driver fsl_qixis_i2c_driver = {
+ .driver = {
+ .name = "qixis_ctrl_i2c",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(fsl_qixis_i2c_of_match),
+ },
+ .probe_new = fsl_qixis_i2c_probe,
+ .remove = fsl_qixis_i2c_remove,
+};
+module_i2c_driver(fsl_qixis_i2c_driver);
+
+MODULE_AUTHOR("Wang Dongsheng <dongsheng.wang@freescale.com>");
+MODULE_DESCRIPTION("Freescale QIXIS system controller driver");
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/soc/fsl/sleep_fsm.c b/drivers/soc/fsl/sleep_fsm.c
new file mode 100644
index 000000000000..a30309863822
--- /dev/null
+++ b/drivers/soc/fsl/sleep_fsm.c
@@ -0,0 +1,279 @@
+/*
+ * deep sleep FSM (finite-state machine) configuration
+ *
+ * Copyright 2018 NXP
+ *
+ * Author: Hongbo Zhang <hongbo.zhang@freescale.com>
+ * Chenhui Zhao <chenhui.zhao@freescale.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/types.h>
+
+#include "sleep_fsm.h"
+/*
+ * These values are from chip's reference manual. For example,
+ * the values for T1040 can be found in "8.4.3.8 Programming
+ * supporting deep sleep mode" of Chapter 8 "Run Control and
+ * Power Management (RCPM)".
+ * The default value can be applied to T104x, LS1021.
+ */
+struct fsm_reg_vals epu_default_val[] = {
+ /* EPGCR (Event Processor Global Control Register) */
+ {EPGCR, 0},
+ /* EPECR (Event Processor Event Control Registers) */
+ {EPECR0 + EPECR_STRIDE * 0, 0},
+ {EPECR0 + EPECR_STRIDE * 1, 0},
+ {EPECR0 + EPECR_STRIDE * 2, 0xF0004004},
+ {EPECR0 + EPECR_STRIDE * 3, 0x80000084},
+ {EPECR0 + EPECR_STRIDE * 4, 0x20000084},
+ {EPECR0 + EPECR_STRIDE * 5, 0x08000004},
+ {EPECR0 + EPECR_STRIDE * 6, 0x80000084},
+ {EPECR0 + EPECR_STRIDE * 7, 0x80000084},
+ {EPECR0 + EPECR_STRIDE * 8, 0x60000084},
+ {EPECR0 + EPECR_STRIDE * 9, 0x08000084},
+ {EPECR0 + EPECR_STRIDE * 10, 0x42000084},
+ {EPECR0 + EPECR_STRIDE * 11, 0x90000084},
+ {EPECR0 + EPECR_STRIDE * 12, 0x80000084},
+ {EPECR0 + EPECR_STRIDE * 13, 0x08000084},
+ {EPECR0 + EPECR_STRIDE * 14, 0x02000084},
+ {EPECR0 + EPECR_STRIDE * 15, 0x00000004},
+ /*
+ * EPEVTCR (Event Processor EVT Pin Control Registers)
+	 * SCU8 triggers EVT2, and SCU11 triggers EVT9
+ */
+ {EPEVTCR0 + EPEVTCR_STRIDE * 0, 0},
+ {EPEVTCR0 + EPEVTCR_STRIDE * 1, 0},
+ {EPEVTCR0 + EPEVTCR_STRIDE * 2, 0x80000001},
+ {EPEVTCR0 + EPEVTCR_STRIDE * 3, 0},
+ {EPEVTCR0 + EPEVTCR_STRIDE * 4, 0},
+ {EPEVTCR0 + EPEVTCR_STRIDE * 5, 0},
+ {EPEVTCR0 + EPEVTCR_STRIDE * 6, 0},
+ {EPEVTCR0 + EPEVTCR_STRIDE * 7, 0},
+ {EPEVTCR0 + EPEVTCR_STRIDE * 8, 0},
+ {EPEVTCR0 + EPEVTCR_STRIDE * 9, 0xB0000001},
+ /* EPCMPR (Event Processor Counter Compare Registers) */
+ {EPCMPR0 + EPCMPR_STRIDE * 0, 0},
+ {EPCMPR0 + EPCMPR_STRIDE * 1, 0},
+ {EPCMPR0 + EPCMPR_STRIDE * 2, 0x000000FF},
+ {EPCMPR0 + EPCMPR_STRIDE * 3, 0},
+ {EPCMPR0 + EPCMPR_STRIDE * 4, 0x000000FF},
+ {EPCMPR0 + EPCMPR_STRIDE * 5, 0x00000020},
+ {EPCMPR0 + EPCMPR_STRIDE * 6, 0},
+ {EPCMPR0 + EPCMPR_STRIDE * 7, 0},
+ {EPCMPR0 + EPCMPR_STRIDE * 8, 0x000000FF},
+ {EPCMPR0 + EPCMPR_STRIDE * 9, 0x000000FF},
+ {EPCMPR0 + EPCMPR_STRIDE * 10, 0x000000FF},
+ {EPCMPR0 + EPCMPR_STRIDE * 11, 0x000000FF},
+ {EPCMPR0 + EPCMPR_STRIDE * 12, 0x000000FF},
+ {EPCMPR0 + EPCMPR_STRIDE * 13, 0},
+ {EPCMPR0 + EPCMPR_STRIDE * 14, 0x000000FF},
+ {EPCMPR0 + EPCMPR_STRIDE * 15, 0x000000FF},
+ /* EPCCR (Event Processor Counter Control Registers) */
+ {EPCCR0 + EPCCR_STRIDE * 0, 0},
+ {EPCCR0 + EPCCR_STRIDE * 1, 0},
+ {EPCCR0 + EPCCR_STRIDE * 2, 0x92840000},
+ {EPCCR0 + EPCCR_STRIDE * 3, 0},
+ {EPCCR0 + EPCCR_STRIDE * 4, 0x92840000},
+ {EPCCR0 + EPCCR_STRIDE * 5, 0x92840000},
+ {EPCCR0 + EPCCR_STRIDE * 6, 0},
+ {EPCCR0 + EPCCR_STRIDE * 7, 0},
+ {EPCCR0 + EPCCR_STRIDE * 8, 0x92840000},
+ {EPCCR0 + EPCCR_STRIDE * 9, 0x92840000},
+ {EPCCR0 + EPCCR_STRIDE * 10, 0x92840000},
+ {EPCCR0 + EPCCR_STRIDE * 11, 0x92840000},
+ {EPCCR0 + EPCCR_STRIDE * 12, 0x92840000},
+ {EPCCR0 + EPCCR_STRIDE * 13, 0},
+ {EPCCR0 + EPCCR_STRIDE * 14, 0x92840000},
+ {EPCCR0 + EPCCR_STRIDE * 15, 0x92840000},
+ /* EPSMCR (Event Processor SCU Mux Control Registers) */
+ {EPSMCR0 + EPSMCR_STRIDE * 0, 0},
+ {EPSMCR0 + EPSMCR_STRIDE * 1, 0},
+ {EPSMCR0 + EPSMCR_STRIDE * 2, 0x6C700000},
+ {EPSMCR0 + EPSMCR_STRIDE * 3, 0x2F000000},
+ {EPSMCR0 + EPSMCR_STRIDE * 4, 0x002F0000},
+ {EPSMCR0 + EPSMCR_STRIDE * 5, 0x00002E00},
+ {EPSMCR0 + EPSMCR_STRIDE * 6, 0x7C000000},
+ {EPSMCR0 + EPSMCR_STRIDE * 7, 0x30000000},
+ {EPSMCR0 + EPSMCR_STRIDE * 8, 0x64300000},
+ {EPSMCR0 + EPSMCR_STRIDE * 9, 0x00003000},
+ {EPSMCR0 + EPSMCR_STRIDE * 10, 0x65000030},
+ {EPSMCR0 + EPSMCR_STRIDE * 11, 0x31740000},
+ {EPSMCR0 + EPSMCR_STRIDE * 12, 0x7F000000},
+ {EPSMCR0 + EPSMCR_STRIDE * 13, 0x00003100},
+ {EPSMCR0 + EPSMCR_STRIDE * 14, 0x00000031},
+ {EPSMCR0 + EPSMCR_STRIDE * 15, 0x76000000},
+ /* EPACR (Event Processor Action Control Registers) */
+ {EPACR0 + EPACR_STRIDE * 0, 0},
+ {EPACR0 + EPACR_STRIDE * 1, 0},
+ {EPACR0 + EPACR_STRIDE * 2, 0},
+ {EPACR0 + EPACR_STRIDE * 3, 0x00000080},
+ {EPACR0 + EPACR_STRIDE * 4, 0},
+ {EPACR0 + EPACR_STRIDE * 5, 0x00000040},
+ {EPACR0 + EPACR_STRIDE * 6, 0},
+ {EPACR0 + EPACR_STRIDE * 7, 0},
+ {EPACR0 + EPACR_STRIDE * 8, 0},
+ {EPACR0 + EPACR_STRIDE * 9, 0x0000001C},
+ {EPACR0 + EPACR_STRIDE * 10, 0x00000020},
+ {EPACR0 + EPACR_STRIDE * 11, 0},
+ {EPACR0 + EPACR_STRIDE * 12, 0x00000003},
+ {EPACR0 + EPACR_STRIDE * 13, 0x06000000},
+ {EPACR0 + EPACR_STRIDE * 14, 0x04000000},
+ {EPACR0 + EPACR_STRIDE * 15, 0x02000000},
+ /* EPIMCR (Event Processor Input Mux Control Registers) */
+ {EPIMCR0 + EPIMCR_STRIDE * 0, 0},
+ {EPIMCR0 + EPIMCR_STRIDE * 1, 0},
+ {EPIMCR0 + EPIMCR_STRIDE * 2, 0},
+ {EPIMCR0 + EPIMCR_STRIDE * 3, 0},
+ {EPIMCR0 + EPIMCR_STRIDE * 4, 0x44000000},
+ {EPIMCR0 + EPIMCR_STRIDE * 5, 0x40000000},
+ {EPIMCR0 + EPIMCR_STRIDE * 6, 0},
+ {EPIMCR0 + EPIMCR_STRIDE * 7, 0},
+ {EPIMCR0 + EPIMCR_STRIDE * 8, 0},
+ {EPIMCR0 + EPIMCR_STRIDE * 9, 0},
+ {EPIMCR0 + EPIMCR_STRIDE * 10, 0},
+ {EPIMCR0 + EPIMCR_STRIDE * 11, 0},
+ {EPIMCR0 + EPIMCR_STRIDE * 12, 0x44000000},
+ {EPIMCR0 + EPIMCR_STRIDE * 13, 0},
+ {EPIMCR0 + EPIMCR_STRIDE * 14, 0},
+ {EPIMCR0 + EPIMCR_STRIDE * 15, 0},
+ {EPIMCR0 + EPIMCR_STRIDE * 16, 0x6A000000},
+ {EPIMCR0 + EPIMCR_STRIDE * 17, 0},
+ {EPIMCR0 + EPIMCR_STRIDE * 18, 0},
+ {EPIMCR0 + EPIMCR_STRIDE * 19, 0},
+ {EPIMCR0 + EPIMCR_STRIDE * 20, 0x48000000},
+ {EPIMCR0 + EPIMCR_STRIDE * 21, 0},
+ {EPIMCR0 + EPIMCR_STRIDE * 22, 0x6C000000},
+ {EPIMCR0 + EPIMCR_STRIDE * 23, 0},
+ {EPIMCR0 + EPIMCR_STRIDE * 24, 0},
+ {EPIMCR0 + EPIMCR_STRIDE * 25, 0},
+ {EPIMCR0 + EPIMCR_STRIDE * 26, 0},
+ {EPIMCR0 + EPIMCR_STRIDE * 27, 0},
+ {EPIMCR0 + EPIMCR_STRIDE * 28, 0x76000000},
+ {EPIMCR0 + EPIMCR_STRIDE * 29, 0},
+ {EPIMCR0 + EPIMCR_STRIDE * 30, 0},
+ {EPIMCR0 + EPIMCR_STRIDE * 31, 0x76000000},
+ /* EPXTRIGCR (Event Processor Crosstrigger Control Register) */
+ {EPXTRIGCR, 0x0000FFDF},
+ /* end */
+ {FSM_END_FLAG, 0},
+};
+
+struct fsm_reg_vals npc_default_val[] = {
+ /* NPC triggered Memory-Mapped Access Registers */
+ {NCR, 0x80000000},
+ {MCCR1, 0},
+ {MCSR1, 0},
+ {MMAR1LO, 0},
+ {MMAR1HI, 0},
+ {MMDR1, 0},
+ {MCSR2, 0},
+ {MMAR2LO, 0},
+ {MMAR2HI, 0},
+ {MMDR2, 0},
+ {MCSR3, 0x80000000},
+ {MMAR3LO, 0x000E2130},
+ {MMAR3HI, 0x00030000},
+ {MMDR3, 0x00020000},
+ /* end */
+ {FSM_END_FLAG, 0},
+};
+
+/**
+ * fsl_fsm_setup - Configure EPU's FSM registers
+ * @base: base address of the FSM register block
+ * @val: pointer to a FSM_END_FLAG-terminated table of offset/value pairs
+ */
+void fsl_fsm_setup(void __iomem *base, struct fsm_reg_vals *val)
+{
+ struct fsm_reg_vals *data = val;
+
+ if (WARN_ON(!base || !data))
+ return;
+ while (data->offset != FSM_END_FLAG) {
+ iowrite32be(data->value, base + data->offset);
+ data++;
+ }
+}
+
+void fsl_epu_setup_default(void __iomem *epu_base)
+{
+ fsl_fsm_setup(epu_base, epu_default_val);
+}
+
+void fsl_npc_setup_default(void __iomem *npc_base)
+{
+ fsl_fsm_setup(npc_base, npc_default_val);
+}
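+
+/*
+ * Illustrative call sequence from a platform suspend path (a sketch
+ * only; epu_base/npc_base are assumed to be ioremap()ed by the caller):
+ *
+ *	fsl_epu_setup_default(epu_base);
+ *	fsl_npc_setup_default(npc_base);
+ *	// ... enter deep sleep, then resume ...
+ *	fsl_epu_clean_default(epu_base);
+ */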
+
+void fsl_epu_clean_default(void __iomem *epu_base)
+{
+ u32 offset;
+
+ /* follow the exact sequence to clear the registers */
+ /* Clear EPACRn */
+ for (offset = EPACR0; offset <= EPACR15; offset += EPACR_STRIDE)
+ iowrite32be(0, epu_base + offset);
+
+ /* Clear EPEVTCRn */
+ for (offset = EPEVTCR0; offset <= EPEVTCR9; offset += EPEVTCR_STRIDE)
+ iowrite32be(0, epu_base + offset);
+
+ /* Clear EPGCR */
+ iowrite32be(0, epu_base + EPGCR);
+
+ /* Clear EPSMCRn */
+ for (offset = EPSMCR0; offset <= EPSMCR15; offset += EPSMCR_STRIDE)
+ iowrite32be(0, epu_base + offset);
+
+ /* Clear EPCCRn */
+ for (offset = EPCCR0; offset <= EPCCR31; offset += EPCCR_STRIDE)
+ iowrite32be(0, epu_base + offset);
+
+ /* Clear EPCMPRn */
+ for (offset = EPCMPR0; offset <= EPCMPR31; offset += EPCMPR_STRIDE)
+ iowrite32be(0, epu_base + offset);
+
+ /* Clear EPCTRn */
+ for (offset = EPCTR0; offset <= EPCTR31; offset += EPCTR_STRIDE)
+ iowrite32be(0, epu_base + offset);
+
+ /* Clear EPIMCRn */
+ for (offset = EPIMCR0; offset <= EPIMCR31; offset += EPIMCR_STRIDE)
+ iowrite32be(0, epu_base + offset);
+
+ /* Clear EPXTRIGCRn */
+ iowrite32be(0, epu_base + EPXTRIGCR);
+
+ /* Clear EPECRn */
+ for (offset = EPECR0; offset <= EPECR15; offset += EPECR_STRIDE)
+ iowrite32be(0, epu_base + offset);
+}
diff --git a/drivers/soc/fsl/sleep_fsm.h b/drivers/soc/fsl/sleep_fsm.h
new file mode 100644
index 000000000000..e0013c0d9984
--- /dev/null
+++ b/drivers/soc/fsl/sleep_fsm.h
@@ -0,0 +1,130 @@
+/*
+ * deep sleep FSM (finite-state machine) configuration
+ *
+ * Copyright 2018 NXP
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the above-listed copyright holders nor the
+ * names of any contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ *
+ * ALTERNATIVELY, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") as published by the Free Software
+ * Foundation, either version 2 of that License or (at your option) any
+ * later version.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _FSL_SLEEP_FSM_H
+#define _FSL_SLEEP_FSM_H
+
+#define FSL_STRIDE_4B 4
+#define FSL_STRIDE_8B 8
+
+/* End flag */
+#define FSM_END_FLAG 0xFFFFFFFFUL
+
+/* Block offsets */
+#define RCPM_BLOCK_OFFSET 0x00022000
+#define EPU_BLOCK_OFFSET 0x00000000
+#define NPC_BLOCK_OFFSET 0x00001000
+
+/* EPGCR (Event Processor Global Control Register) */
+#define EPGCR 0x000
+
+/* EPEVTCR0-9 (Event Processor EVT Pin Control Registers) */
+#define EPEVTCR0 0x050
+#define EPEVTCR9 0x074
+#define EPEVTCR_STRIDE FSL_STRIDE_4B
+
+/* EPXTRIGCR (Event Processor Crosstrigger Control Register) */
+#define EPXTRIGCR 0x090
+
+/* EPIMCR0-31 (Event Processor Input Mux Control Registers) */
+#define EPIMCR0 0x100
+#define EPIMCR31 0x17C
+#define EPIMCR_STRIDE FSL_STRIDE_4B
+
+/* EPSMCR0-15 (Event Processor SCU Mux Control Registers) */
+#define EPSMCR0 0x200
+#define EPSMCR15 0x278
+#define EPSMCR_STRIDE FSL_STRIDE_8B
+
+/* EPECR0-15 (Event Processor Event Control Registers) */
+#define EPECR0 0x300
+#define EPECR15 0x33C
+#define EPECR_STRIDE FSL_STRIDE_4B
+
+/* EPACR0-15 (Event Processor Action Control Registers) */
+#define EPACR0 0x400
+#define EPACR15 0x43C
+#define EPACR_STRIDE FSL_STRIDE_4B
+
+/* EPCCR0-31 (Event Processor Counter Control Registers) */
+#define EPCCR0 0x800
+#define EPCCR15 0x83C
+#define EPCCR31 0x87C
+#define EPCCR_STRIDE FSL_STRIDE_4B
+
+/* EPCMPR0-31 (Event Processor Counter Compare Registers) */
+#define EPCMPR0 0x900
+#define EPCMPR15 0x93C
+#define EPCMPR31 0x97C
+#define EPCMPR_STRIDE FSL_STRIDE_4B
+
+/* EPCTR0-31 (Event Processor Counter Registers) */
+#define EPCTR0 0xA00
+#define EPCTR31 0xA7C
+#define EPCTR_STRIDE FSL_STRIDE_4B
+
+/* NPC triggered Memory-Mapped Access Registers */
+#define NCR 0x000
+#define MCCR1 0x0CC
+#define MCSR1 0x0D0
+#define MMAR1LO 0x0D4
+#define MMAR1HI 0x0D8
+#define MMDR1 0x0DC
+#define MCSR2 0x0E0
+#define MMAR2LO 0x0E4
+#define MMAR2HI 0x0E8
+#define MMDR2 0x0EC
+#define MCSR3 0x0F0
+#define MMAR3LO 0x0F4
+#define MMAR3HI 0x0F8
+#define MMDR3 0x0FC
+
+/* RCPM Core State Action Control Register 0 */
+#define CSTTACR0 0xB00
+
+/* RCPM Core Group 1 Configuration Register 0 */
+#define CG1CR0 0x31C
+
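+/* A register/value pair; tables of these end with an entry whose offset is FSM_END_FLAG. */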
+struct fsm_reg_vals {
+ u32 offset;
+ u32 value;
+};
+
+void fsl_fsm_setup(void __iomem *base, struct fsm_reg_vals *val);
+void fsl_epu_setup_default(void __iomem *epu_base);
+void fsl_npc_setup_default(void __iomem *npc_base);
+void fsl_epu_clean_default(void __iomem *epu_base);
+
+#endif /* _FSL_SLEEP_FSM_H */
diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig
index 05812f8ae734..14a98e4877f9 100644
--- a/drivers/soc/imx/Kconfig
+++ b/drivers/soc/imx/Kconfig
@@ -1,6 +1,8 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "i.MX SoC drivers"
+source "drivers/soc/imx/mu/Kconfig"
+
config IMX_GPCV2_PM_DOMAINS
bool "i.MX GPCv2 PM domains"
depends on ARCH_MXC || (COMPILE_TEST && OF)
@@ -8,8 +10,18 @@ config IMX_GPCV2_PM_DOMAINS
select PM_GENERIC_DOMAINS
default y if SOC_IMX7D
config SOC_IMX8M
- bool "i.MX8M SoC family support"
+ tristate "i.MX8M SoC family support"
depends on ARCH_MXC || COMPILE_TEST
default ARCH_MXC && ARM64
select SOC_BUS
@@ -19,4 +31,57 @@ config SOC_IMX8M
support, it will provide the SoC info like SoC family,
ID and revision etc.
+config SOC_IMX9
+ tristate "i.MX9 SoC family support"
+ depends on ARCH_MXC || COMPILE_TEST
+ default ARCH_MXC && ARM64
+ select SOC_BUS
+ select PM_GENERIC_DOMAINS
+ select ARM_GIC_V3 if ARCH_MXC && ARCH_MULTI_V7
+ help
+ If you say yes here you get support for the NXP i.MX9 SoC
+ family. It provides SoC information such as the SoC family,
+ ID and revision.
+
+config IMX8M_BUSFREQ
+ tristate "i.MX8M busfreq"
+ depends on SOC_IMX8M
+ default ARCH_MXC
+ help
+ If you say yes here you get support for bus frequency scaling on
+ i.MX8M SoCs. The driver switches the DDR, NoC and AHB clock rates
+ between high, audio and low bus modes based on device requests.
+
+config SECVIO_SC
+ tristate "NXP SC secvio support"
+ depends on IMX_SCU
+ default y
+ help
+ If you say yes here you get support for the NXP SNVS security
+ violation module. It allows reading information related to
+ security violations and tamper events, and registering user
+ callbacks to be invoked when a security violation occurs.
+
+config IMX8M_PM_DOMAINS
+ tristate "i.MX8M PM domains"
+ default ARCH_MXC
+ depends on ARCH_MXC || (COMPILE_TEST && OF)
+ depends on PM
+ select PM_GENERIC_DOMAINS
+
+config RPMSG_LIFE_CYCLE
+ tristate "i.MX8ULP Rpmsg Life Cycle Support"
+ depends on ARCH_MXC || COMPILE_TEST
+ depends on RPMSG
+ default ARCH_MXC && ARM64
+ help
+ If you say yes here you get support for rpmsg life cycle handling on
+ i.MX8ULP: low power mode state coordination between the A core and the
+ M core, notifying the M core so that the A core can safely be put into
+ low power mode.
+
+config IMX8ULP_LPM_CTRL
+ tristate "i.MX8ULP DDR Low Power Control support"
+ depends on ARCH_MXC || COMPILE_TEST
+ default ARCH_MXC && ARM64
+ help
+ If you say yes here you get support for DDR frequency scaling on
+ i.MX8ULP, allowing the DDR frequency to be adapted to the use case.
+ The DDR frequency must be switched manually by the user.
+
endmenu
diff --git a/drivers/soc/imx/Makefile b/drivers/soc/imx/Makefile
index 078dc918f4f3..525b9eac207c 100644
--- a/drivers/soc/imx/Makefile
+++ b/drivers/soc/imx/Makefile
@@ -5,3 +5,11 @@ endif
obj-$(CONFIG_HAVE_IMX_GPC) += gpc.o
obj-$(CONFIG_IMX_GPCV2_PM_DOMAINS) += gpcv2.o
obj-$(CONFIG_SOC_IMX8M) += soc-imx8m.o
+obj-$(CONFIG_SOC_IMX9) += soc-imx9.o
+obj-$(CONFIG_IMX8M_BUSFREQ) += busfreq-imx8mq.o
+obj-$(CONFIG_SOC_IMX_MU) += mu/
+obj-$(CONFIG_SECVIO_SC) += secvio/
+obj-$(CONFIG_IMX8M_PM_DOMAINS) += imx8m_pm_domains.o
+obj-$(CONFIG_RPMSG_LIFE_CYCLE) += rpmsg_life_cycle.o
+obj-$(CONFIG_IMX8ULP_LPM_CTRL) += imx8ulp_lpm.o
+obj-$(CONFIG_SOC_IMX9) += imx93-pd.o imx93-blk-ctrl.o
diff --git a/drivers/soc/imx/busfreq-imx8mq.c b/drivers/soc/imx/busfreq-imx8mq.c
new file mode 100644
index 000000000000..5e36962285ef
--- /dev/null
+++ b/drivers/soc/imx/busfreq-imx8mq.c
@@ -0,0 +1,670 @@
+/*
+ * Copyright 2017-2018 NXP
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/busfreq-imx.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/cpumask.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/proc_fs.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/suspend.h>
+#include <linux/sys_soc.h>
+
+#define FSL_SIP_DDR_DVFS 0xc2000004
+
+#define HIGH_FREQ_3200MTS 0x0
+#define AUDIO_FREQ_400MTS 0x1
+#define LOW_BUS_FREQ_100MTS 0x2
+#define LOW_BUS_FREQ_667MTS 0x1
+#define WAIT_BUS_FREQ_DONE 0xf
+#define DLL_ON_DRATE 667
+
+static struct device *busfreq_dev;
+static int low_bus_freq_mode;
+static int audio_bus_freq_mode;
+static int high_bus_freq_mode;
+static int bus_freq_scaling_initialized;
+static int bus_freq_scaling_is_active;
+static int high_bus_count, audio_bus_count, low_bus_count;
+static int cur_bus_freq_mode;
+static int busfreq_suspended;
+static bool cancel_reduce_bus_freq;
+
+static unsigned int fsp_table[4];
+static unsigned long origin_noc_rate;
+static int low_bus_mode_fsp_index;
+/* no bypass or DLL-off mode support if the lowest fsp is at or above 667MTS */
+static bool bypass_support = true;
+
+static struct clk *dram_pll_clk;
+static struct clk *dram_pll;
+static struct clk *sys1_pll_800m;
+static struct clk *sys1_pll_400m;
+static struct clk *sys1_pll_100m;
+static struct clk *sys1_pll_40m;
+static struct clk *dram_alt_src;
+static struct clk *dram_alt_root;
+static struct clk *dram_core_clk;
+static struct clk *dram_apb_src;
+static struct clk *dram_apb_pre_div;
+static struct clk *noc_div;
+static struct clk *main_axi_src;
+static struct clk *ahb_div;
+static struct clk *osc_25m;
+static struct clk *sys2_pll_333m;
+
+static struct delayed_work low_bus_freq_handler;
+static struct delayed_work bus_freq_daemon;
+
+DEFINE_MUTEX(bus_freq_mutex);
+
+static void update_bus_freq(int target_freq)
+{
+ struct arm_smccc_res res;
+ u32 online_cpus = 0;
+ int cpu = 0;
+
+ local_irq_disable();
+
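+ /*
+  * Pack the online-CPU bitmap in the layout the DDR DVFS SiP handler
+  * expects (apparently one byte per core, with bit 0 of each byte
+  * flagging that core as online).
+  */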
+ for_each_online_cpu(cpu) {
+ online_cpus |= (1 << (cpu * 8));
+ }
+ /* change the DDR frequency */
+ arm_smccc_smc(FSL_SIP_DDR_DVFS, target_freq, online_cpus,
+ 0, 0, 0, 0, 0, &res);
+
+ local_irq_enable();
+}
+
+static void reduce_bus_freq(void)
+{
+ u32 rate;
+
+ high_bus_freq_mode = 0;
+
+ /*
+ * The code below is partly redundant; keep it as-is for now,
+ * since the audio frequency may need to be updated separately
+ * in the future.
+ */
+ if (audio_bus_count) {
+ if (cur_bus_freq_mode == BUS_FREQ_HIGH) {
+ if (bypass_support) {
+ /* prepare the necessary clk before frequency change */
+ clk_prepare_enable(sys1_pll_40m);
+ clk_prepare_enable(dram_alt_root);
+ clk_prepare_enable(sys1_pll_100m);
+
+ update_bus_freq(low_bus_mode_fsp_index);
+
+ clk_set_parent(dram_alt_src, sys1_pll_100m);
+ clk_set_parent(dram_core_clk, dram_alt_root);
+ clk_set_parent(dram_apb_src, sys1_pll_40m);
+ clk_set_rate(dram_apb_pre_div, 20000000);
+ clk_disable_unprepare(sys1_pll_100m);
+ clk_disable_unprepare(sys1_pll_40m);
+ clk_disable_unprepare(dram_alt_root);
+ } else {
+ update_bus_freq(low_bus_mode_fsp_index);
+ /*
+ * the dram_apb and dram_core clock rates are changed on
+ * the ATF side; the two lines below only update the
+ * clock tree info on the kernel side.
+ */
+ clk_set_rate(dram_apb_pre_div, 160000000);
+ clk_get_rate(dram_pll);
+ }
+ /* change the NOC rate */
+ if (of_machine_is_compatible("fsl,imx8mq"))
+ clk_set_rate(noc_div, origin_noc_rate / 8);
+ else
+ clk_set_rate(noc_div, origin_noc_rate / 5);
+
+ rate = clk_get_rate(ahb_div);
+ if (rate == 0) {
+ WARN_ON(1);
+ return;
+ }
+ clk_set_rate(ahb_div, rate / 6);
+ clk_set_parent(main_axi_src, osc_25m);
+ }
+
+ low_bus_freq_mode = 0;
+ audio_bus_freq_mode = 1;
+ cur_bus_freq_mode = BUS_FREQ_AUDIO;
+ } else {
+ if (cur_bus_freq_mode == BUS_FREQ_HIGH) {
+ if (bypass_support) {
+ /* prepare the necessary clk before frequency change */
+ clk_prepare_enable(sys1_pll_40m);
+ clk_prepare_enable(dram_alt_root);
+ clk_prepare_enable(sys1_pll_100m);
+
+ update_bus_freq(low_bus_mode_fsp_index);
+
+ clk_set_parent(dram_alt_src, sys1_pll_100m);
+ clk_set_parent(dram_core_clk, dram_alt_root);
+ clk_set_parent(dram_apb_src, sys1_pll_40m);
+ clk_set_rate(dram_apb_pre_div, 20000000);
+ clk_disable_unprepare(sys1_pll_100m);
+ clk_disable_unprepare(sys1_pll_40m);
+ clk_disable_unprepare(dram_alt_root);
+ } else {
+ update_bus_freq(low_bus_mode_fsp_index);
+ /*
+ * the dram_apb and dram_core clock rates are changed on
+ * the ATF side; the two lines below only update the
+ * clock tree info on the kernel side.
+ */
+ clk_set_rate(dram_apb_pre_div, 160000000);
+ clk_get_rate(dram_pll);
+ }
+
+ /* change the NOC rate */
+ if (of_machine_is_compatible("fsl,imx8mq"))
+ clk_set_rate(noc_div, origin_noc_rate / 8);
+ else
+ clk_set_rate(noc_div, origin_noc_rate / 5);
+
+ rate = clk_get_rate(ahb_div);
+ if (rate == 0) {
+ WARN_ON(1);
+ return;
+ }
+ clk_set_rate(ahb_div, rate / 6);
+ clk_set_parent(main_axi_src, osc_25m);
+ }
+
+ low_bus_freq_mode = 1;
+ audio_bus_freq_mode = 0;
+ cur_bus_freq_mode = BUS_FREQ_LOW;
+ }
+
+ if (audio_bus_freq_mode)
+ printk(KERN_DEBUG "ddrc freq set to audio bus mode\n");
+ if (low_bus_freq_mode)
+ printk(KERN_DEBUG "ddrc freq set to low bus mode\n");
+}
+
+static void reduce_bus_freq_handler(struct work_struct *work)
+{
+ mutex_lock(&bus_freq_mutex);
+
+ if (!cancel_reduce_bus_freq)
+ reduce_bus_freq();
+
+ mutex_unlock(&bus_freq_mutex);
+}
+
+static int set_low_bus_freq(void)
+{
+ if (busfreq_suspended)
+ return 0;
+
+ if (!bus_freq_scaling_initialized || !bus_freq_scaling_is_active)
+ return 0;
+
+ cancel_reduce_bus_freq = false;
+
+ /*
+ * check to see if we need to go from low bus
+ * freq mode to audio bus freq mode.
+ * If so, the change needs to be done immediately.
+ */
+ if (audio_bus_count && low_bus_freq_mode)
+ reduce_bus_freq();
+ else
+ schedule_delayed_work(&low_bus_freq_handler,
+ usecs_to_jiffies(1000000));
+
+ return 0;
+}
+
+static inline void cancel_low_bus_freq_handler(void)
+{
+ cancel_delayed_work(&low_bus_freq_handler);
+ cancel_reduce_bus_freq = true;
+}
+
+static int set_high_bus_freq(int high_bus_freq)
+{
+ if (bus_freq_scaling_initialized || bus_freq_scaling_is_active)
+ cancel_low_bus_freq_handler();
+
+ if (busfreq_suspended)
+ return 0;
+
+ if (!bus_freq_scaling_initialized || !bus_freq_scaling_is_active)
+ return 0;
+
+ if (high_bus_freq_mode)
+ return 0;
+
+ if (bypass_support) {
+ /* enable the clks needed for the frequency change */
+ clk_prepare_enable(sys1_pll_800m);
+ clk_prepare_enable(dram_pll_clk);
+
+ /* switch the DDR frequency */
+ update_bus_freq(HIGH_FREQ_3200MTS);
+
+ /* correct the clock tree info */
+ clk_set_parent(dram_apb_src, sys1_pll_800m);
+ clk_set_rate(dram_apb_pre_div, 160000000);
+ clk_set_parent(dram_core_clk, dram_pll_clk);
+ clk_disable_unprepare(sys1_pll_800m);
+ clk_disable_unprepare(dram_pll_clk);
+ } else {
+ /* switch the DDR frequency */
+ update_bus_freq(HIGH_FREQ_3200MTS);
+
+ clk_set_rate(dram_apb_pre_div, 200000000);
+ clk_get_rate(dram_pll);
+ }
+
+ clk_set_rate(noc_div, origin_noc_rate);
+ clk_set_rate(ahb_div, 133333333);
+ clk_set_parent(main_axi_src, sys2_pll_333m);
+
+ high_bus_freq_mode = 1;
+ audio_bus_freq_mode = 0;
+ low_bus_freq_mode = 0;
+ cur_bus_freq_mode = BUS_FREQ_HIGH;
+
+ if (high_bus_freq_mode)
+ printk(KERN_DEBUG "ddrc freq set to high bus mode\n");
+
+ return 0;
+}
+
+void request_bus_freq(enum bus_freq_mode mode)
+{
+ mutex_lock(&bus_freq_mutex);
+
+ if (mode == BUS_FREQ_HIGH)
+ high_bus_count++;
+ else if (mode == BUS_FREQ_AUDIO)
+ audio_bus_count++;
+ else if (mode == BUS_FREQ_LOW)
+ low_bus_count++;
+
+ if (busfreq_suspended || !bus_freq_scaling_initialized ||
+ !bus_freq_scaling_is_active) {
+ mutex_unlock(&bus_freq_mutex);
+ return;
+ }
+
+ cancel_low_bus_freq_handler();
+
+ if ((mode == BUS_FREQ_HIGH) && (!high_bus_freq_mode)) {
+ set_high_bus_freq(1);
+ mutex_unlock(&bus_freq_mutex);
+ return;
+ }
+
+ if ((mode == BUS_FREQ_AUDIO) && (!high_bus_freq_mode) &&
+ (!audio_bus_freq_mode)) {
+ set_low_bus_freq();
+ mutex_unlock(&bus_freq_mutex);
+ return;
+ }
+
+ mutex_unlock(&bus_freq_mutex);
+}
+EXPORT_SYMBOL(request_bus_freq);
+
+void release_bus_freq(enum bus_freq_mode mode)
+{
+ mutex_lock(&bus_freq_mutex);
+ if (mode == BUS_FREQ_HIGH) {
+ if (high_bus_count == 0) {
+ dev_err(busfreq_dev, "high bus count mismatch!\n");
+ dump_stack();
+ mutex_unlock(&bus_freq_mutex);
+ return;
+ }
+ high_bus_count--;
+ } else if (mode == BUS_FREQ_AUDIO) {
+ if (audio_bus_count == 0) {
+ dev_err(busfreq_dev, "audio bus count mismatch!\n");
+ dump_stack();
+ mutex_unlock(&bus_freq_mutex);
+ return;
+ }
+ audio_bus_count--;
+ } else if (mode == BUS_FREQ_LOW) {
+ if (low_bus_count == 0) {
+ dev_err(busfreq_dev, "low bus count mismatch!\n");
+ dump_stack();
+ mutex_unlock(&bus_freq_mutex);
+ return;
+ }
+ low_bus_count--;
+ }
+
+ if (busfreq_suspended || !bus_freq_scaling_initialized ||
+ !bus_freq_scaling_is_active) {
+ mutex_unlock(&bus_freq_mutex);
+ return;
+ }
+
+ if ((!audio_bus_freq_mode) && (high_bus_count == 0) &&
+ (audio_bus_count != 0)) {
+ set_low_bus_freq();
+ mutex_unlock(&bus_freq_mutex);
+ return;
+ }
+
+ if ((!low_bus_freq_mode) && (high_bus_count == 0) &&
+ (audio_bus_count == 0)) {
+ set_low_bus_freq();
+ mutex_unlock(&bus_freq_mutex);
+ return;
+ }
+
+ mutex_unlock(&bus_freq_mutex);
+}
+EXPORT_SYMBOL(release_bus_freq);
+
+int get_bus_freq_mode(void)
+{
+ return cur_bus_freq_mode;
+}
+EXPORT_SYMBOL(get_bus_freq_mode);
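+
+/*
+ * Illustrative (hypothetical) use from a peripheral driver:
+ *
+ *	request_bus_freq(BUS_FREQ_HIGH);   // before bandwidth-heavy work
+ *	// ... perform DMA transfers ...
+ *	release_bus_freq(BUS_FREQ_HIGH);   // allow low bus mode again
+ *
+ * Requests are refcounted per mode: the DDR only drops to audio/low
+ * bus mode once every BUS_FREQ_HIGH requester has released.
+ */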
+
+static void bus_freq_daemon_handler(struct work_struct *work)
+{
+ mutex_lock(&bus_freq_mutex);
+ if ((!low_bus_freq_mode) && (high_bus_count == 0) &&
+ (audio_bus_count == 0))
+ set_low_bus_freq();
+ mutex_unlock(&bus_freq_mutex);
+}
+
+static ssize_t bus_freq_scaling_enable_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if (bus_freq_scaling_is_active)
+ return sprintf(buf, "Bus frequency scaling is enabled\n");
+ else
+ return sprintf(buf, "Bus frequency scaling is disabled\n");
+}
+
+static ssize_t bus_freq_scaling_enable_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ if (strncmp(buf, "1", 1) == 0) {
+ bus_freq_scaling_is_active = 1;
+ set_high_bus_freq(1);
+ /*
+ * Bus freq is set to high at the beginning, so use this
+ * daemon work to make sure the system can enter low bus
+ * mode if there is no high bus request pending.
+ */
+ schedule_delayed_work(&bus_freq_daemon,
+ usecs_to_jiffies(5000000));
+ } else if (strncmp(buf, "0", 1) == 0) {
+ if (bus_freq_scaling_is_active)
+ set_high_bus_freq(1);
+ bus_freq_scaling_is_active = 0;
+ }
+ return size;
+}
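+
+/*
+ * Scaling can be toggled from user space via the "enable" attribute,
+ * e.g. (the exact path depends on where the device is bound and is
+ * only illustrative):
+ *	echo 1 > /sys/devices/platform/imx_busfreq/enable
+ */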
+
+static int bus_freq_pm_notify(struct notifier_block *nb, unsigned long event,
+ void *dummy)
+{
+ mutex_lock(&bus_freq_mutex);
+
+ if (event == PM_SUSPEND_PREPARE) {
+ high_bus_count++;
+ set_high_bus_freq(1);
+ busfreq_suspended = 1;
+ } else if (event == PM_POST_SUSPEND) {
+ busfreq_suspended = 0;
+ high_bus_count--;
+ schedule_delayed_work(&bus_freq_daemon,
+ usecs_to_jiffies(5000000));
+ }
+
+ mutex_unlock(&bus_freq_mutex);
+
+ return NOTIFY_OK;
+}
+
+static int busfreq_reboot_notifier_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ /* System is rebooting. Set the system into high_bus_freq_mode. */
+ request_bus_freq(BUS_FREQ_HIGH);
+
+ return 0;
+}
+
+static struct notifier_block imx_bus_freq_pm_notifier = {
+ .notifier_call = bus_freq_pm_notify,
+};
+
+static struct notifier_block imx_busfreq_reboot_notifier = {
+ .notifier_call = busfreq_reboot_notifier_event,
+};
+
+static DEVICE_ATTR(enable, 0644, bus_freq_scaling_enable_show,
+ bus_freq_scaling_enable_store);
+
+static int imx8mq_init_busfreq_clk(struct platform_device *pdev)
+{
+ dram_pll_clk = devm_clk_get(&pdev->dev, "dram_pll");
+ sys1_pll_800m = devm_clk_get(&pdev->dev, "sys1_pll_800m");
+ sys1_pll_400m = devm_clk_get(&pdev->dev, "sys1_pll_400m");
+ sys1_pll_100m = devm_clk_get(&pdev->dev, "sys1_pll_100m");
+ sys1_pll_40m = devm_clk_get(&pdev->dev, "sys1_pll_40m");
+ dram_alt_src = devm_clk_get(&pdev->dev, "dram_alt_src");
+ dram_alt_root = devm_clk_get(&pdev->dev, "dram_alt_root");
+ dram_core_clk = devm_clk_get(&pdev->dev, "dram_core");
+ dram_apb_src = devm_clk_get(&pdev->dev, "dram_apb_src");
+ dram_apb_pre_div = devm_clk_get(&pdev->dev, "dram_apb_pre_div");
+ noc_div = devm_clk_get(&pdev->dev, "noc_div");
+ ahb_div = devm_clk_get(&pdev->dev, "ahb_div");
+ main_axi_src = devm_clk_get(&pdev->dev, "main_axi_src");
+ osc_25m = devm_clk_get(&pdev->dev, "osc_25m");
+ sys2_pll_333m = devm_clk_get(&pdev->dev, "sys2_pll_333m");
+
+ if (IS_ERR(dram_pll_clk) || IS_ERR(sys1_pll_400m) || IS_ERR(sys1_pll_100m) ||
+ IS_ERR(sys1_pll_40m) || IS_ERR(dram_alt_src) || IS_ERR(dram_alt_root) ||
+ IS_ERR(dram_core_clk) || IS_ERR(dram_apb_src) || IS_ERR(dram_apb_pre_div)
+ || IS_ERR(noc_div) || IS_ERR(main_axi_src) || IS_ERR(ahb_div)
+ || IS_ERR(osc_25m) || IS_ERR(sys2_pll_333m)) {
+ dev_err(&pdev->dev, "failed to get busfreq clk\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int imx8mm_init_busfreq_clk(struct platform_device *pdev)
+{
+ dram_pll = devm_clk_get(&pdev->dev, "dram_pll_div");
+ dram_pll_clk = devm_clk_get(&pdev->dev, "dram_pll");
+ dram_alt_src = devm_clk_get(&pdev->dev, "dram_alt_src");
+ dram_alt_root = devm_clk_get(&pdev->dev, "dram_alt_root");
+ dram_core_clk = devm_clk_get(&pdev->dev, "dram_core");
+ dram_apb_src = devm_clk_get(&pdev->dev, "dram_apb_src");
+ dram_apb_pre_div = devm_clk_get(&pdev->dev, "dram_apb_pre_div");
+ sys1_pll_800m = devm_clk_get(&pdev->dev, "sys_pll1_800m");
+ sys1_pll_100m = devm_clk_get(&pdev->dev, "sys_pll1_100m");
+ sys1_pll_40m = devm_clk_get(&pdev->dev, "sys_pll1_40m");
+ noc_div = devm_clk_get(&pdev->dev, "noc_div");
+ ahb_div = devm_clk_get(&pdev->dev, "ahb_div");
+ main_axi_src = devm_clk_get(&pdev->dev, "main_axi_src");
+ osc_25m = devm_clk_get(&pdev->dev, "osc_24m");
+ sys2_pll_333m = devm_clk_get(&pdev->dev, "sys_pll2_333m");
+
+ if (IS_ERR(dram_pll_clk) || IS_ERR(dram_alt_src) || IS_ERR(dram_alt_root) ||
+ IS_ERR(dram_core_clk) || IS_ERR(dram_apb_src) || IS_ERR(dram_apb_pre_div) ||
+ IS_ERR(sys1_pll_800m) || IS_ERR(sys1_pll_100m) || IS_ERR(sys1_pll_40m) ||
+ IS_ERR(osc_25m) || IS_ERR(noc_div) || IS_ERR(main_axi_src) || IS_ERR(ahb_div) ||
+ IS_ERR(sys2_pll_333m)) {
+ dev_err(&pdev->dev, "failed to get busfreq clk\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*!
+ * This is the probe routine for the bus frequency driver.
+ *
+ * @param pdev The platform device structure
+ *
+ * @return The function returns 0 on success
+ *
+ */
+
+static int busfreq_probe(struct platform_device *pdev)
+{
+ int i, err;
+ struct arm_smccc_res res;
+
+ busfreq_dev = &pdev->dev;
+
+ /* get the clock for DDRC */
+ if (of_machine_is_compatible("fsl,imx8mq"))
+ err = imx8mq_init_busfreq_clk(pdev);
+ else
+ err = imx8mm_init_busfreq_clk(pdev);
+
+ if (err) {
+ dev_err(busfreq_dev, "init clk failed\n");
+ return err;
+ }
+
+ origin_noc_rate = clk_get_rate(noc_div);
+ if (origin_noc_rate == 0) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ /*
+ * Get the supported frequencies; normally the lowest frequency
+ * point is used for low bus & audio bus mode.
+ */
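+ /* sub-function 0x11 appears to query the i-th supported frequency point (an assumption) */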
+ for (i = 0; i < 4; i++) {
+ arm_smccc_smc(FSL_SIP_DDR_DVFS, 0x11, i, 0, 0, 0, 0, 0, &res);
+ err = res.a0;
+ if (err < 0)
+ return -EINVAL;
+
+ fsp_table[i] = res.a0;
+ }
+
+ /*
+  * get the lowest fsp index: fsp_table is filled up to the first
+  * zero entry, so the entry just before it is the lowest rate
+  */
+ for (i = 0; i < 4; i++)
+ if (fsp_table[i] == 0)
+ break;
+
+ low_bus_mode_fsp_index = i - 1;
+
+ /*
+ * if the lowest fsp data rate is at or above 667MTS (DLL_ON_DRATE),
+ * neither DLL-off mode nor bypass mode is supported.
+ */
+ if (fsp_table[low_bus_mode_fsp_index] >= DLL_ON_DRATE)
+ bypass_support = false;
+
+ /* create the sysfs file */
+ err = sysfs_create_file(&busfreq_dev->kobj, &dev_attr_enable.attr);
+ if (err) {
+ dev_err(busfreq_dev,
+ "Unable to register sysdev entry for BUSFREQ");
+ return err;
+ }
+
+ high_bus_freq_mode = 1;
+ low_bus_freq_mode = 0;
+ audio_bus_freq_mode = 0;
+ cur_bus_freq_mode = BUS_FREQ_HIGH;
+
+ bus_freq_scaling_is_active = 1;
+ bus_freq_scaling_initialized = 1;
+
+ INIT_DELAYED_WORK(&low_bus_freq_handler, reduce_bus_freq_handler);
+ INIT_DELAYED_WORK(&bus_freq_daemon, bus_freq_daemon_handler);
+ register_pm_notifier(&imx_bus_freq_pm_notifier);
+ register_reboot_notifier(&imx_busfreq_reboot_notifier);
+
+ /* enter low bus mode if no high speed device enabled */
+ schedule_delayed_work(&bus_freq_daemon, msecs_to_jiffies(10000));
+
+ return 0;
+}
+
+static const struct of_device_id imx_busfreq_ids[] = {
+ { .compatible = "fsl,imx_busfreq", },
+ { /* sentinel */ }
+};
+
+static struct platform_driver busfreq_driver = {
+ .driver = {
+ .name = "imx_busfreq",
+ .owner = THIS_MODULE,
+ .of_match_table = imx_busfreq_ids,
+ },
+ .probe = busfreq_probe,
+};
+
+/*!
+ * Initialise the busfreq_driver.
+ *
+ * @return 0 on success, or -ENODEV if the driver could not be registered.
+ */
+static int __init busfreq_init(void)
+{
+ if (platform_driver_register(&busfreq_driver) != 0)
+ return -ENODEV;
+
+ printk(KERN_INFO "Bus freq driver module loaded\n");
+
+ return 0;
+}
+
+static void __exit busfreq_cleanup(void)
+{
+ sysfs_remove_file(&busfreq_dev->kobj, &dev_attr_enable.attr);
+
+ /* Unregister the device structure */
+ platform_driver_unregister(&busfreq_driver);
+ bus_freq_scaling_initialized = 0;
+}
+
+module_init(busfreq_init);
+module_exit(busfreq_cleanup);
+
+MODULE_AUTHOR("NXP Semiconductor, Inc.");
+MODULE_DESCRIPTION("Busfreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
index 90a8b2c0676f..09a170256c44 100644
--- a/drivers/soc/imx/gpc.c
+++ b/drivers/soc/imx/gpc.c
@@ -39,6 +39,11 @@
#define PGC_DOMAIN_FLAG_NO_PD BIT(0)
+#define GPC_PGC_DOMAIN_ARM 0
+#define GPC_PGC_DOMAIN_PU 1
+#define GPC_PGC_DOMAIN_DISPLAY 2
+#define GPC_PGC_DOMAIN_PCI 3
+
struct imx_pm_domain {
struct generic_pm_domain base;
struct regmap *regmap;
@@ -176,6 +181,8 @@ static int imx_pgc_parse_dt(struct device *dev, struct imx_pm_domain *domain)
return imx_pgc_get_clocks(dev, domain);
}
+static void imx_gpc_handle_ldobypass(struct platform_device *pdev);
+
static int imx_pgc_power_domain_probe(struct platform_device *pdev)
{
struct imx_pm_domain *domain = pdev->dev.platform_data;
@@ -202,6 +209,10 @@ static int imx_pgc_power_domain_probe(struct platform_device *pdev)
device_link_add(dev, dev->parent, DL_FLAG_AUTOREMOVE_CONSUMER);
+ /* Mark PU regulator as bypass */
+ if (pdev->id == GPC_PGC_DOMAIN_PU)
+ imx_gpc_handle_ldobypass(pdev);
+
return 0;
genpd_err:
@@ -239,11 +250,6 @@ static struct platform_driver imx_pgc_power_domain_driver = {
};
builtin_platform_driver(imx_pgc_power_domain_driver)
-#define GPC_PGC_DOMAIN_ARM 0
-#define GPC_PGC_DOMAIN_PU 1
-#define GPC_PGC_DOMAIN_DISPLAY 2
-#define GPC_PGC_DOMAIN_PCI 3
-
static struct genpd_power_state imx6_pm_domain_pu_state = {
.power_off_latency_ns = 25000,
.power_on_latency_ns = 2000000,
@@ -401,6 +407,22 @@ clk_err:
return ret;
}
+static void imx_gpc_handle_ldobypass(struct platform_device *pdev)
+{
+ struct imx_pm_domain *domain = pdev->dev.platform_data;
+ struct regulator *pu_reg = domain->supply;
+ u32 bypass = 0;
+ int ret;
+
+ ret = of_property_read_u32(pdev->dev.parent->of_node, "fsl,ldo-bypass", &bypass);
+ if (ret && ret != -EINVAL)
+ dev_warn(pdev->dev.parent, "failed to read fsl,ldo-bypass property: %d\n", ret);
+
+ /* We only bypass PU since ARM and SoC have been set in U-Boot */
+ if (pu_reg && bypass)
+ regulator_allow_bypass(pu_reg, true);
+}
+
static int imx_gpc_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
@@ -455,6 +477,8 @@ static int imx_gpc_probe(struct platform_device *pdev)
of_id_data->num_domains);
if (ret)
return ret;
+
+ imx_gpc_handle_ldobypass(pdev);
} else {
struct imx_pm_domain *domain;
struct platform_device *pd_pdev;
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index 4dc3a3f73511..db7e7fc321b1 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -12,15 +12,11 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
-#include <linux/reset.h>
#include <linux/sizes.h>
#include <dt-bindings/power/imx7-power.h>
#include <dt-bindings/power/imx8mq-power.h>
-#include <dt-bindings/power/imx8mm-power.h>
-#include <dt-bindings/power/imx8mn-power.h>
#define GPC_LPCR_A_CORE_BSC 0x000
@@ -46,25 +42,6 @@
#define IMX8M_PCIE1_A53_DOMAIN BIT(3)
#define IMX8M_MIPI_A53_DOMAIN BIT(2)
-#define IMX8MM_VPUH1_A53_DOMAIN BIT(15)
-#define IMX8MM_VPUG2_A53_DOMAIN BIT(14)
-#define IMX8MM_VPUG1_A53_DOMAIN BIT(13)
-#define IMX8MM_DISPMIX_A53_DOMAIN BIT(12)
-#define IMX8MM_VPUMIX_A53_DOMAIN BIT(10)
-#define IMX8MM_GPUMIX_A53_DOMAIN BIT(9)
-#define IMX8MM_GPU_A53_DOMAIN (BIT(8) | BIT(11))
-#define IMX8MM_DDR1_A53_DOMAIN BIT(7)
-#define IMX8MM_OTG2_A53_DOMAIN BIT(5)
-#define IMX8MM_OTG1_A53_DOMAIN BIT(4)
-#define IMX8MM_PCIE_A53_DOMAIN BIT(3)
-#define IMX8MM_MIPI_A53_DOMAIN BIT(2)
-
-#define IMX8MN_DISPMIX_A53_DOMAIN BIT(12)
-#define IMX8MN_GPUMIX_A53_DOMAIN BIT(9)
-#define IMX8MN_DDR1_A53_DOMAIN BIT(7)
-#define IMX8MN_OTG1_A53_DOMAIN BIT(4)
-#define IMX8MN_MIPI_A53_DOMAIN BIT(2)
-
#define GPC_PU_PGC_SW_PUP_REQ 0x0f8
#define GPC_PU_PGC_SW_PDN_REQ 0x104
@@ -88,55 +65,14 @@
#define IMX8M_PCIE1_SW_Pxx_REQ BIT(1)
#define IMX8M_MIPI_SW_Pxx_REQ BIT(0)
-#define IMX8MM_VPUH1_SW_Pxx_REQ BIT(13)
-#define IMX8MM_VPUG2_SW_Pxx_REQ BIT(12)
-#define IMX8MM_VPUG1_SW_Pxx_REQ BIT(11)
-#define IMX8MM_DISPMIX_SW_Pxx_REQ BIT(10)
-#define IMX8MM_VPUMIX_SW_Pxx_REQ BIT(8)
-#define IMX8MM_GPUMIX_SW_Pxx_REQ BIT(7)
-#define IMX8MM_GPU_SW_Pxx_REQ (BIT(6) | BIT(9))
-#define IMX8MM_DDR1_SW_Pxx_REQ BIT(5)
-#define IMX8MM_OTG2_SW_Pxx_REQ BIT(3)
-#define IMX8MM_OTG1_SW_Pxx_REQ BIT(2)
-#define IMX8MM_PCIE_SW_Pxx_REQ BIT(1)
-#define IMX8MM_MIPI_SW_Pxx_REQ BIT(0)
-
-#define IMX8MN_DISPMIX_SW_Pxx_REQ BIT(10)
-#define IMX8MN_GPUMIX_SW_Pxx_REQ BIT(7)
-#define IMX8MN_DDR1_SW_Pxx_REQ BIT(5)
-#define IMX8MN_OTG1_SW_Pxx_REQ BIT(2)
-#define IMX8MN_MIPI_SW_Pxx_REQ BIT(0)
-
#define GPC_M4_PU_PDN_FLG 0x1bc
#define GPC_PU_PWRHSK 0x1fc
-#define IMX8M_GPU_HSK_PWRDNACKN BIT(26)
-#define IMX8M_VPU_HSK_PWRDNACKN BIT(25)
-#define IMX8M_DISP_HSK_PWRDNACKN BIT(24)
#define IMX8M_GPU_HSK_PWRDNREQN BIT(6)
#define IMX8M_VPU_HSK_PWRDNREQN BIT(5)
#define IMX8M_DISP_HSK_PWRDNREQN BIT(4)
-
-#define IMX8MM_GPUMIX_HSK_PWRDNACKN BIT(29)
-#define IMX8MM_GPU_HSK_PWRDNACKN (BIT(27) | BIT(28))
-#define IMX8MM_VPUMIX_HSK_PWRDNACKN BIT(26)
-#define IMX8MM_DISPMIX_HSK_PWRDNACKN BIT(25)
-#define IMX8MM_HSIO_HSK_PWRDNACKN (BIT(23) | BIT(24))
-#define IMX8MM_GPUMIX_HSK_PWRDNREQN BIT(11)
-#define IMX8MM_GPU_HSK_PWRDNREQN (BIT(9) | BIT(10))
-#define IMX8MM_VPUMIX_HSK_PWRDNREQN BIT(8)
-#define IMX8MM_DISPMIX_HSK_PWRDNREQN BIT(7)
-#define IMX8MM_HSIO_HSK_PWRDNREQN (BIT(5) | BIT(6))
-
-#define IMX8MN_GPUMIX_HSK_PWRDNACKN (BIT(29) | BIT(27))
-#define IMX8MN_DISPMIX_HSK_PWRDNACKN BIT(25)
-#define IMX8MN_HSIO_HSK_PWRDNACKN BIT(23)
-#define IMX8MN_GPUMIX_HSK_PWRDNREQN (BIT(11) | BIT(9))
-#define IMX8MN_DISPMIX_HSK_PWRDNREQN BIT(7)
-#define IMX8MN_HSIO_HSK_PWRDNREQN BIT(5)
-
/*
* The PGC offset values in Reference Manual
* (Rev. 1, 01/2018 and the older ones) GPC chapter's
@@ -159,37 +95,18 @@
#define IMX8M_PGC_MIPI_CSI2 28
#define IMX8M_PGC_PCIE2 29
-#define IMX8MM_PGC_MIPI 16
-#define IMX8MM_PGC_PCIE 17
-#define IMX8MM_PGC_OTG1 18
-#define IMX8MM_PGC_OTG2 19
-#define IMX8MM_PGC_DDR1 21
-#define IMX8MM_PGC_GPU2D 22
-#define IMX8MM_PGC_GPUMIX 23
-#define IMX8MM_PGC_VPUMIX 24
-#define IMX8MM_PGC_GPU3D 25
-#define IMX8MM_PGC_DISPMIX 26
-#define IMX8MM_PGC_VPUG1 27
-#define IMX8MM_PGC_VPUG2 28
-#define IMX8MM_PGC_VPUH1 29
-
-#define IMX8MN_PGC_MIPI 16
-#define IMX8MN_PGC_OTG1 18
-#define IMX8MN_PGC_DDR1 21
-#define IMX8MN_PGC_GPUMIX 23
-#define IMX8MN_PGC_DISPMIX 26
-
#define GPC_PGC_CTRL(n) (0x800 + (n) * 0x40)
#define GPC_PGC_SR(n) (GPC_PGC_CTRL(n) + 0xc)
#define GPC_PGC_CTRL_PCR BIT(0)
+#define GPC_CLK_MAX 6
+
struct imx_pgc_domain {
struct generic_pm_domain genpd;
struct regmap *regmap;
struct regulator *regulator;
- struct reset_control *reset;
- struct clk_bulk_data *clks;
+ struct clk *clk[GPC_CLK_MAX];
int num_clks;
unsigned int pgc;
@@ -197,8 +114,7 @@ struct imx_pgc_domain {
const struct {
u32 pxx;
u32 map;
- u32 hskreq;
- u32 hskack;
+ u32 hsk;
} bits;
const int voltage;
@@ -211,175 +127,96 @@ struct imx_pgc_domain_data {
const struct regmap_access_table *reg_access_table;
};
-static inline struct imx_pgc_domain *
-to_imx_pgc_domain(struct generic_pm_domain *genpd)
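+/*
+ * Combined power-up/power-down handler: @on selects between the
+ * GPC_PU_PGC_SW_PUP_REQ and GPC_PU_PGC_SW_PDN_REQ registers.
+ */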
+static int imx_gpc_pu_pgc_sw_pxx_req(struct generic_pm_domain *genpd,
+ bool on)
{
- return container_of(genpd, struct imx_pgc_domain, genpd);
-}
-
-static int imx_pgc_power_up(struct generic_pm_domain *genpd)
-{
- struct imx_pgc_domain *domain = to_imx_pgc_domain(genpd);
- u32 reg_val;
- int ret;
-
- ret = pm_runtime_get_sync(domain->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(domain->dev);
- return ret;
- }
-
- if (!IS_ERR(domain->regulator)) {
+ struct imx_pgc_domain *domain = container_of(genpd,
+ struct imx_pgc_domain,
+ genpd);
+ unsigned int offset = on ?
+ GPC_PU_PGC_SW_PUP_REQ : GPC_PU_PGC_SW_PDN_REQ;
+ const bool enable_power_control = !on;
+ const bool has_regulator = !IS_ERR(domain->regulator);
+ int i, ret = 0;
+ u32 pxx_req;
+
+ regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
+ domain->bits.map, domain->bits.map);
+
+ if (has_regulator && on) {
ret = regulator_enable(domain->regulator);
if (ret) {
dev_err(domain->dev, "failed to enable regulator\n");
- goto out_put_pm;
+ goto unmap;
}
}
- reset_control_assert(domain->reset);
-
/* Enable reset clocks for all devices in the domain */
- ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
- if (ret) {
- dev_err(domain->dev, "failed to enable reset clocks\n");
- goto out_regulator_disable;
- }
-
- /* delays for reset to propagate */
- udelay(5);
-
- if (domain->bits.pxx) {
- /* request the domain to power up */
- regmap_update_bits(domain->regmap, GPC_PU_PGC_SW_PUP_REQ,
- domain->bits.pxx, domain->bits.pxx);
- /*
- * As per "5.5.9.4 Example Code 4" in IMX7DRM.pdf wait
- * for PUP_REQ/PDN_REQ bit to be cleared
- */
- ret = regmap_read_poll_timeout(domain->regmap,
- GPC_PU_PGC_SW_PUP_REQ, reg_val,
- !(reg_val & domain->bits.pxx),
- 0, USEC_PER_MSEC);
- if (ret) {
- dev_err(domain->dev, "failed to command PGC\n");
- goto out_clk_disable;
- }
-
- /* disable power control */
- regmap_clear_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc),
- GPC_PGC_CTRL_PCR);
- }
+ for (i = 0; i < domain->num_clks; i++)
+ clk_prepare_enable(domain->clk[i]);
- /* delay for reset to propagate */
- udelay(5);
-
- reset_control_deassert(domain->reset);
+ if (enable_power_control)
+ regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc),
+ GPC_PGC_CTRL_PCR, GPC_PGC_CTRL_PCR);
- /* request the ADB400 to power up */
- if (domain->bits.hskreq) {
+ if (domain->bits.hsk)
regmap_update_bits(domain->regmap, GPC_PU_PWRHSK,
- domain->bits.hskreq, domain->bits.hskreq);
-
+ domain->bits.hsk, on ? domain->bits.hsk : 0);
+
+ regmap_update_bits(domain->regmap, offset,
+ domain->bits.pxx, domain->bits.pxx);
+
+ /*
+ * As per "5.5.9.4 Example Code 4" in IMX7DRM.pdf wait
+ * for PUP_REQ/PDN_REQ bit to be cleared
+ */
+ ret = regmap_read_poll_timeout(domain->regmap, offset, pxx_req,
+ !(pxx_req & domain->bits.pxx),
+ 0, USEC_PER_MSEC);
+ if (ret) {
+ dev_err(domain->dev, "failed to command PGC\n");
/*
- * ret = regmap_read_poll_timeout(domain->regmap, GPC_PU_PWRHSK, reg_val,
- * (reg_val & domain->bits.hskack), 0,
- * USEC_PER_MSEC);
- * Technically we need the commented code to wait handshake. But that needs
- * the BLK-CTL module BUS clk-en bit being set.
- *
- * There is a separate BLK-CTL module and we will have such a driver for it,
- * that driver will set the BUS clk-en bit and handshake will be triggered
- * automatically there. Just add a delay and suppose the handshake finish
- * after that.
+ * If we were in the process of enabling the
+ * domain and failed, we might as well disable
+ * the regulator we just enabled. In the opposite
+ * situation, where we failed to power down,
+ * keep the regulator on.
*/
+ on = !on;
}
- /* Disable reset clocks for all devices in the domain */
- clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
-
- return 0;
-
-out_clk_disable:
- clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
-out_regulator_disable:
- if (!IS_ERR(domain->regulator))
- regulator_disable(domain->regulator);
-out_put_pm:
- pm_runtime_put(domain->dev);
-
- return ret;
-}
-
-static int imx_pgc_power_down(struct generic_pm_domain *genpd)
-{
- struct imx_pgc_domain *domain = to_imx_pgc_domain(genpd);
- u32 reg_val;
- int ret;
-
- /* Enable reset clocks for all devices in the domain */
- ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
- if (ret) {
- dev_err(domain->dev, "failed to enable reset clocks\n");
- return ret;
- }
-
- /* request the ADB400 to power down */
- if (domain->bits.hskreq) {
- regmap_clear_bits(domain->regmap, GPC_PU_PWRHSK,
- domain->bits.hskreq);
-
- ret = regmap_read_poll_timeout(domain->regmap, GPC_PU_PWRHSK,
- reg_val,
- !(reg_val & domain->bits.hskack),
- 0, USEC_PER_MSEC);
- if (ret) {
- dev_err(domain->dev, "failed to power down ADB400\n");
- goto out_clk_disable;
- }
- }
-
- if (domain->bits.pxx) {
- /* enable power control */
+ if (enable_power_control)
regmap_update_bits(domain->regmap, GPC_PGC_CTRL(domain->pgc),
- GPC_PGC_CTRL_PCR, GPC_PGC_CTRL_PCR);
-
- /* request the domain to power down */
- regmap_update_bits(domain->regmap, GPC_PU_PGC_SW_PDN_REQ,
- domain->bits.pxx, domain->bits.pxx);
- /*
- * As per "5.5.9.4 Example Code 4" in IMX7DRM.pdf wait
- * for PUP_REQ/PDN_REQ bit to be cleared
- */
- ret = regmap_read_poll_timeout(domain->regmap,
- GPC_PU_PGC_SW_PDN_REQ, reg_val,
- !(reg_val & domain->bits.pxx),
- 0, USEC_PER_MSEC);
- if (ret) {
- dev_err(domain->dev, "failed to command PGC\n");
- goto out_clk_disable;
- }
- }
+ GPC_PGC_CTRL_PCR, 0);
/* Disable reset clocks for all devices in the domain */
- clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
-
- if (!IS_ERR(domain->regulator)) {
- ret = regulator_disable(domain->regulator);
- if (ret) {
- dev_err(domain->dev, "failed to disable regulator\n");
- return ret;
- }
+ for (i = 0; i < domain->num_clks; i++)
+ clk_disable_unprepare(domain->clk[i]);
+
+ if (has_regulator && !on) {
+ int err;
+
+ err = regulator_disable(domain->regulator);
+ if (err)
+ dev_err(domain->dev,
+ "failed to disable regulator: %d\n", err);
+ /* Preserve earlier error code */
+ ret = ret ?: err;
}
+unmap:
+ regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
+ domain->bits.map, 0);
+ return ret;
+}
- pm_runtime_put_sync_suspend(domain->dev);
-
- return 0;
-
-out_clk_disable:
- clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
+static int imx_gpc_pu_pgc_sw_pup_req(struct generic_pm_domain *genpd)
+{
+ return imx_gpc_pu_pgc_sw_pxx_req(genpd, true);
+}
- return ret;
+static int imx_gpc_pu_pgc_sw_pdn_req(struct generic_pm_domain *genpd)
+{
+ return imx_gpc_pu_pgc_sw_pxx_req(genpd, false);
}
static const struct imx_pgc_domain imx7_pgc_domains[] = {
@@ -505,8 +342,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.bits = {
.pxx = IMX8M_GPU_SW_Pxx_REQ,
.map = IMX8M_GPU_A53_DOMAIN,
- .hskreq = IMX8M_GPU_HSK_PWRDNREQN,
- .hskack = IMX8M_GPU_HSK_PWRDNACKN,
+ .hsk = IMX8M_GPU_HSK_PWRDNREQN,
},
.pgc = IMX8M_PGC_GPU,
},
@@ -518,8 +354,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.bits = {
.pxx = IMX8M_VPU_SW_Pxx_REQ,
.map = IMX8M_VPU_A53_DOMAIN,
- .hskreq = IMX8M_VPU_HSK_PWRDNREQN,
- .hskack = IMX8M_VPU_HSK_PWRDNACKN,
+ .hsk = IMX8M_VPU_HSK_PWRDNREQN,
},
.pgc = IMX8M_PGC_VPU,
},
@@ -531,8 +366,7 @@ static const struct imx_pgc_domain imx8m_pgc_domains[] = {
.bits = {
.pxx = IMX8M_DISP_SW_Pxx_REQ,
.map = IMX8M_DISP_A53_DOMAIN,
- .hskreq = IMX8M_DISP_HSK_PWRDNREQN,
- .hskack = IMX8M_DISP_HSK_PWRDNACKN,
+ .hsk = IMX8M_DISP_HSK_PWRDNREQN,
},
.pgc = IMX8M_PGC_DISP,
},
@@ -609,254 +443,40 @@ static const struct imx_pgc_domain_data imx8m_pgc_domain_data = {
.reg_access_table = &imx8m_access_table,
};
-static const struct imx_pgc_domain imx8mm_pgc_domains[] = {
- [IMX8MM_POWER_DOMAIN_HSIOMIX] = {
- .genpd = {
- .name = "hsiomix",
- },
- .bits = {
- .pxx = 0, /* no power sequence control */
- .map = 0, /* no power sequence control */
- .hskreq = IMX8MM_HSIO_HSK_PWRDNREQN,
- .hskack = IMX8MM_HSIO_HSK_PWRDNACKN,
- },
- },
-
- [IMX8MM_POWER_DOMAIN_PCIE] = {
- .genpd = {
- .name = "pcie",
- },
- .bits = {
- .pxx = IMX8MM_PCIE_SW_Pxx_REQ,
- .map = IMX8MM_PCIE_A53_DOMAIN,
- },
- .pgc = IMX8MM_PGC_PCIE,
- },
-
- [IMX8MM_POWER_DOMAIN_OTG1] = {
- .genpd = {
- .name = "usb-otg1",
- },
- .bits = {
- .pxx = IMX8MM_OTG1_SW_Pxx_REQ,
- .map = IMX8MM_OTG1_A53_DOMAIN,
- },
- .pgc = IMX8MM_PGC_OTG1,
- },
-
- [IMX8MM_POWER_DOMAIN_OTG2] = {
- .genpd = {
- .name = "usb-otg2",
- },
- .bits = {
- .pxx = IMX8MM_OTG2_SW_Pxx_REQ,
- .map = IMX8MM_OTG2_A53_DOMAIN,
- },
- .pgc = IMX8MM_PGC_OTG2,
- },
-
- [IMX8MM_POWER_DOMAIN_GPUMIX] = {
- .genpd = {
- .name = "gpumix",
- },
- .bits = {
- .pxx = IMX8MM_GPUMIX_SW_Pxx_REQ,
- .map = IMX8MM_GPUMIX_A53_DOMAIN,
- .hskreq = IMX8MM_GPUMIX_HSK_PWRDNREQN,
- .hskack = IMX8MM_GPUMIX_HSK_PWRDNACKN,
- },
- .pgc = IMX8MM_PGC_GPUMIX,
- },
-
- [IMX8MM_POWER_DOMAIN_GPU] = {
- .genpd = {
- .name = "gpu",
- },
- .bits = {
- .pxx = IMX8MM_GPU_SW_Pxx_REQ,
- .map = IMX8MM_GPU_A53_DOMAIN,
- .hskreq = IMX8MM_GPU_HSK_PWRDNREQN,
- .hskack = IMX8MM_GPU_HSK_PWRDNACKN,
- },
- .pgc = IMX8MM_PGC_GPU2D,
- },
-
- [IMX8MM_POWER_DOMAIN_VPUMIX] = {
- .genpd = {
- .name = "vpumix",
- },
- .bits = {
- .pxx = IMX8MM_VPUMIX_SW_Pxx_REQ,
- .map = IMX8MM_VPUMIX_A53_DOMAIN,
- .hskreq = IMX8MM_VPUMIX_HSK_PWRDNREQN,
- .hskack = IMX8MM_VPUMIX_HSK_PWRDNACKN,
- },
- .pgc = IMX8MM_PGC_VPUMIX,
- },
-
- [IMX8MM_POWER_DOMAIN_VPUG1] = {
- .genpd = {
- .name = "vpu-g1",
- },
- .bits = {
- .pxx = IMX8MM_VPUG1_SW_Pxx_REQ,
- .map = IMX8MM_VPUG1_A53_DOMAIN,
- },
- .pgc = IMX8MM_PGC_VPUG1,
- },
-
- [IMX8MM_POWER_DOMAIN_VPUG2] = {
- .genpd = {
- .name = "vpu-g2",
- },
- .bits = {
- .pxx = IMX8MM_VPUG2_SW_Pxx_REQ,
- .map = IMX8MM_VPUG2_A53_DOMAIN,
- },
- .pgc = IMX8MM_PGC_VPUG2,
- },
-
- [IMX8MM_POWER_DOMAIN_VPUH1] = {
- .genpd = {
- .name = "vpu-h1",
- },
- .bits = {
- .pxx = IMX8MM_VPUH1_SW_Pxx_REQ,
- .map = IMX8MM_VPUH1_A53_DOMAIN,
- },
- .pgc = IMX8MM_PGC_VPUH1,
- },
-
- [IMX8MM_POWER_DOMAIN_DISPMIX] = {
- .genpd = {
- .name = "dispmix",
- },
- .bits = {
- .pxx = IMX8MM_DISPMIX_SW_Pxx_REQ,
- .map = IMX8MM_DISPMIX_A53_DOMAIN,
- .hskreq = IMX8MM_DISPMIX_HSK_PWRDNREQN,
- .hskack = IMX8MM_DISPMIX_HSK_PWRDNACKN,
- },
- .pgc = IMX8MM_PGC_DISPMIX,
- },
-
- [IMX8MM_POWER_DOMAIN_MIPI] = {
- .genpd = {
- .name = "mipi",
- },
- .bits = {
- .pxx = IMX8MM_MIPI_SW_Pxx_REQ,
- .map = IMX8MM_MIPI_A53_DOMAIN,
- },
- .pgc = IMX8MM_PGC_MIPI,
- },
-};
-
-static const struct regmap_range imx8mm_yes_ranges[] = {
- regmap_reg_range(GPC_LPCR_A_CORE_BSC,
- GPC_PU_PWRHSK),
- regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_MIPI),
- GPC_PGC_SR(IMX8MM_PGC_MIPI)),
- regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_PCIE),
- GPC_PGC_SR(IMX8MM_PGC_PCIE)),
- regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_OTG1),
- GPC_PGC_SR(IMX8MM_PGC_OTG1)),
- regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_OTG2),
- GPC_PGC_SR(IMX8MM_PGC_OTG2)),
- regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_DDR1),
- GPC_PGC_SR(IMX8MM_PGC_DDR1)),
- regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_GPU2D),
- GPC_PGC_SR(IMX8MM_PGC_GPU2D)),
- regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_GPUMIX),
- GPC_PGC_SR(IMX8MM_PGC_GPUMIX)),
- regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_VPUMIX),
- GPC_PGC_SR(IMX8MM_PGC_VPUMIX)),
- regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_GPU3D),
- GPC_PGC_SR(IMX8MM_PGC_GPU3D)),
- regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_DISPMIX),
- GPC_PGC_SR(IMX8MM_PGC_DISPMIX)),
- regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_VPUG1),
- GPC_PGC_SR(IMX8MM_PGC_VPUG1)),
- regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_VPUG2),
- GPC_PGC_SR(IMX8MM_PGC_VPUG2)),
- regmap_reg_range(GPC_PGC_CTRL(IMX8MM_PGC_VPUH1),
- GPC_PGC_SR(IMX8MM_PGC_VPUH1)),
-};
-
-static const struct regmap_access_table imx8mm_access_table = {
- .yes_ranges = imx8mm_yes_ranges,
- .n_yes_ranges = ARRAY_SIZE(imx8mm_yes_ranges),
-};
-
-static const struct imx_pgc_domain_data imx8mm_pgc_domain_data = {
- .domains = imx8mm_pgc_domains,
- .domains_num = ARRAY_SIZE(imx8mm_pgc_domains),
- .reg_access_table = &imx8mm_access_table,
-};
-
-static const struct imx_pgc_domain imx8mn_pgc_domains[] = {
- [IMX8MN_POWER_DOMAIN_HSIOMIX] = {
- .genpd = {
- .name = "hsiomix",
- },
- .bits = {
- .pxx = 0, /* no power sequence control */
- .map = 0, /* no power sequence control */
- .hskreq = IMX8MN_HSIO_HSK_PWRDNREQN,
- .hskack = IMX8MN_HSIO_HSK_PWRDNACKN,
- },
- },
+static int imx_pgc_get_clocks(struct imx_pgc_domain *domain)
+{
+ int i, ret;
+
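+ /* clocks are taken positionally from the domain's DT node, up to GPC_CLK_MAX */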
+ for (i = 0; ; i++) {
+ struct clk *clk = of_clk_get(domain->dev->of_node, i);
+ if (IS_ERR(clk))
+ break;
+ if (i >= GPC_CLK_MAX) {
+ dev_err(domain->dev, "more than %d clocks\n",
+ GPC_CLK_MAX);
+ ret = -EINVAL;
+ goto clk_err;
+ }
+ domain->clk[i] = clk;
+ }
+ domain->num_clks = i;
- [IMX8MN_POWER_DOMAIN_OTG1] = {
- .genpd = {
- .name = "usb-otg1",
- },
- .bits = {
- .pxx = IMX8MN_OTG1_SW_Pxx_REQ,
- .map = IMX8MN_OTG1_A53_DOMAIN,
- },
- .pgc = IMX8MN_PGC_OTG1,
- },
+ return 0;
- [IMX8MN_POWER_DOMAIN_GPUMIX] = {
- .genpd = {
- .name = "gpumix",
- },
- .bits = {
- .pxx = IMX8MN_GPUMIX_SW_Pxx_REQ,
- .map = IMX8MN_GPUMIX_A53_DOMAIN,
- .hskreq = IMX8MN_GPUMIX_HSK_PWRDNREQN,
- .hskack = IMX8MN_GPUMIX_HSK_PWRDNACKN,
- },
- .pgc = IMX8MN_PGC_GPUMIX,
- },
-};
+clk_err:
+ while (i--)
+ clk_put(domain->clk[i]);
-static const struct regmap_range imx8mn_yes_ranges[] = {
- regmap_reg_range(GPC_LPCR_A_CORE_BSC,
- GPC_PU_PWRHSK),
- regmap_reg_range(GPC_PGC_CTRL(IMX8MN_PGC_MIPI),
- GPC_PGC_SR(IMX8MN_PGC_MIPI)),
- regmap_reg_range(GPC_PGC_CTRL(IMX8MN_PGC_OTG1),
- GPC_PGC_SR(IMX8MN_PGC_OTG1)),
- regmap_reg_range(GPC_PGC_CTRL(IMX8MN_PGC_DDR1),
- GPC_PGC_SR(IMX8MN_PGC_DDR1)),
- regmap_reg_range(GPC_PGC_CTRL(IMX8MN_PGC_GPUMIX),
- GPC_PGC_SR(IMX8MN_PGC_GPUMIX)),
- regmap_reg_range(GPC_PGC_CTRL(IMX8MN_PGC_DISPMIX),
- GPC_PGC_SR(IMX8MN_PGC_DISPMIX)),
-};
+ return ret;
+}
-static const struct regmap_access_table imx8mn_access_table = {
- .yes_ranges = imx8mn_yes_ranges,
- .n_yes_ranges = ARRAY_SIZE(imx8mn_yes_ranges),
-};
+static void imx_pgc_put_clocks(struct imx_pgc_domain *domain)
+{
+ int i;
-static const struct imx_pgc_domain_data imx8mn_pgc_domain_data = {
- .domains = imx8mn_pgc_domains,
- .domains_num = ARRAY_SIZE(imx8mn_pgc_domains),
- .reg_access_table = &imx8mn_access_table,
-};
+ for (i = domain->num_clks - 1; i >= 0; i--)
+ clk_put(domain->clk[i]);
+}
static int imx_pgc_domain_probe(struct platform_device *pdev)
{
@@ -875,45 +495,25 @@ static int imx_pgc_domain_probe(struct platform_device *pdev)
domain->voltage, domain->voltage);
}
- domain->num_clks = devm_clk_bulk_get_all(domain->dev, &domain->clks);
- if (domain->num_clks < 0)
- return dev_err_probe(domain->dev, domain->num_clks,
- "Failed to get domain's clocks\n");
-
- domain->reset = devm_reset_control_array_get_optional_exclusive(domain->dev);
- if (IS_ERR(domain->reset))
- return dev_err_probe(domain->dev, PTR_ERR(domain->reset),
- "Failed to get domain's resets\n");
-
- pm_runtime_enable(domain->dev);
-
- if (domain->bits.map)
- regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
- domain->bits.map, domain->bits.map);
+ ret = imx_pgc_get_clocks(domain);
+ if (ret)
+ return dev_err_probe(domain->dev, ret, "Failed to get domain's clocks\n");
ret = pm_genpd_init(&domain->genpd, NULL, true);
if (ret) {
dev_err(domain->dev, "Failed to init power domain\n");
- goto out_domain_unmap;
+ imx_pgc_put_clocks(domain);
+ return ret;
}
ret = of_genpd_add_provider_simple(domain->dev->of_node,
&domain->genpd);
if (ret) {
dev_err(domain->dev, "Failed to add genpd provider\n");
- goto out_genpd_remove;
+ pm_genpd_remove(&domain->genpd);
+ imx_pgc_put_clocks(domain);
}
- return 0;
-
-out_genpd_remove:
- pm_genpd_remove(&domain->genpd);
-out_domain_unmap:
- if (domain->bits.map)
- regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
- domain->bits.map, 0);
- pm_runtime_disable(domain->dev);
-
return ret;
}
@@ -923,12 +523,7 @@ static int imx_pgc_domain_remove(struct platform_device *pdev)
of_genpd_del_provider(domain->dev->of_node);
pm_genpd_remove(&domain->genpd);
-
- if (domain->bits.map)
- regmap_update_bits(domain->regmap, GPC_PGC_CPU_MAPPING,
- domain->bits.map, 0);
-
- pm_runtime_disable(domain->dev);
+ imx_pgc_put_clocks(domain);
return 0;
}
@@ -1022,8 +617,8 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
domain = pd_pdev->dev.platform_data;
domain->regmap = regmap;
- domain->genpd.power_on = imx_pgc_power_up;
- domain->genpd.power_off = imx_pgc_power_down;
+ domain->genpd.power_on = imx_gpc_pu_pgc_sw_pup_req;
+ domain->genpd.power_off = imx_gpc_pu_pgc_sw_pdn_req;
pd_pdev->dev.parent = dev;
pd_pdev->dev.of_node = np;
@@ -1041,8 +636,6 @@ static int imx_gpcv2_probe(struct platform_device *pdev)
static const struct of_device_id imx_gpcv2_dt_ids[] = {
{ .compatible = "fsl,imx7d-gpc", .data = &imx7_pgc_domain_data, },
- { .compatible = "fsl,imx8mm-gpc", .data = &imx8mm_pgc_domain_data, },
- { .compatible = "fsl,imx8mn-gpc", .data = &imx8mn_pgc_domain_data, },
{ .compatible = "fsl,imx8mq-gpc", .data = &imx8m_pgc_domain_data, },
{ }
};
diff --git a/drivers/soc/imx/imx8m_pm_domains.c b/drivers/soc/imx/imx8m_pm_domains.c
new file mode 100644
index 000000000000..d0af2ddb14d9
--- /dev/null
+++ b/drivers/soc/imx/imx8m_pm_domains.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/regulator/consumer.h>
+
+#include <soc/imx/imx_sip.h>
+
+#define MAX_CLK_NUM 6
+#define to_imx8m_pm_domain(_genpd) container_of(_genpd, struct imx8m_pm_domain, pd)
+
+
+struct imx8m_pm_domain {
+ struct device *dev;
+ struct generic_pm_domain pd;
+ u32 domain_index;
+ struct clk *clk[MAX_CLK_NUM];
+ unsigned int num_clks;
+ struct regulator *reg;
+};
+
+enum imx8m_pm_domain_state {
+ PD_STATE_OFF,
+ PD_STATE_ON,
+};
+
+static DEFINE_MUTEX(gpc_pd_mutex);
+
+static int imx8m_pd_power_on(struct generic_pm_domain *genpd)
+{
+ struct imx8m_pm_domain *domain = to_imx8m_pm_domain(genpd);
+ struct arm_smccc_res res;
+ int index, ret = 0;
+
+ /* power on the external supply */
+ if (!IS_ERR(domain->reg)) {
+ ret = regulator_enable(domain->reg);
+ if (ret) {
+ dev_warn(domain->dev, "failed to power up the reg%d\n", ret);
+ return ret;
+ }
+ }
+
+ /* enable the necessary clks needed by the power domain */
+ if (domain->num_clks) {
+ for (index = 0; index < domain->num_clks; index++)
+ clk_prepare_enable(domain->clk[index]);
+ }
+
+ mutex_lock(&gpc_pd_mutex);
+ arm_smccc_smc(IMX_SIP_GPC, IMX_SIP_CONFIG_GPC_PM_DOMAIN, domain->domain_index,
+ PD_STATE_ON, 0, 0, 0, 0, &res);
+ mutex_unlock(&gpc_pd_mutex);
+
+ return 0;
+}
+
+static int imx8m_pd_power_off(struct generic_pm_domain *genpd)
+{
+ struct imx8m_pm_domain *domain = to_imx8m_pm_domain(genpd);
+ struct arm_smccc_res res;
+ int index, ret = 0;
+
+ mutex_lock(&gpc_pd_mutex);
+ arm_smccc_smc(IMX_SIP_GPC, IMX_SIP_CONFIG_GPC_PM_DOMAIN, domain->domain_index,
+ PD_STATE_OFF, 0, 0, 0, 0, &res);
+ mutex_unlock(&gpc_pd_mutex);
+
+ /* power off the external supply */
+ if (!IS_ERR(domain->reg)) {
+ ret = regulator_disable(domain->reg);
+ if (ret) {
+ dev_warn(domain->dev, "failed to power off the reg%d\n", ret);
+ return ret;
+ }
+ }
+
+ /* disable clks when power domain is off */
+ if (domain->num_clks) {
+ for (index = 0; index < domain->num_clks; index++)
+ clk_disable_unprepare(domain->clk[index]);
+ }
+
+ return ret;
+}
+
+static int imx8m_pd_get_clocks(struct imx8m_pm_domain *domain)
+{
+ int i, ret;
+
+ if (domain->pd.flags & GENPD_FLAG_PM_PD_CLK)
+ return 0;
+
+	for (i = 0; ; i++) {
+		struct clk *clk = of_clk_get(domain->dev->of_node, i);
+
+		if (IS_ERR(clk))
+			break;
+		if (i >= MAX_CLK_NUM) {
+			clk_put(clk);
+			dev_err(domain->dev, "more than %d clocks\n",
+				MAX_CLK_NUM);
+			ret = -EINVAL;
+			goto clk_err;
+		}
+		domain->clk[i] = clk;
+	}
+ domain->num_clks = i;
+
+ return 0;
+
+clk_err:
+ while (i--)
+ clk_put(domain->clk[i]);
+
+ return ret;
+}
+
+static void imx8m_pd_put_clocks(struct imx8m_pm_domain *domain)
+{
+ int i;
+
+ if (domain->pd.flags & GENPD_FLAG_PM_PD_CLK)
+ return;
+
+ for (i = domain->num_clks - 1; i >= 0; i--)
+ clk_put(domain->clk[i]);
+}
+
+static const struct of_device_id imx8m_pm_domain_ids[] = {
+ {.compatible = "fsl,imx8m-pm-domain"},
+ {},
+};
+
+static int imx8m_pm_domain_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct imx8m_pm_domain *domain;
+ struct of_phandle_args parent, child;
+ int ret;
+
+ domain = devm_kzalloc(dev, sizeof(*domain), GFP_KERNEL);
+ if (!domain)
+ return -ENOMEM;
+
+ child.np = np;
+ domain->dev = dev;
+
+ ret = of_property_read_string(np, "domain-name", &domain->pd.name);
+ if (ret) {
+ dev_err(dev, "failed to get the domain name\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(np, "domain-index", &domain->domain_index);
+ if (ret) {
+ dev_err(dev, "failed to get the domain index\n");
+ return -EINVAL;
+ }
+
+ domain->reg = devm_regulator_get_optional(dev, "power");
+ if (IS_ERR(domain->reg)) {
+ if (PTR_ERR(domain->reg) != -ENODEV) {
+ if (PTR_ERR(domain->reg) != -EPROBE_DEFER)
+ dev_err(dev, "failed to get domain's regulator\n");
+ return PTR_ERR(domain->reg);
+ }
+ }
+
+ if (of_machine_is_compatible("fsl,imx8mp"))
+ domain->pd.flags |= GENPD_FLAG_PM_PD_CLK;
+
+ ret = imx8m_pd_get_clocks(domain);
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to get domain's clocks\n");
+ return ret;
+ }
+
+ domain->pd.power_off = imx8m_pd_power_off;
+ domain->pd.power_on = imx8m_pd_power_on;
+ if (of_property_read_bool(np, "active-wakeup"))
+ domain->pd.flags |= GENPD_FLAG_ACTIVE_WAKEUP;
+ if (of_property_read_bool(np, "rpm-always-on"))
+ domain->pd.flags |= GENPD_FLAG_RPM_ALWAYS_ON;
+
+ pm_genpd_init(&domain->pd, NULL, !(domain->pd.flags & GENPD_FLAG_RPM_ALWAYS_ON));
+
+ ret = pm_genpd_of_add_clks(&domain->pd, dev);
+ if (ret) {
+ pm_genpd_remove(&domain->pd);
+ return ret;
+ }
+
+ ret = of_genpd_add_provider_simple(np, &domain->pd);
+ if (ret) {
+ dev_err(dev, "failed to add the domain provider\n");
+ pm_genpd_remove(&domain->pd);
+ imx8m_pd_put_clocks(domain);
+ return ret;
+ }
+
+ /* add it as subdomain if necessary */
+ if (!of_parse_phandle_with_args(np, "parent-domains",
+ "#power-domain-cells", 0, &parent)) {
+ ret = of_genpd_add_subdomain(&parent, &child);
+ of_node_put(parent.np);
+
+ if (ret < 0) {
+ dev_dbg(dev, "failed to add the subdomain: %s: %d",
+ domain->pd.name, ret);
+ of_genpd_del_provider(np);
+ pm_genpd_remove(&domain->pd);
+ imx8m_pd_put_clocks(domain);
+ return -EPROBE_DEFER;
+ }
+ }
+
+ return 0;
+}
+
+static struct platform_driver imx8m_pm_domain_driver = {
+ .driver = {
+ .name = "imx8m_pm_domain",
+ .owner = THIS_MODULE,
+ .of_match_table = imx8m_pm_domain_ids,
+ },
+ .probe = imx8m_pm_domain_probe,
+};
+module_platform_driver(imx8m_pm_domain_driver);
+
+MODULE_AUTHOR("NXP");
+MODULE_DESCRIPTION("NXP i.MX8M power domain driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/imx/imx8ulp_lpm.c b/drivers/soc/imx/imx8ulp_lpm.c
new file mode 100644
index 000000000000..09e2f7a0fe7a
--- /dev/null
+++ b/drivers/soc/imx/imx8ulp_lpm.c
@@ -0,0 +1,276 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2022 NXP
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/suspend.h>
+
+#define FSL_SIP_DDR_DVFS 0xc2000004
+#define DDR_DFS_GET_FSP_COUNT 0x10
+#define DDR_FSP_HIGH 2
+#define DDR_FSP_LOW 1
+#define DDR_DFS_FSP_NUM_MIN 3
+#define DDR_BYPASS_DRATE 400
+
+static struct clk *dram_sel;
+static struct clk *dram_div;
+static struct clk *pll4;
+static struct clk *frosc;
+static struct clk *spll2;
+static struct clk *a35_sel;
+static struct clk *nic_sel;
+static struct clk *lpav_axi_sel;
+static struct clk *nic_old_parent;
+static struct clk *a35_old_parent;
+static struct clk *lpav_old_parent;
+
+static bool lpm_enabled = false;
+static bool bypass_enabled = false;
+static bool sys_dvfs_enabled = false;
+static struct device *imx8ulp_lpm_dev;
+static int num_fsp;
+
+static int scaling_dram_freq(unsigned int fsp_index)
+{
+ struct arm_smccc_res res;
+ u32 num_cpus = num_online_cpus();
+
+ local_irq_disable();
+
+	/* TODO: check the SMC return value? */
+ arm_smccc_smc(FSL_SIP_DDR_DVFS, fsp_index, num_cpus,
+ sys_dvfs_enabled, 0, 0, 0, 0, &res);
+
+ local_irq_enable();
+
+ /* Correct the clock tree & rate info as it has been updated in TF-A */
+ if (fsp_index == DDR_FSP_HIGH) {
+ clk_set_parent(dram_sel, pll4);
+ } else if (bypass_enabled) {
+ /* only need to correct the clock parent/child for bypass mode */
+ clk_set_parent(dram_sel, frosc);
+ }
+
+ clk_get_rate(dram_div);
+
+ return 0;
+}
+
+static void sys_freq_scaling(bool enter)
+{
+	int ret;
+
+	if (enter) {
+		if (sys_dvfs_enabled) {
+			/* scale down the APD side NIC frequency: switch to FRO 192MHz */
+ nic_old_parent = clk_get_parent(nic_sel);
+ lpav_old_parent = clk_get_parent(lpav_axi_sel);
+ a35_old_parent = clk_get_parent(a35_sel);
+
+ ret = clk_set_parent(nic_sel, frosc);
+ if (ret)
+ pr_err("failed to change nic clock parent:%d\n", ret);
+
+ ret = clk_set_parent(lpav_axi_sel, frosc);
+ if (ret)
+ pr_err("failed to change lpav axi clock parent:%d\n", ret);
+
+			/*
+			 * scale down the A35 core frequency: switch to FRO 192MHz
+			 * first, then change the SPLL2 frequency to 500MHz.
+			 */
+ ret = clk_set_parent(a35_sel, frosc);
+ if (ret)
+ pr_err("failed to change a35 clock parent:%d\n", ret);
+
+ /* change SPLL2 to UD 500MHz */
+ ret = clk_set_rate(spll2, 500000000);
+ if (ret)
+ pr_err("failed to set spll2 frequency:%d\n", ret);
+
+			/* switch A35 back from frosc to its original parent */
+ ret = clk_set_parent(a35_sel, a35_old_parent);
+ if (ret)
+ pr_err("failed to change a35 clock parent back:%d\n", ret);
+ }
+
+ /*
+ * scaling down the ddr frequency and change the BUCK3
+ * voltage to ND 1.0V if system level dvfs is enabled.
+ */
+ scaling_dram_freq(DDR_FSP_LOW);
+
+ pr_info("DDR enter low frequency mode\n");
+ } else {
+		/* prepare and enable PLL4 first */
+ clk_prepare_enable(pll4);
+ /*
+ * exit LPM mode, increase the BUCK3 voltage to OD if system level
+ * dvfs enabled, scaling up the DDR frequency
+ */
+ scaling_dram_freq(DDR_FSP_HIGH);
+
+ if (sys_dvfs_enabled) {
+ ret = clk_set_parent(a35_sel, frosc);
+ if (ret)
+ pr_err("failed to change a35 clock parent:%d\n", ret);
+
+ /* change SPLL2 to OD 960MHz */
+ ret = clk_set_rate(spll2, 960000000);
+ if (ret)
+ pr_err("failed to set spll2 frequency:%d\n", ret);
+
+			/* switch A35 back from frosc to its original parent */
+ ret = clk_set_parent(a35_sel, a35_old_parent);
+ if (ret)
+ pr_err("failed to change a35 clock parent back:%d\n", ret);
+
+ /* scaling up the NIC frequency */
+ ret = clk_set_parent(nic_sel, nic_old_parent);
+ if (ret)
+ pr_err("failed to change nic clock parent:%d\n", ret);
+
+ /* scaling up lpav axi frequency */
+ ret = clk_set_parent(lpav_axi_sel, lpav_old_parent);
+ if (ret)
+ pr_err("failed to change lpav axi clock parent:%d\n", ret);
+ }
+
+ /* unprepare pll4 after clock tree info is correct */
+ clk_disable_unprepare(pll4);
+
+ pr_info("DDR Exit from low frequency mode\n");
+ }
+}
+
+static ssize_t lpm_enable_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	if (lpm_enabled)
+		return sprintf(buf, "i.MX8ULP LPM mode enabled\n");
+	else
+		return sprintf(buf, "i.MX8ULP LPM mode disabled\n");
+}
+
+static ssize_t lpm_enable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size)
+{
+ /*
+ * only support DDR DFS between PLL on and PLL bypass, so the valid
+ * num_fsp should be 3
+ */
+	if (num_fsp < DDR_DFS_FSP_NUM_MIN)
+		pr_info("DDR DFS is only supported with both F1 & F2 enabled\n");
+
+ if ((strncmp(buf, "1", 1) == 0) && !lpm_enabled) {
+ sys_freq_scaling(true);
+
+ lpm_enabled = true;
+ } else if (strncmp(buf, "0", 1) == 0) {
+ if (lpm_enabled)
+ sys_freq_scaling(false);
+
+ lpm_enabled = false;
+ }
+
+ return size;
+}
+
+static DEVICE_ATTR(enable, 0644, lpm_enable_show,
+ lpm_enable_store);
+
+static int imx8ulp_lpm_pm_notify(struct notifier_block *nb, unsigned long event,
+ void *dummy)
+{
+ /* if DDR is not in low frequency, return directly */
+ if (!lpm_enabled)
+ return NOTIFY_OK;
+
+ if (event == PM_SUSPEND_PREPARE)
+ sys_freq_scaling(false);
+ else if (event == PM_POST_SUSPEND)
+ sys_freq_scaling(true);
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block imx8ulp_lpm_pm_notifier = {
+ .notifier_call = imx8ulp_lpm_pm_notify,
+};
+
+/* sysfs for user control */
+static int imx8ulp_lpm_probe(struct platform_device *pdev)
+{
+ int err;
+ struct arm_smccc_res res;
+
+ imx8ulp_lpm_dev = &pdev->dev;
+
+ arm_smccc_smc(FSL_SIP_DDR_DVFS, DDR_DFS_GET_FSP_COUNT, 0,
+ 0, 0, 0, 0, 0, &res);
+ num_fsp = res.a0;
+	/* check whether F1 is in bypass mode */
+	if (res.a1 <= DDR_BYPASS_DRATE)
+		bypass_enabled = true;
+
+	/* DFS is only supported when both F1 & F2 are enabled */
+ if (num_fsp != DDR_DFS_FSP_NUM_MIN)
+ return -ENODEV;
+
+ /*
+	 * check if system-level DVFS is enabled; only when it is can we scale
+	 * the system frequency and voltage dynamically
+ */
+ sys_dvfs_enabled = of_property_read_bool(pdev->dev.of_node, "sys-dvfs-enabled");
+
+ /* get the necessary clocks */
+ dram_sel = devm_clk_get(&pdev->dev, "ddr_sel");
+ dram_div = devm_clk_get(&pdev->dev, "ddr_div");
+ pll4 = devm_clk_get(&pdev->dev, "pll4");
+ frosc = devm_clk_get(&pdev->dev, "frosc");
+	/* the clocks below are used for system-level OD/ND mode switching */
+ nic_sel = devm_clk_get(&pdev->dev, "nic_sel");
+ a35_sel = devm_clk_get(&pdev->dev, "a35_sel");
+ spll2 = devm_clk_get(&pdev->dev, "spll2");
+ lpav_axi_sel = devm_clk_get(&pdev->dev, "lpav_axi_sel");
+ pll4 = devm_clk_get(&pdev->dev, "pll4");
+ if (IS_ERR(dram_sel) || IS_ERR(dram_div) || IS_ERR(pll4) || IS_ERR(frosc) ||
+ IS_ERR(nic_sel) || IS_ERR(a35_sel) || IS_ERR(spll2) || IS_ERR(lpav_axi_sel) ||
+ IS_ERR(pll4))
+ dev_err(&pdev->dev, "Get clocks failed\n");
+
+ /* create the sysfs file */
+ err = sysfs_create_file(&imx8ulp_lpm_dev->kobj, &dev_attr_enable.attr);
+ if (err) {
+ dev_err(&pdev->dev, "creating i.MX8ULP LPM control sys file\n");
+ return err;
+ }
+
+ register_pm_notifier(&imx8ulp_lpm_pm_notifier);
+
+ return 0;
+}
+
+static const struct of_device_id imx8ulp_lpm_ids[] = {
+ {.compatible = "nxp, imx8ulp-lpm", },
+ { /* sentinel */}
+};
+
+static struct platform_driver imx8ulp_lpm_driver = {
+ .driver = {
+ .name = "imx8ulp-lpm",
+ .owner = THIS_MODULE,
+ .of_match_table = imx8ulp_lpm_ids,
+ },
+ .probe = imx8ulp_lpm_probe,
+};
+module_platform_driver(imx8ulp_lpm_driver);
+
+MODULE_AUTHOR("NXP Semiconductor, Inc.");
+MODULE_DESCRIPTION("i.MX8ULP Low Power Control driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/imx/imx93-blk-ctrl.c b/drivers/soc/imx/imx93-blk-ctrl.c
new file mode 100644
index 000000000000..a5fd9ab80d87
--- /dev/null
+++ b/drivers/soc/imx/imx93-blk-ctrl.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+/*
+ * Copyright 2022 NXP, Peng Fan <peng.fan@nxp.com>
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+
+#include <dt-bindings/power/imx93-power.h>
+
+#define BLK_SFT_RSTN 0x0
+#define BLK_CLK_EN 0x4
+
+#define BLK_MAX_CLKS 4
+
+#define LCDIF_QOS_REG 0xC
+#define LCDIF_DEFAULT_QOS_OFF 12
+#define LCDIF_CFG_QOS_OFF 8
+
+#define PXP_QOS_REG 0x10
+#define PXP_R_DEFAULT_QOS_OFF 28
+#define PXP_R_CFG_QOS_OFF 24
+#define PXP_W_DEFAULT_QOS_OFF 20
+#define PXP_W_CFG_QOS_OFF 16
+
+#define ISI_CACHE_REG 0x14
+
+#define ISI_QOS_REG 0x1C
+#define ISI_V_DEFAULT_QOS_OFF 28
+#define ISI_V_CFG_QOS_OFF 24
+#define ISI_U_DEFAULT_QOS_OFF 20
+#define ISI_U_CFG_QOS_OFF 16
+#define ISI_Y_R_DEFAULT_QOS_OFF 12
+#define ISI_Y_R_CFG_QOS_OFF 8
+#define ISI_Y_W_DEFAULT_QOS_OFF 4
+#define ISI_Y_W_CFG_QOS_OFF 0
+
+#define PRIO_MASK 0xF
+
+#define PRIO(X) (X)
+
+struct imx93_blk_ctrl_domain;
+
+struct imx93_blk_ctrl {
+ struct device *dev;
+ struct regmap *regmap;
+ int num_clks;
+ struct clk_bulk_data clks[BLK_MAX_CLKS];
+ struct imx93_blk_ctrl_domain *domains;
+ struct genpd_onecell_data onecell_data;
+};
+
+#define DOMAIN_MAX_QOS 4
+
+struct imx93_blk_ctrl_qos {
+ u32 reg;
+ u32 cfg_off;
+ u32 default_prio;
+ u32 cfg_prio;
+};
+
+struct imx93_blk_ctrl_domain_data {
+ const char *name;
+ const char * const *clk_names;
+ int num_clks;
+ u32 rst_mask;
+ u32 clk_mask;
+ u32 num_qos;
+ struct imx93_blk_ctrl_qos qos[DOMAIN_MAX_QOS];
+ const struct regmap_access_table *reg_access_table;
+};
+
+#define DOMAIN_MAX_CLKS 4
+
+struct imx93_blk_ctrl_domain {
+ struct generic_pm_domain genpd;
+ const struct imx93_blk_ctrl_domain_data *data;
+ struct clk_bulk_data clks[DOMAIN_MAX_CLKS];
+ struct imx93_blk_ctrl *bc;
+};
+
+struct imx93_blk_ctrl_data {
+ const struct imx93_blk_ctrl_domain_data *domains;
+ const struct imx93_blk_ctrl_domain_data *bus;
+ int num_domains;
+};
+
+static const struct regmap_range imx93_media_blk_ctl_yes_ranges[] = {
+ regmap_reg_range(BLK_SFT_RSTN, BLK_CLK_EN),
+ regmap_reg_range(LCDIF_QOS_REG, ISI_CACHE_REG),
+ regmap_reg_range(ISI_QOS_REG, ISI_QOS_REG),
+};
+
+static const struct regmap_access_table imx93_media_blk_ctl_access_table = {
+ .yes_ranges = imx93_media_blk_ctl_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(imx93_media_blk_ctl_yes_ranges),
+};
+
+static const struct imx93_blk_ctrl_domain_data imx93_media_blk_ctl_bus_data = {
+ .clk_names = (const char *[]){ "axi", "apb", "nic", },
+ .num_clks = 3,
+ .reg_access_table = &imx93_media_blk_ctl_access_table,
+};
+
+static inline struct imx93_blk_ctrl_domain *
+to_imx93_blk_ctrl_domain(struct generic_pm_domain *genpd)
+{
+ return container_of(genpd, struct imx93_blk_ctrl_domain, genpd);
+}
+
+static int imx93_blk_ctrl_set_qos(struct imx93_blk_ctrl_domain *domain)
+{
+ const struct imx93_blk_ctrl_domain_data *data = domain->data;
+ struct imx93_blk_ctrl *bc = domain->bc;
+ const struct imx93_blk_ctrl_qos *qos;
+ u32 val, mask;
+ int i;
+
+ for (i = 0; i < data->num_qos; i++) {
+ qos = &data->qos[i];
+
+ mask = PRIO_MASK << qos->cfg_off;
+ mask |= PRIO_MASK << (qos->cfg_off + 4);
+ val = qos->cfg_prio << qos->cfg_off;
+ val |= qos->default_prio << (qos->cfg_off + 4);
+
+ regmap_write_bits(bc->regmap, qos->reg, mask, val);
+
+ dev_dbg(bc->dev, "data->qos[i].reg 0x%x 0x%x\n", qos->reg, val);
+ }
+
+ return 0;
+}
+
+static int imx93_blk_ctrl_power_on(struct generic_pm_domain *genpd)
+{
+ struct imx93_blk_ctrl_domain *domain = to_imx93_blk_ctrl_domain(genpd);
+ const struct imx93_blk_ctrl_domain_data *data = domain->data;
+ struct imx93_blk_ctrl *bc = domain->bc;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(bc->num_clks, bc->clks);
+ if (ret) {
+ dev_err(bc->dev, "failed to enable bus clocks\n");
+ return ret;
+ }
+
+ ret = clk_bulk_prepare_enable(data->num_clks, domain->clks);
+ if (ret) {
+ dev_err(bc->dev, "failed to enable clocks\n");
+ return ret;
+ }
+
+ /* Make sure PM runtime is active */
+ pm_runtime_set_active(bc->dev);
+ ret = pm_runtime_get_sync(bc->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(bc->dev);
+ dev_err(bc->dev, "failed to power up domain\n");
+ goto disable_clk;
+ }
+
+ /* ungate clk */
+ regmap_clear_bits(bc->regmap, BLK_CLK_EN, data->clk_mask);
+
+ /* release reset */
+ regmap_set_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
+
+ dev_dbg(bc->dev, "pd_on: name: %s\n", genpd->name);
+
+ return imx93_blk_ctrl_set_qos(domain);
+
+disable_clk:
+	clk_bulk_disable_unprepare(data->num_clks, domain->clks);
+	clk_bulk_disable_unprepare(bc->num_clks, bc->clks);
+
+ return ret;
+}
+
+static int imx93_blk_ctrl_power_off(struct generic_pm_domain *genpd)
+{
+ struct imx93_blk_ctrl_domain *domain = to_imx93_blk_ctrl_domain(genpd);
+ const struct imx93_blk_ctrl_domain_data *data = domain->data;
+ struct imx93_blk_ctrl *bc = domain->bc;
+
+ dev_dbg(bc->dev, "pd_off: name: %s\n", genpd->name);
+
+ regmap_clear_bits(bc->regmap, BLK_SFT_RSTN, data->rst_mask);
+ regmap_set_bits(bc->regmap, BLK_CLK_EN, data->clk_mask);
+
+ pm_runtime_put(bc->dev);
+
+ clk_bulk_disable_unprepare(data->num_clks, domain->clks);
+
+ clk_bulk_disable_unprepare(bc->num_clks, bc->clks);
+
+ return 0;
+}
+
+static struct generic_pm_domain *
+imx93_blk_ctrl_xlate(struct of_phandle_args *args, void *data)
+{
+ struct genpd_onecell_data *onecell_data = data;
+ unsigned int index = args->args[0];
+
+ if (args->args_count != 1 ||
+ index >= onecell_data->num_domains)
+ return ERR_PTR(-EINVAL);
+
+ return onecell_data->domains[index];
+}
+
+static int imx93_blk_ctrl_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct imx93_blk_ctrl_data *bc_data = of_device_get_match_data(dev);
+ const struct imx93_blk_ctrl_domain_data *bus = bc_data->bus;
+ struct imx93_blk_ctrl *bc;
+ void __iomem *base;
+ int i, ret;
+
+ struct regmap_config regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .rd_table = bus->reg_access_table,
+ .wr_table = bus->reg_access_table,
+ .max_register = SZ_4K,
+ };
+
+ bc = devm_kzalloc(dev, sizeof(*bc), GFP_KERNEL);
+ if (!bc)
+ return -ENOMEM;
+
+ bc->dev = dev;
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ bc->regmap = regmap_init_mmio(NULL, base, &regmap_config);
+ if (IS_ERR(bc->regmap))
+ return dev_err_probe(dev, PTR_ERR(bc->regmap),
+ "failed to init regmap\n");
+
+ bc->domains = devm_kcalloc(dev, bc_data->num_domains + 1,
+ sizeof(struct imx93_blk_ctrl_domain),
+ GFP_KERNEL);
+ if (!bc->domains) {
+ ret = -ENOMEM;
+ goto free_regmap;
+ }
+
+ bc->onecell_data.num_domains = bc_data->num_domains;
+ bc->onecell_data.xlate = imx93_blk_ctrl_xlate;
+ bc->onecell_data.domains =
+ devm_kcalloc(dev, bc_data->num_domains,
+ sizeof(struct generic_pm_domain *), GFP_KERNEL);
+ if (!bc->onecell_data.domains) {
+ ret = -ENOMEM;
+ goto free_regmap;
+ }
+
+ for (i = 0; i < bus->num_clks; i++)
+ bc->clks[i].id = bus->clk_names[i];
+ bc->num_clks = bus->num_clks;
+
+ ret = devm_clk_bulk_get(dev, bc->num_clks, bc->clks);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to get bus clock\n");
+ goto free_regmap;
+ }
+
+ for (i = 0; i < bc_data->num_domains; i++) {
+ const struct imx93_blk_ctrl_domain_data *data = &bc_data->domains[i];
+ struct imx93_blk_ctrl_domain *domain = &bc->domains[i];
+ int j;
+
+ domain->data = data;
+
+ for (j = 0; j < data->num_clks; j++)
+ domain->clks[j].id = data->clk_names[j];
+
+ ret = devm_clk_bulk_get(dev, data->num_clks, domain->clks);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to get clock\n");
+ goto cleanup_pds;
+ }
+
+ domain->genpd.name = data->name;
+ domain->genpd.power_on = imx93_blk_ctrl_power_on;
+ domain->genpd.power_off = imx93_blk_ctrl_power_off;
+ domain->bc = bc;
+
+ ret = pm_genpd_init(&domain->genpd, NULL, true);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to init power domain\n");
+ goto cleanup_pds;
+ }
+
+ bc->onecell_data.domains[i] = &domain->genpd;
+ }
+
+ pm_runtime_enable(dev);
+
+ ret = of_genpd_add_provider_onecell(dev->of_node, &bc->onecell_data);
+ if (ret) {
+ dev_err_probe(dev, ret, "failed to add power domain provider\n");
+ goto cleanup_pds;
+ }
+
+ dev_set_drvdata(dev, bc);
+
+ return 0;
+
+cleanup_pds:
+ for (i--; i >= 0; i--)
+ pm_genpd_remove(&bc->domains[i].genpd);
+
+free_regmap:
+ regmap_exit(bc->regmap);
+
+ return ret;
+}
+
+static int imx93_blk_ctrl_remove(struct platform_device *pdev)
+{
+ struct imx93_blk_ctrl *bc = dev_get_drvdata(&pdev->dev);
+ int i;
+
+ of_genpd_del_provider(pdev->dev.of_node);
+
+	for (i = 0; i < bc->onecell_data.num_domains; i++) {
+ struct imx93_blk_ctrl_domain *domain = &bc->domains[i];
+
+ pm_genpd_remove(&domain->genpd);
+ }
+
+ return 0;
+}
+
+static const struct imx93_blk_ctrl_domain_data imx93_media_blk_ctl_domain_data[] = {
+ [IMX93_MEDIABLK_PD_MIPI_DSI] = {
+ .name = "mediablk-mipi-dsi",
+ .clk_names = (const char *[]){ "dsi" },
+ .num_clks = 1,
+ .rst_mask = BIT(11) | BIT(12),
+ .clk_mask = BIT(11) | BIT(12),
+ },
+ [IMX93_MEDIABLK_PD_MIPI_CSI] = {
+ .name = "mediablk-mipi-csi",
+ .clk_names = (const char *[]){ "cam", "csi" },
+ .num_clks = 2,
+ .rst_mask = BIT(9) | BIT(10),
+ .clk_mask = BIT(9) | BIT(10),
+ },
+ [IMX93_MEDIABLK_PD_PXP] = {
+ .name = "mediablk-pxp",
+ .clk_names = (const char *[]){ "pxp" },
+ .num_clks = 1,
+ .rst_mask = BIT(7) | BIT(8),
+ .clk_mask = BIT(7) | BIT(8),
+ .num_qos = 2,
+ .qos = {
+ {
+ .reg = PXP_QOS_REG,
+ .cfg_off = PXP_R_CFG_QOS_OFF,
+ .default_prio = PRIO(3),
+ .cfg_prio = PRIO(6),
+ }, {
+ .reg = PXP_QOS_REG,
+ .cfg_off = PXP_W_CFG_QOS_OFF,
+ .default_prio = PRIO(3),
+ .cfg_prio = PRIO(6),
+ }
+ }
+ },
+ [IMX93_MEDIABLK_PD_LCDIF] = {
+ .name = "mediablk-lcdif",
+ .clk_names = (const char *[]){ "disp", "lcdif" },
+ .num_clks = 2,
+ .rst_mask = BIT(4) | BIT(5) | BIT(6),
+ .clk_mask = BIT(4) | BIT(5) | BIT(6),
+ .num_qos = 1,
+ .qos = {
+ {
+ .reg = LCDIF_QOS_REG,
+ .cfg_off = LCDIF_CFG_QOS_OFF,
+ .default_prio = PRIO(3),
+ .cfg_prio = PRIO(7),
+ }
+ }
+ },
+ [IMX93_MEDIABLK_PD_ISI] = {
+ .name = "mediablk-isi",
+ .clk_names = (const char *[]){ "isi" },
+ .num_clks = 1,
+ .rst_mask = BIT(2) | BIT(3),
+ .clk_mask = BIT(2) | BIT(3),
+ .num_qos = 4,
+ .qos = {
+ {
+ .reg = ISI_QOS_REG,
+ .cfg_off = ISI_Y_W_CFG_QOS_OFF,
+ .default_prio = PRIO(3),
+ .cfg_prio = PRIO(7),
+ }, {
+ .reg = ISI_QOS_REG,
+ .cfg_off = ISI_Y_R_CFG_QOS_OFF,
+ .default_prio = PRIO(3),
+ .cfg_prio = PRIO(7),
+ }, {
+ .reg = ISI_QOS_REG,
+ .cfg_off = ISI_U_CFG_QOS_OFF,
+ .default_prio = PRIO(3),
+ .cfg_prio = PRIO(7),
+ }, {
+ .reg = ISI_QOS_REG,
+ .cfg_off = ISI_V_CFG_QOS_OFF,
+ .default_prio = PRIO(3),
+ .cfg_prio = PRIO(7),
+ }
+ }
+ },
+};
+
+static const struct imx93_blk_ctrl_data imx93_media_blk_ctl_dev_data = {
+ .domains = imx93_media_blk_ctl_domain_data,
+ .bus = &imx93_media_blk_ctl_bus_data,
+ .num_domains = ARRAY_SIZE(imx93_media_blk_ctl_domain_data),
+};
+
+static const struct of_device_id imx93_blk_ctrl_of_match[] = {
+ {
+ .compatible = "fsl,imx93-media-blk-ctrl",
+ .data = &imx93_media_blk_ctl_dev_data
+ }, {
+ /* Sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, imx93_blk_ctrl_of_match);
+
+static struct platform_driver imx93_blk_ctrl_driver = {
+ .probe = imx93_blk_ctrl_probe,
+ .remove = imx93_blk_ctrl_remove,
+ .driver = {
+ .name = "imx93-blk-ctrl",
+ .of_match_table = imx93_blk_ctrl_of_match,
+ },
+};
+module_platform_driver(imx93_blk_ctrl_driver);
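
Each QoS entry in the tables above packs a 4-bit configured priority at cfg_off and the 4-bit default priority in the nibble directly above it. Worked numbers for the LCDIF entry (cfg_off = 8, cfg_prio = 7, default_prio = 3), as computed by imx93_blk_ctrl_set_qos():

/*
 * mask = (0xF << 8) | (0xF << 12) = 0x0000ff00
 * val  = (7   << 8) | (3   << 12) = 0x00003700
 *
 * regmap_write_bits() therefore updates only bits 15:8 of LCDIF_QOS_REG:
 * bits 11:8 get the configured priority (7) and bits 15:12 the default (3),
 * matching LCDIF_CFG_QOS_OFF = 8 and LCDIF_DEFAULT_QOS_OFF = 12.
 */
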
diff --git a/drivers/soc/imx/imx93-pd.c b/drivers/soc/imx/imx93-pd.c
new file mode 100644
index 000000000000..c49d37932964
--- /dev/null
+++ b/drivers/soc/imx/imx93-pd.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2022 NXP.
+ */
+
+#include <linux/clk.h>
+#include <linux/of_device.h>
+#include <linux/delay.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
+#include <dt-bindings/power/imx93-power.h>
+
+#define IMX93_SRC_MLMIX_OFF 0x1800
+#define IMX93_SRC_MEDIAMIX_OFF 0x2400
+
+#define MIX_SLICE_SW_CTRL_OFF 0x20
+#define SLICE_SW_CTRL_PSW_CTRL_OFF_MASK BIT(4)
+#define SLICE_SW_CTRL_PDN_SOFT_MASK BIT(31)
+
+#define MIX_FUNC_STAT_OFF 0xB4
+
+#define FUNC_STAT_PSW_STAT_MASK BIT(0)
+#define FUNC_STAT_RST_STAT_MASK BIT(2)
+#define FUNC_STAT_ISO_STAT_MASK BIT(4)
+
+struct imx93_slice_info {
+ char *name;
+ u32 mix_off;
+};
+
+struct imx93_plat_data {
+ u32 num_slice;
+ struct imx93_slice_info *slices;
+};
+
+struct imx93_power_domain {
+ struct generic_pm_domain genpd;
+ struct device *dev;
+	void __iomem *base;
+ const struct imx93_slice_info *slice_info;
+ struct clk_bulk_data *clks;
+ int num_clks;
+};
+
+#define to_imx93_pd(_genpd) container_of(_genpd, struct imx93_power_domain, genpd)
+
+static struct imx93_slice_info imx93_slice_infos[] = {
+ [IMX93_POWER_DOMAIN_MEDIAMIX] = {
+ .name = "mediamix",
+ .mix_off = IMX93_SRC_MEDIAMIX_OFF,
+ },
+ [IMX93_POWER_DOMAIN_MLMIX] = {
+ .name = "mlmix",
+ .mix_off = IMX93_SRC_MLMIX_OFF,
+ }
+};
+
+static struct imx93_plat_data imx93_plat_data = {
+ .num_slice = ARRAY_SIZE(imx93_slice_infos),
+ .slices = imx93_slice_infos,
+};
+
+static int imx93_pd_on(struct generic_pm_domain *genpd)
+{
+ struct imx93_power_domain *domain = to_imx93_pd(genpd);
+ const struct imx93_slice_info *slice_info = domain->slice_info;
+	void __iomem *addr = domain->base + slice_info->mix_off;
+ u32 val;
+ int ret;
+
+ ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
+ if (ret) {
+ dev_err(domain->dev, "failed to enable clocks for domain: %s\n", genpd->name);
+ return ret;
+ }
+
+ val = readl(addr + MIX_SLICE_SW_CTRL_OFF);
+ val &= ~SLICE_SW_CTRL_PDN_SOFT_MASK;
+ writel(val, addr + MIX_SLICE_SW_CTRL_OFF);
+
+ ret = readl_poll_timeout(addr + MIX_FUNC_STAT_OFF, val,
+ !(val & FUNC_STAT_ISO_STAT_MASK), 1, 10000);
+ if (ret) {
+ dev_err(domain->dev, "pd_on timeout: name: %s, stat: %x\n", genpd->name, val);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int imx93_pd_off(struct generic_pm_domain *genpd)
+{
+ struct imx93_power_domain *domain = to_imx93_pd(genpd);
+ const struct imx93_slice_info *slice_info = domain->slice_info;
+	void __iomem *addr = domain->base + slice_info->mix_off;
+ int ret;
+ u32 val;
+
+ /* Power off MIX */
+ val = readl(addr + MIX_SLICE_SW_CTRL_OFF);
+ val |= SLICE_SW_CTRL_PDN_SOFT_MASK;
+ writel(val, addr + MIX_SLICE_SW_CTRL_OFF);
+
+ ret = readl_poll_timeout(addr + MIX_FUNC_STAT_OFF, val,
+ val & FUNC_STAT_PSW_STAT_MASK, 1, 1000);
+ if (ret) {
+ dev_err(domain->dev, "pd_off timeout: name: %s, stat: %x\n", genpd->name, val);
+ return ret;
+ }
+
+ clk_bulk_disable_unprepare(domain->num_clks, domain->clks);
+
+ return 0;
+}
+
+static int imx93_pd_remove(struct platform_device *pdev)
+{
+ struct imx93_power_domain *pd = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+ const struct imx93_plat_data *data = of_device_get_match_data(dev);
+ u32 num_domains = data->num_slice;
+ struct device_node *slice_np, *np;
+ int ret;
+
+ slice_np = of_get_child_by_name(pdev->dev.of_node, "slice");
+
+ for_each_child_of_node(slice_np, np) {
+ struct imx93_power_domain *domain;
+ u32 index;
+
+ if (!of_device_is_available(np))
+ continue;
+
+ ret = of_property_read_u32(np, "reg", &index);
+ if (ret) {
+ dev_err(dev, "Failed to read 'reg' property\n");
+ of_node_put(np);
+ return ret;
+ }
+
+ if (index >= num_domains) {
+ dev_warn(dev, "Domain index %d is out of bounds\n", index);
+ continue;
+ }
+
+ domain = &pd[index];
+
+ of_genpd_del_provider(np);
+
+ pm_genpd_remove(&domain->genpd);
+ clk_bulk_put_all(domain->num_clks, domain->clks);
+	}
+
+ return 0;
+}
+
+static int imx93_pd_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ const struct imx93_plat_data *data = of_device_get_match_data(dev);
+ const struct imx93_slice_info *slice_info = data->slices;
+ struct imx93_power_domain *pd;
+ u32 num_domains = data->num_slice;
+ struct device_node *slice_np, *np;
+ void __iomem *base;
+ bool is_off;
+ int ret;
+
+ slice_np = of_get_child_by_name(dev->of_node, "slice");
+ if (!slice_np) {
+ dev_err(dev, "No slices specified in DT\n");
+ return -EINVAL;
+ }
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ pd = devm_kcalloc(dev, num_domains, sizeof(*pd), GFP_KERNEL);
+ if (!pd)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, pd);
+
+ for_each_child_of_node(slice_np, np) {
+ struct imx93_power_domain *domain;
+ u32 index;
+
+ if (!of_device_is_available(np))
+ continue;
+
+ ret = of_property_read_u32(np, "reg", &index);
+ if (ret) {
+ dev_err(dev, "Failed to read 'reg' property\n");
+ of_node_put(np);
+ return ret;
+ }
+
+ if (index >= num_domains) {
+ dev_warn(dev, "Domain index %d is out of bounds\n", index);
+ continue;
+ }
+
+ domain = &pd[index];
+ domain->dev = dev;
+
+ domain->num_clks = of_clk_bulk_get_all(np, &domain->clks);
+		if (domain->num_clks < 0) {
+			of_node_put(np);
+			return dev_err_probe(domain->dev, domain->num_clks,
+					     "Failed to get %s's clocks\n",
+					     slice_info[index].name);
+		}
+
+ domain->genpd.name = slice_info[index].name;
+ domain->genpd.power_off = imx93_pd_off;
+ domain->genpd.power_on = imx93_pd_on;
+ domain->slice_info = &slice_info[index];
+ domain->base = base;
+
+		is_off = readl(domain->base + slice_info[index].mix_off + MIX_FUNC_STAT_OFF) &
+			 FUNC_STAT_ISO_STAT_MASK;
+ /* Just to sync the status of hardware */
+ if (!is_off) {
+ ret = clk_bulk_prepare_enable(domain->num_clks, domain->clks);
+ if (ret) {
+ dev_err(domain->dev, "failed to enable clocks for domain: %s\n",
+ domain->genpd.name);
+ clk_bulk_put_all(domain->num_clks, domain->clks);
+				of_node_put(np);
+				return ret;
+ }
+ }
+
+ dev_info(dev, "%s: state: %x\n", domain->genpd.name,
+ readl(domain->base + MIX_FUNC_STAT_OFF));
+		ret = pm_genpd_init(&domain->genpd, NULL, is_off);
+		if (ret) {
+			dev_err(dev, "failed to init genpd\n");
+			clk_bulk_put_all(domain->num_clks, domain->clks);
+			of_node_put(np);
+			return ret;
+		}
+
+		ret = of_genpd_add_provider_simple(np, &domain->genpd);
+		if (ret) {
+			pm_genpd_remove(&domain->genpd);
+			clk_bulk_put_all(domain->num_clks, domain->clks);
+			of_node_put(np);
+			return ret;
+		}
+ }
+
+ return 0;
+}
+
+static const struct of_device_id imx93_dt_ids[] = {
+ { .compatible = "fsl,imx93-src", .data = &imx93_plat_data, },
+ { }
+};
+
+static struct platform_driver imx93_power_domain_driver = {
+ .driver = {
+ .name = "imx93_power_domain",
+ .owner = THIS_MODULE,
+ .of_match_table = imx93_dt_ids,
+ },
+ .probe = imx93_pd_probe,
+ .remove = imx93_pd_remove,
+};
+module_platform_driver(imx93_power_domain_driver);
+
+MODULE_AUTHOR("Peng Fan <peng.fan@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX93 power domain driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/imx/mu/Kconfig b/drivers/soc/imx/mu/Kconfig
new file mode 100644
index 000000000000..890f72d15ec5
--- /dev/null
+++ b/drivers/soc/imx/mu/Kconfig
@@ -0,0 +1,4 @@
+config SOC_IMX_MU
+	tristate "i.MX SoC MU support"
+	depends on ARCH_MXC
+	default ARCH_MXC && ARM64
+	help
+	  Enable the Messaging Unit (MU) helper library used on i.MX8 SoCs
+	  to exchange messages with the other core.
diff --git a/drivers/soc/imx/mu/Makefile b/drivers/soc/imx/mu/Makefile
new file mode 100644
index 000000000000..64246b130d33
--- /dev/null
+++ b/drivers/soc/imx/mu/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_SOC_IMX_MU) += mx8_mu.o
diff --git a/drivers/soc/imx/mu/mx8_mu.c b/drivers/soc/imx/mu/mx8_mu.c
new file mode 100644
index 000000000000..c4f9d5dfcc15
--- /dev/null
+++ b/drivers/soc/imx/mu/mx8_mu.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2016 Freescale Semiconductor, Inc.
+ * Copyright 2017 NXP
+ *
+ * SPDX-License-Identifier: GPL-2.0+
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mx8_mu.h>
+#include <linux/of.h>
+
+static int version;
+
+/*!
+ * This function sets the Flag n of the MU.
+ */
+int32_t MU_SetFn(void __iomem *base, uint32_t Fn)
+{
+ uint32_t reg, offset;
+
+ reg = Fn & (~MU_CR_Fn_MASK1);
+ if (reg > 0)
+ return -EINVAL;
+
+ offset = unlikely(version == MU_VER_ID_V10)
+ ? MU_V10_ACR_OFFSET1 : MU_ACR_OFFSET1;
+
+ reg = readl_relaxed(base + offset);
+ /* Clear ABFn. */
+ reg &= ~MU_CR_Fn_MASK1;
+ reg |= Fn;
+ writel_relaxed(reg, base + offset);
+
+ return 0;
+}
+
+/*!
+ * This function reads the status from status register.
+ */
+uint32_t MU_ReadStatus(void __iomem *base)
+{
+ uint32_t reg, offset;
+
+ offset = unlikely(version == MU_VER_ID_V10)
+ ? MU_V10_ASR_OFFSET1 : MU_ASR_OFFSET1;
+
+ reg = readl_relaxed(base + offset);
+
+ return reg;
+}
+
+/*!
+ * This function enables specific RX full interrupt.
+ */
+void MU_EnableRxFullInt(void __iomem *base, uint32_t index)
+{
+ uint32_t reg, offset;
+
+ offset = unlikely(version == MU_VER_ID_V10)
+ ? MU_V10_ACR_OFFSET1 : MU_ACR_OFFSET1;
+
+ reg = readl_relaxed(base + offset);
+ reg &= ~(MU_CR_GIRn_MASK1 | MU_CR_NMI_MASK1);
+ reg |= MU_CR_RIE0_MASK1 >> index;
+ writel_relaxed(reg, base + offset);
+}
+EXPORT_SYMBOL(MU_EnableRxFullInt);
+
+/*!
+ * This function enables specific general purpose interrupt.
+ */
+void MU_EnableGeneralInt(void __iomem *base, uint32_t index)
+{
+ uint32_t reg, offset;
+
+ offset = unlikely(version == MU_VER_ID_V10)
+ ? MU_V10_ACR_OFFSET1 : MU_ACR_OFFSET1;
+
+ reg = readl_relaxed(base + offset);
+ reg &= ~(MU_CR_GIRn_MASK1 | MU_CR_NMI_MASK1);
+ reg |= MU_CR_GIE0_MASK1 >> index;
+ writel_relaxed(reg, base + offset);
+}
+
+/*
+ * Wait and send message to the other core.
+ */
+void MU_SendMessage(void __iomem *base, uint32_t regIndex, uint32_t msg)
+{
+ uint32_t mask = MU_SR_TE0_MASK1 >> regIndex;
+
+ if (unlikely(version == MU_VER_ID_V10)) {
+		/* Wait for the TX register to be empty. */
+ while (!(readl_relaxed(base + MU_V10_ASR_OFFSET1) & mask))
+ ;
+ writel_relaxed(msg, base + MU_V10_ATR0_OFFSET1
+ + (regIndex * 4));
+ } else {
+		/* Wait for the TX register to be empty. */
+ while (!(readl_relaxed(base + MU_ASR_OFFSET1) & mask))
+ ;
+ writel_relaxed(msg, base + MU_ATR0_OFFSET1 + (regIndex * 4));
+ }
+}
+EXPORT_SYMBOL(MU_SendMessage);
+
+/*
+ * Wait and send message to the other core with timeout mechanism.
+ */
+void MU_SendMessageTimeout(void __iomem *base, uint32_t regIndex, uint32_t msg,
+ uint32_t t)
+{
+ uint32_t mask = MU_SR_TE0_MASK1 >> regIndex;
+ uint32_t timeout = t;
+
+ if (unlikely(version == MU_VER_ID_V10)) {
+		/* Wait for the TX register to be empty. */
+		while (!(readl_relaxed(base + MU_V10_ASR_OFFSET1) & mask)) {
+			udelay(10);
+			if (timeout-- == 0)
+				return;
+		}
+
+ writel_relaxed(msg, base + MU_V10_ATR0_OFFSET1
+ + (regIndex * 4));
+ } else {
+		/* Wait for the TX register to be empty. */
+		while (!(readl_relaxed(base + MU_ASR_OFFSET1) & mask)) {
+			udelay(10);
+			if (timeout-- == 0)
+				return;
+		}
+
+ writel_relaxed(msg, base + MU_ATR0_OFFSET1 + (regIndex * 4));
+ }
+}
+EXPORT_SYMBOL(MU_SendMessageTimeout);
+
+/*
+ * Wait to receive message from the other core.
+ */
+void MU_ReceiveMsg(void __iomem *base, uint32_t regIndex, uint32_t *msg)
+{
+ uint32_t mask = MU_SR_RF0_MASK1 >> regIndex;
+
+ if (unlikely(version == MU_VER_ID_V10)) {
+		/* Wait for the RX register to be full. */
+ while (!(readl_relaxed(base + MU_V10_ASR_OFFSET1) & mask))
+ ;
+ *msg = readl_relaxed(base + MU_V10_ARR0_OFFSET1
+ + (regIndex * 4));
+ } else {
+		/* Wait for the RX register to be full. */
+ while (!(readl_relaxed(base + MU_ASR_OFFSET1) & mask))
+ ;
+ *msg = readl_relaxed(base + MU_ARR0_OFFSET1 + (regIndex * 4));
+ }
+}
+EXPORT_SYMBOL(MU_ReceiveMsg);
+
+void MU_Init(void __iomem *base)
+{
+ uint32_t reg, offset;
+
+ version = readl_relaxed(base) >> 16;
+
+ offset = unlikely(version == MU_VER_ID_V10)
+ ? MU_V10_ACR_OFFSET1 : MU_ACR_OFFSET1;
+
+ reg = readl_relaxed(base + offset);
+ /* Clear GIEn, TIEn, GIRn and ABFn. */
+ reg &= ~(MU_CR_GIEn_MASK1 | MU_CR_TIEn_MASK1
+ | MU_CR_GIRn_MASK1 | MU_CR_NMI_MASK1 | MU_CR_Fn_MASK1);
+
+ /*
+	 * i.MX6SX and i.MX7D have multi-core power management which needs
+	 * to use the RIE interrupts.
+ */
+ if (!(of_machine_is_compatible("fsl,imx6sx") ||
+ of_machine_is_compatible("fsl,imx7d")))
+ reg &= ~MU_CR_RIEn_MASK1;
+
+ writel_relaxed(reg, base + offset);
+}
+EXPORT_SYMBOL(MU_Init);
+MODULE_DESCRIPTION("i.MX8 SoC MU driver");
+MODULE_LICENSE("GPL v2");
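
A hypothetical client of the API exported above would initialise the unit once and then exchange words over one of the transmit/receive register pairs; a minimal sketch, with mu_base being an ioremapped MU instance:

#include <linux/mx8_mu.h>

static void example_mu_ping(void __iomem *mu_base)
{
	uint32_t reply;

	MU_Init(mu_base);			/* detect version, mask IRQs */
	MU_SendMessage(mu_base, 0, 0xdeadbeef);	/* blocks until TX0 is empty */
	MU_ReceiveMsg(mu_base, 0, &reply);	/* blocks until RX0 is full */
}
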
diff --git a/drivers/soc/imx/rpmsg_life_cycle.c b/drivers/soc/imx/rpmsg_life_cycle.c
new file mode 100644
index 000000000000..d4e91c9c0f70
--- /dev/null
+++ b/drivers/soc/imx/rpmsg_life_cycle.c
@@ -0,0 +1,197 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2021 NXP
+ */
+
+#include <linux/cpu.h>
+#include <linux/imx_rpmsg.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/rpmsg.h>
+#include <linux/suspend.h>
+
+#define RPMSG_TIMEOUT 1000
+
+#define PM_RPMSG_TYPE 0
+
+struct pm_rpmsg_data {
+ struct imx_rpmsg_head header;
+ u8 data;
+ u8 reserved;
+} __packed;
+
+enum pm_rpmsg_cmd {
+ PM_RPMSG_MODE,
+};
+
+enum pm_rpmsg_power_mode {
+ PM_RPMSG_ACTIVE = 1,
+ PM_RPMSG_SUSPEND = 5,
+ PM_RPMSG_SHUTDOWN = 7,
+};
+
+static struct rpmsg_device *life_cycle_rpdev;
+static struct completion cmd_complete;
+
+static int rpmsg_life_cycle_notifier(struct notifier_block *nb,
+ unsigned long action, void *unused)
+{
+ int ret;
+#ifdef CONFIG_HOTPLUG_CPU
+ int cpu;
+#endif
+ struct pm_rpmsg_data msg = {};
+
+ /* return early if it is RESTART case */
+ if (action == SYS_RESTART)
+ return NOTIFY_DONE;
+
+ /*
+	 * unplug the non-boot CPUs to make sure the A35 cluster can be
+	 * put into DPD mode safely.
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+ for_each_online_cpu(cpu) {
+ if (cpu == cpumask_first(cpu_online_mask))
+ continue;
+ ret = remove_cpu(cpu);
+ if (ret) {
+ pr_info("unplug the non-boot cpu failed:%d\n", ret);
+ return NOTIFY_BAD;
+ }
+ }
+#endif
+ msg.header.cate = IMX_RMPSG_LIFECYCLE;
+ msg.header.major = IMX_RMPSG_MAJOR;
+ msg.header.minor = IMX_RMPSG_MINOR;
+ msg.header.type = PM_RPMSG_TYPE;
+ msg.header.cmd = PM_RPMSG_MODE;
+ msg.data = PM_RPMSG_SHUTDOWN;
+
+	/* no ACK is expected from the M core for this message */
+	ret = rpmsg_send(life_cycle_rpdev->ept, &msg, sizeof(struct pm_rpmsg_data));
+	if (ret) {
+		pr_err("rpmsg send failed: %d\n", ret);
+ return NOTIFY_BAD;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block rpmsg_life_cycle_nb = {
+ .notifier_call = rpmsg_life_cycle_notifier,
+};
+
+static int rpmsg_life_cycle_cb(struct rpmsg_device *rpdev, void *data, int len,
+ void *priv, u32 src)
+{
+ /* no need to handle the received msg, just complete */
+ complete(&cmd_complete);
+
+ return 0;
+}
+
+static int rpmsg_life_cycle_probe(struct rpmsg_device *rpdev)
+{
+ life_cycle_rpdev = rpdev;
+
+ dev_info(&rpdev->dev, "new channel: 0x%x -> 0x%x!\n",
+ rpdev->src, rpdev->dst);
+
+ return register_reboot_notifier(&rpmsg_life_cycle_nb);
+}
+
+static struct rpmsg_device_id rpmsg_life_cycle_id_table[] = {
+ { .name = "rpmsg-life-cycle-channel" },
+ { },
+};
+
+static struct rpmsg_driver rpmsg_life_cycle_driver = {
+ .drv.name = "rpmsg_life_cycle",
+ .drv.owner = THIS_MODULE,
+ .id_table = rpmsg_life_cycle_id_table,
+ .probe = rpmsg_life_cycle_probe,
+ .callback = rpmsg_life_cycle_cb,
+};
+
+static int __maybe_unused rpmsg_lifecycle_pm_notify(bool enter)
+{
+ struct pm_rpmsg_data msg = {};
+ int ret;
+
+	/* Only need to do the lifecycle notify when the APD enters mem (HW PD) mode */
+ if (pm_suspend_target_state != PM_SUSPEND_MEM)
+ return 0;
+
+ msg.data = enter ? PM_RPMSG_SUSPEND : PM_RPMSG_ACTIVE;
+ msg.header.cate = IMX_RMPSG_LIFECYCLE;
+ msg.header.major = IMX_RMPSG_MAJOR;
+ msg.header.minor = IMX_RMPSG_MINOR;
+ msg.header.type = PM_RPMSG_TYPE;
+ msg.header.cmd = PM_RPMSG_MODE;
+
+ reinit_completion(&cmd_complete);
+
+ ret = rpmsg_send(life_cycle_rpdev->ept, &msg, sizeof(struct pm_rpmsg_data));
+ if (ret) {
+ dev_err(&life_cycle_rpdev->dev, "rpmsg send failed:%d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&cmd_complete,
+ msecs_to_jiffies(RPMSG_TIMEOUT));
+ if (!ret) {
+ dev_err(&life_cycle_rpdev->dev, "rpmsg_send timeout!\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused rpmsg_lifecycle_suspend_noirq(struct device *dev)
+{
+ return rpmsg_lifecycle_pm_notify(true);
+}
+
+static int __maybe_unused rpmsg_lifecycle_resume_noirq(struct device *dev)
+{
+ return rpmsg_lifecycle_pm_notify(false);
+}
+
+static const struct dev_pm_ops rpmsg_lifecyle_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(rpmsg_lifecycle_suspend_noirq,
+ rpmsg_lifecycle_resume_noirq)
+};
+
+static int rpmsg_lifecycle_probe(struct platform_device *pdev)
+{
+ init_completion(&cmd_complete);
+
+	return register_rpmsg_driver(&rpmsg_life_cycle_driver);
+}
+
+static const struct of_device_id rpmsg_lifecycle_id[] = {
+ { "nxp,rpmsg-lifecycle", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, rpmsg_lifecycle_id);
+
+static struct platform_driver rpmsg_lifecycle_driver = {
+ .driver = {
+ .name = "rpmsg-lifecycle",
+ .owner = THIS_MODULE,
+ .of_match_table = rpmsg_lifecycle_id,
+ .pm = &rpmsg_lifecyle_ops,
+ },
+ .probe = rpmsg_lifecycle_probe,
+};
+module_platform_driver(rpmsg_lifecycle_driver);
+
+MODULE_AUTHOR("NXP Semiconductor");
+MODULE_DESCRIPTION("NXP rpmsg life cycle driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/imx/secvio/Makefile b/drivers/soc/imx/secvio/Makefile
new file mode 100644
index 000000000000..d5a89ba24bbc
--- /dev/null
+++ b/drivers/soc/imx/secvio/Makefile
@@ -0,0 +1,3 @@
+obj-y += imx-secvio-sc.o
+obj-$(CONFIG_DEBUG_FS) += imx-secvio-debugfs.o
+obj-$(CONFIG_AUDIT) += imx-secvio-audit.o
diff --git a/drivers/soc/imx/secvio/imx-secvio-audit.c b/drivers/soc/imx/secvio/imx-secvio-audit.c
new file mode 100644
index 000000000000..e3c51311424f
--- /dev/null
+++ b/drivers/soc/imx/secvio/imx-secvio-audit.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ *
+ */
+
+#include <linux/audit.h>
+
+#include <soc/imx/imx-secvio-sc.h>
+
+int report_to_audit_notify(struct notifier_block *nb, unsigned long status,
+ void *notif_info)
+{
+ int ret = 0;
+ struct audit_buffer *ab;
+ struct secvio_sc_notifier_info *info = notif_info;
+
+ ab = audit_log_start(audit_context(), GFP_KERNEL, AUDIT_INTEGRITY_RULE);
+ if (!ab) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ audit_log_format(ab, " hpsvs=0x%.08x lps=0x%.08x lptds=0x%.08x",
+ info->hpsvs, info->lps, info->lptds);
+ audit_log_task_info(ab);
+ audit_log_end(ab);
+
+exit:
+ return ret;
+}
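
report_to_audit_notify() is not called directly; it is meant to hang off the notifier chain exported by imx-secvio-sc.c below. A sketch of the wiring, assuming the audit_nb field of struct imx_secvio_sc_data declared in imx-secvio-sc-int.h:

#include <soc/imx/imx-secvio-sc.h>
#include "imx-secvio-sc-int.h"

/* Hook the audit reporter into the secvio notifier chain so every detected
 * violation is logged through the audit subsystem. */
static int example_wire_audit(struct imx_secvio_sc_data *data)
{
	data->audit_nb.notifier_call = report_to_audit_notify;
	return register_imx_secvio_sc_notifier(&data->audit_nb);
}
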
diff --git a/drivers/soc/imx/secvio/imx-secvio-debugfs.c b/drivers/soc/imx/secvio/imx-secvio-debugfs.c
new file mode 100644
index 000000000000..a7ce1a98e6c9
--- /dev/null
+++ b/drivers/soc/imx/secvio/imx-secvio-debugfs.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ *
+ */
+
+/*
+ * The module exposes 4 files in debugfs:
+ * - secvio/info:
+ *    * Read: returns the values of the fuses and SNVS registers which are
+ *            readable and related to secvio and tampers
+ *    * Write: a write of the format "<hex id> [<hex value 0> <hex value 1>
+ *             <hex value 2> <hex value 3> <hex value 4>](<nb values>)"
+ *             writes the SNVS register with the provided id using the
+ *             values provided (cf. the SECO documentation)
+ * - secvio/enable: state of the IRQ
+ * - secvio/check: checks the state of the security violations and tampers
+ *                 and calls the notifier chain
+ * - secvio/clear: clears the state of all secvio and tampers
+ */
+
+/* Includes */
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/nvmem-consumer.h>
+
+#include <linux/firmware/imx/svc/misc.h>
+#include <linux/firmware/imx/svc/seco.h>
+
+#include <soc/imx/imx-secvio-sc.h>
+#include "imx-secvio-sc-int.h"
+
+int fuse_reader(struct device *dev, u32 id, u32 *value, u8 mul)
+{
+ struct imx_secvio_sc_data *data = dev_get_drvdata(dev);
+ u32 size_to_read = mul * sizeof(u32);
+ int ret;
+
+ ret = nvmem_device_read(data->nvmem, id, size_to_read, value);
+ if (ret < 0) {
+ dev_err(data->dev, "Failed to read fuse %d: %d\n", id, ret);
+ return ret;
+ }
+
+ if (ret != size_to_read) {
+ dev_err(data->dev, "Read only %d instead of %d\n", ret,
+ size_to_read);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+int snvs_reader(struct device *dev, u32 id, u32 *value, u8 mul)
+{
+ int ret;
+ u32 *v1, *v2, *v3, *v4, *v5;
+
+ v1 = NULL;
+ v2 = NULL;
+ v3 = NULL;
+ v4 = NULL;
+ v5 = NULL;
+
+ switch (mul) {
+ case 5:
+ v5 = &value[4];
+ fallthrough;
+ case 4:
+ v4 = &value[3];
+ fallthrough;
+ case 3:
+ v3 = &value[2];
+ fallthrough;
+ case 2:
+ v2 = &value[1];
+ fallthrough;
+ case 1:
+ v1 = &value[0];
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = call_secvio_config(dev, id, SECVIO_CONFIG_READ, v1, v2, v3, v4,
+ v5, mul);
+ if (ret < 0)
+ dev_err(dev, "Failed to read snvs reg %d: %d\n", id, ret);
+
+ return ret;
+}
+
+int snvs_dgo_reader(struct device *dev, u32 id, u32 *value, u8 mul)
+{
+ struct imx_secvio_sc_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ if (mul != 1)
+ return -EINVAL;
+
+ ret = imx_sc_seco_secvio_dgo_config(data->ipc_handle, id,
+ SECVIO_CONFIG_READ, value);
+ if (ret)
+ dev_err(dev, "Failed to read snvs dgo reg %d: %d\n", id, ret);
+
+ return ret;
+}
+
+static const struct imx_secvio_info_entry {
+ int (*reader)(struct device *dev, u32 id, u32 *value, u8 mul);
+ const char *type;
+ const char *name;
+ u32 id;
+ u8 mul;
+} gs_imx_secvio_info_list[] = {
+ {fuse_reader, "fuse", "trim", 30, 1},
+ {fuse_reader, "fuse", "trim2", 31, 1},
+ {fuse_reader, "fuse", "ctrim1", 260, 1},
+ {fuse_reader, "fuse", "ctrim2", 261, 1},
+ {fuse_reader, "fuse", "ctrim3", 262, 1},
+ {fuse_reader, "fuse", "ctrim4", 263, 1},
+ {fuse_reader, "fuse", "OSC_CAP", 768, 1},
+
+ {snvs_reader, "snvs", "HPLR", 0x0, 1},
+ {snvs_reader, "snvs", "LPLR", 0x34, 1},
+ {snvs_reader, "snvs", "HPSICR", 0xc, 1},
+ {snvs_reader, "snvs", "HPSVCR", 0x10, 1},
+ {snvs_reader, "snvs", "HPSVS", 0x18, 1},
+ {snvs_reader, "snvs", "LPSVC", 0x40, 1},
+ {snvs_reader, "snvs", "LPTDC", 0x48, 2},
+ {snvs_reader, "snvs", "LPSR", 0x4c, 1},
+ {snvs_reader, "snvs", "LPTDS", 0xa4, 1},
+ {snvs_reader, "snvs", "LPTGFC", 0x44, 3},
+ {snvs_reader, "snvs", "LPATCTL", 0xe0, 1},
+ {snvs_reader, "snvs", "LPATCLK", 0xe4, 1},
+ {snvs_reader, "snvs", "LPATRC1", 0xe8, 2},
+ {snvs_reader, "snvs", "LPMKC", 0x3c, 1},
+ {snvs_reader, "snvs", "LPSMC", 0x5c, 2},
+ {snvs_reader, "snvs", "LPPGD", 0x64, 1},
+ {snvs_reader, "snvs", "HPVID", 0xf8, 2},
+
+ {snvs_dgo_reader, "dgo", "Offset", 0x0, 1},
+ {snvs_dgo_reader, "dgo", "PUP/PD", 0x10, 1},
+ {snvs_dgo_reader, "dgo", "Anatest", 0x20, 1},
+ {snvs_dgo_reader, "dgo", "T trim", 0x30, 1},
+ {snvs_dgo_reader, "dgo", "Misc", 0x40, 1},
+ {snvs_dgo_reader, "dgo", "Vmon", 0x50, 1},
+};
+
+struct imx_secvio_sc_info_seq_data {
+ struct device *dev;
+ const struct imx_secvio_info_entry *list;
+ int size;
+};
+
+static void *imx_secvio_sc_info_seq_start(struct seq_file *m, loff_t *pos)
+{
+ struct imx_secvio_sc_info_seq_data *data = m->private;
+
+ /* Check we are not out of bound */
+ if (*pos >= data->size)
+ return NULL;
+
+ return (void *)pos;
+}
+
+static void *imx_secvio_sc_info_seq_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ /* Increment the counter */
+ ++*pos;
+
+ /* call the start function which will check the index */
+ return imx_secvio_sc_info_seq_start(m, pos);
+}
+
+static void imx_secvio_sc_info_seq_stop(struct seq_file *m, void *v)
+{
+}
+
+static int imx_secvio_sc_info_seq_show(struct seq_file *m, void *v)
+{
+ struct imx_secvio_sc_info_seq_data *data = m->private;
+ const struct imx_secvio_info_entry *e;
+ int ret;
+ u32 vals[5];
+ int idx;
+
+ idx = *(loff_t *)v;
+ e = &data->list[idx];
+
+ /* Read the values */
+	ret = e->reader(data->dev, e->id, vals, e->mul);
+	if (ret) {
+		dev_err(data->dev, "Failed to read %s %s (idx %d)\n", e->type,
+ e->name, e->id);
+ return 0;
+ }
+
+ seq_printf(m, "%5s/%-10s(%.3d):", e->type, e->name, e->id);
+
+ /* Loop over the values */
+ for (idx = 0; idx < e->mul; idx++)
+ seq_printf(m, " %.8x", vals[idx]);
+
+ seq_puts(m, "\n");
+
+ return 0;
+}
+
+static const struct seq_operations imx_secvio_sc_info_seq_ops = {
+ .start = imx_secvio_sc_info_seq_start,
+ .next = imx_secvio_sc_info_seq_next,
+ .stop = imx_secvio_sc_info_seq_stop,
+ .show = imx_secvio_sc_info_seq_show,
+};
+
+static int imx_secvio_sc_info_open(struct inode *inode, struct file *file)
+{
+ struct imx_secvio_sc_info_seq_data *data;
+
+ data = __seq_open_private(file, &imx_secvio_sc_info_seq_ops, sizeof(*data));
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = inode->i_private;
+ data->list = gs_imx_secvio_info_list;
+ data->size = ARRAY_SIZE(gs_imx_secvio_info_list);
+
+ return 0;
+}
+
+static const struct file_operations imx_secvio_sc_info_ops = {
+ .owner = THIS_MODULE,
+ .open = imx_secvio_sc_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
+static void if_debugfs_remove_recursive(void *dentry)
+{
+ debugfs_remove_recursive(dentry);
+}
+
+int imx_secvio_sc_debugfs(struct device *dev)
+{
+ struct imx_secvio_sc_data *data = dev_get_drvdata(dev);
+ struct dentry *dir;
+ int ret = 0;
+
+ /* Create a folder */
+ dir = debugfs_create_dir(dev_name(dev), NULL);
+ if (IS_ERR(dir)) {
+ dev_err(dev, "Failed to create dfs dir\n");
+ ret = PTR_ERR(dir);
+ goto exit;
+ }
+ data->dfs = dir;
+
+ ret = devm_add_action(dev, if_debugfs_remove_recursive, data->dfs);
+ if (ret) {
+ dev_err(dev, "Failed to add managed action to disable IRQ\n");
+ goto remove_fs;
+ }
+
+ /* Create the file to read info and write to reg */
+ dir = debugfs_create_file("info", 0x666, data->dfs, dev,
+ &imx_secvio_sc_info_ops);
+ if (IS_ERR(dir)) {
+ dev_err(dev, "Failed to add info to debugfs\n");
+ ret = PTR_ERR(dir);
+ goto exit;
+ }
+
+exit:
+ return ret;
+
+remove_fs:
+ debugfs_remove_recursive(data->dfs);
+ goto exit;
+}
diff --git a/drivers/soc/imx/secvio/imx-secvio-sc-int.h b/drivers/soc/imx/secvio/imx-secvio-sc-int.h
new file mode 100644
index 000000000000..3152ec246bbd
--- /dev/null
+++ b/drivers/soc/imx/secvio/imx-secvio-sc-int.h
@@ -0,0 +1,83 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright 2019 NXP
+ */
+
+#ifndef SECVIO_SC_H
+#define SECVIO_SC_H
+
+/* Includes */
+#include <linux/kernel.h>
+#include <linux/notifier.h>
+#include <linux/semaphore.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/miscdevice.h>
+
+/* Access for sc_seco_secvio_config API */
+#define SECVIO_CONFIG_READ 0
+#define SECVIO_CONFIG_WRITE 1
+
+/* Internal Structure */
+struct imx_secvio_sc_data {
+ struct device *dev;
+
+ struct imx_sc_ipc *ipc_handle;
+
+ struct notifier_block irq_nb;
+ struct notifier_block report_nb;
+ struct notifier_block audit_nb;
+
+ struct nvmem_device *nvmem;
+
+ struct miscdevice miscdev;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dfs;
+#endif
+
+ u32 version;
+};
+
+/* Function declarations */
+extern
+int call_secvio_config(struct device *dev, u8 id, u8 access, u32 *data0,
+ u32 *data1, u32 *data2, u32 *data3, u32 *data4, u8 size);
+
+extern
+int int_imx_secvio_sc_get_state(struct device *dev,
+ struct secvio_sc_notifier_info *info);
+
+extern
+int int_imx_secvio_sc_clear_state(struct device *dev, u32 hpsvs, u32 lps,
+ u32 lptds);
+
+extern
+int int_imx_secvio_sc_enable_irq(struct device *dev);
+
+extern
+int int_imx_secvio_sc_disable_irq(struct device *dev);
+
+#ifdef CONFIG_DEBUG_FS
+extern
+int imx_secvio_sc_debugfs(struct device *dev);
+#else
+static inline
+int imx_secvio_sc_debugfs(struct device *dev)
+{
+ return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+#ifdef CONFIG_AUDIT
+int report_to_audit_notify(struct notifier_block *nb, unsigned long status,
+ void *notif_info);
+#else /* CONFIG_AUDIT */
+static inline
+int report_to_audit_notify(struct notifier_block *nb, unsigned long status,
+ void *notif_info)
+{
+ return 0;
+}
+#endif /* CONFIG_AUDIT */
+
+#endif /* SECVIO_SC_H */
diff --git a/drivers/soc/imx/secvio/imx-secvio-sc.c b/drivers/soc/imx/secvio/imx-secvio-sc.c
new file mode 100644
index 000000000000..ccf4c1a6fb22
--- /dev/null
+++ b/drivers/soc/imx/secvio/imx-secvio-sc.c
@@ -0,0 +1,675 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 NXP
+ *
+ */
+
+/*
+ * The i.MX8QXP SoC contains the Secure Non-Volatile Storage (SNVS) block. This
+ * block can detect specific hardware attacks. Due to the presence of the SECO,
+ * this block is only accessible through the SCFW API.
+ *
+ * This module interacts with the SCU, which relays requests to/from the SNVS
+ * block, to detect whether a security violation has occurred.
+ *
+ * The module exports an API to add processing when a SV is detected:
+ * - register_imx_secvio_sc_notifier
+ * - unregister_imx_secvio_sc_notifier
+ * - imx_secvio_sc_check_state
+ * - int_imx_secvio_sc_clear_state
+ * - imx_secvio_sc_enable_irq
+ * - imx_secvio_sc_disable_irq
+ */
+
+/* Includes */
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/miscdevice.h>
+
+#include <linux/firmware/imx/ipc.h>
+#include <linux/firmware/imx/sci.h>
+#include <linux/firmware/imx/svc/seco.h>
+#include <linux/firmware/imx/svc/rm.h>
+#include <dt-bindings/firmware/imx/rsrc.h>
+
+#include <soc/imx/imx-secvio-sc.h>
+#include "imx-secvio-sc-int.h"
+
+/* Definitions */
+
+/* Reference to the driver's device */
+static struct device *gs_imx_secvio_sc_dev;
+
+/* Register IDs for sc_seco_secvio_config API */
+#define HPSVS_ID 0x18
+#define LPS_ID 0x4c
+#define LPTDS_ID 0xa4
+#define HPVIDR_ID 0xf8
+
+#define SECO_MINOR_VERSION_SUPPORT_SECVIO_TAMPER 0x53
+#define SECO_VERSION_MINOR_MASK GENMASK(15, 0)
+
+/* Notifier list for new CB */
+static BLOCKING_NOTIFIER_HEAD(imx_secvio_sc_notifier_chain);
+
+int register_imx_secvio_sc_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&imx_secvio_sc_notifier_chain,
+ nb);
+}
+EXPORT_SYMBOL(register_imx_secvio_sc_notifier);
+
+int unregister_imx_secvio_sc_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&imx_secvio_sc_notifier_chain,
+ nb);
+}
+EXPORT_SYMBOL(unregister_imx_secvio_sc_notifier);
+
+static void if_imx_scu_irq_register_notifier(void *nb)
+{
+ imx_scu_irq_register_notifier(nb);
+}
+
+static void if_unregister_imx_secvio_sc_notifier(void *nb)
+{
+ unregister_imx_secvio_sc_notifier(nb);
+}
+
+static
+int imx_secvio_sc_notifier_call_chain(struct secvio_sc_notifier_info *info)
+{
+ return blocking_notifier_call_chain(&imx_secvio_sc_notifier_chain, 0,
+ (void *)info);
+}
+
+int int_imx_secvio_sc_get_state(struct device *dev,
+ struct secvio_sc_notifier_info *info)
+{
+ struct secvio_sc_notifier_info _info = {0};
+ struct secvio_sc_notifier_info *p_info;
+ int ret = 0, ret2 = 0;
+
+ p_info = info ? info : &_info;
+
+ /* Read secvio status */
+ ret = call_secvio_config(dev, HPSVS_ID, SECVIO_CONFIG_READ,
+ &p_info->hpsvs, NULL, NULL, NULL, NULL, 1);
+ if (ret) {
+ ret2 = ret;
+ dev_err(dev, "Cannot read secvio status: %d\n", ret);
+ }
+ p_info->hpsvs &= HPSVS__ALL_SV__MASK;
+
+ /* Read tampers status */
+ ret = call_secvio_config(dev, LPS_ID, SECVIO_CONFIG_READ,
+ &p_info->lps, NULL, NULL, NULL, NULL, 1);
+ if (ret) {
+ ret2 = ret;
+ dev_err(dev, "Cannot read tamper 1 status: %d\n", ret);
+ }
+ p_info->lps &= LPS__ALL_TP__MASK;
+
+ ret = call_secvio_config(dev, LPTDS_ID, SECVIO_CONFIG_READ,
+ &p_info->lptds, NULL, NULL, NULL, NULL, 1);
+ if (ret) {
+ ret2 = ret;
+ dev_err(dev, "Cannot read tamper 2 status: %d\n", ret);
+ }
+ p_info->lptds &= LPTDS__ALL_TP__MASK;
+
+ dev_dbg(dev, "Status: %.8x, %.8x, %.8x\n", p_info->hpsvs,
+ p_info->lps, p_info->lptds);
+
+ return ret2;
+}
+
+int imx_secvio_sc_get_state(struct secvio_sc_notifier_info *info)
+{
+ return int_imx_secvio_sc_get_state(gs_imx_secvio_sc_dev, info);
+}
+EXPORT_SYMBOL(imx_secvio_sc_get_state);
+
+int int_imx_secvio_sc_check_state(struct device *dev)
+{
+ struct secvio_sc_notifier_info info = {0};
+ int ret = 0;
+
+ ret = int_imx_secvio_sc_get_state(dev, &info);
+ if (ret) {
+ dev_err(dev, "Failed to get secvio state\n");
+ goto exit;
+ }
+
+ /* Run the chain of callbacks registered to this module if any status is set */
+ if (info.hpsvs || info.lps || info.lptds)
+ if (imx_secvio_sc_notifier_call_chain(&info))
+ dev_warn(dev,
+ "Issues when calling the notifier chain\n");
+
+exit:
+ return ret;
+}
+
+int imx_secvio_sc_check_state(void)
+{
+ return int_imx_secvio_sc_check_state(gs_imx_secvio_sc_dev);
+}
+EXPORT_SYMBOL(imx_secvio_sc_check_state);
+
+static int imx_secvio_sc_notify(struct notifier_block *nb,
+ unsigned long event, void *group)
+{
+ struct imx_secvio_sc_data *data =
+ container_of(nb, struct imx_secvio_sc_data,
+ irq_nb);
+ struct device *dev = data->dev;
+ int ret = 0;
+
+ /* Filter event for us */
+ if (!((event & IMX_SC_IRQ_SECVIO) &&
+ (*(u8 *)group == IMX_SC_IRQ_GROUP_WAKE)))
+ goto exit;
+
+ dev_warn(dev, "SNVS security violation detected\n");
+
+ ret = int_imx_secvio_sc_check_state(dev);
+
+ /* Re-enable interrupt */
+ ret = int_imx_secvio_sc_enable_irq(dev);
+ if (ret)
+ dev_err(dev, "Failed to enable IRQ\n");
+
+exit:
+ return ret;
+}
+
+int int_imx_secvio_sc_clear_state(struct device *dev, u32 hpsvs, u32 lps,
+ u32 lptds)
+{
+ int ret = 0;
+
+ if (!dev)
+ return -EINVAL;
+
+ ret = call_secvio_config(dev, HPSVS_ID, SECVIO_CONFIG_WRITE, &hpsvs,
+ NULL, NULL, NULL, NULL, 1);
+ if (ret) {
+ dev_err(dev, "Cannot clear secvio status: %d\n", ret);
+ goto exit;
+ }
+
+ ret = call_secvio_config(dev, LPS_ID, SECVIO_CONFIG_WRITE, &lps, NULL,
+ NULL, NULL, NULL, 1);
+ if (ret) {
+ dev_err(dev, "Cannot clear tamper 1 status: %d\n", ret);
+ goto exit;
+ }
+
+ ret = call_secvio_config(dev, LPTDS_ID, SECVIO_CONFIG_WRITE, &lptds,
+ NULL, NULL, NULL, NULL, 1);
+ if (ret) {
+ dev_err(dev, "Cannot clear tamper 2 status: %d\n", ret);
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+int imx_secvio_sc_clear_state(u32 hpsvs, u32 lps, u32 lptds)
+{
+ return int_imx_secvio_sc_clear_state(gs_imx_secvio_sc_dev, hpsvs, lps,
+ lptds);
+}
+EXPORT_SYMBOL(imx_secvio_sc_clear_state);
+
+static int report_to_user_notify(struct notifier_block *nb,
+ unsigned long status, void *notif_info)
+{
+ struct secvio_sc_notifier_info *info = notif_info;
+ struct imx_secvio_sc_data *data =
+ container_of(nb, struct imx_secvio_sc_data,
+ report_nb);
+ struct device *dev = data->dev;
+
+ /* Information about the security violation */
+ if (info->hpsvs & HPSVS__LP_SEC_VIO__MASK)
+ dev_info(dev, "SNVS secvio: LPSV\n");
+ if (info->hpsvs & HPSVS__SW_LPSV__MASK)
+ dev_info(dev, "SNVS secvio: SW LPSV\n");
+ if (info->hpsvs & HPSVS__SW_FSV__MASK)
+ dev_info(dev, "SNVS secvio: SW FSV\n");
+ if (info->hpsvs & HPSVS__SW_SV__MASK)
+ dev_info(dev, "SNVS secvio: SW SV\n");
+ if (info->hpsvs & HPSVS__SV5__MASK)
+ dev_info(dev, "SNVS secvio: SV 5\n");
+ if (info->hpsvs & HPSVS__SV4__MASK)
+ dev_info(dev, "SNVS secvio: SV 4\n");
+ if (info->hpsvs & HPSVS__SV3__MASK)
+ dev_info(dev, "SNVS secvio: SV 3\n");
+ if (info->hpsvs & HPSVS__SV2__MASK)
+ dev_info(dev, "SNVS secvio: SV 2\n");
+ if (info->hpsvs & HPSVS__SV1__MASK)
+ dev_info(dev, "SNVS secvio: SV 1\n");
+ if (info->hpsvs & HPSVS__SV0__MASK)
+ dev_info(dev, "SNVS secvio: SV 0\n");
+
+ /* Information about the tampers */
+ if (info->lps & LPS__ESVD__MASK)
+ dev_info(dev, "SNVS tamper: External SV\n");
+ if (info->lps & LPS__ET2D__MASK)
+ dev_info(dev, "SNVS tamper: Tamper 2\n");
+ if (info->lps & LPS__ET1D__MASK)
+ dev_info(dev, "SNVS tamper: Tamper 1\n");
+ if (info->lps & LPS__WMT2D__MASK)
+ dev_info(dev, "SNVS tamper: Wire Mesh 2\n");
+ if (info->lps & LPS__WMT1D__MASK)
+ dev_info(dev, "SNVS tamper: Wire Mesh 1\n");
+ if (info->lps & LPS__VTD__MASK)
+ dev_info(dev, "SNVS tamper: Voltage\n");
+ if (info->lps & LPS__TTD__MASK)
+ dev_info(dev, "SNVS tamper: Temperature\n");
+ if (info->lps & LPS__CTD__MASK)
+ dev_info(dev, "SNVS tamper: Clock\n");
+ if (info->lps & LPS__PGD__MASK)
+ dev_info(dev, "SNVS tamper: Power Glitch\n");
+ if (info->lps & LPS__MCR__MASK)
+ dev_info(dev, "SNVS tamper: Monotonic Counter rollover\n");
+ if (info->lps & LPS__SRTCR__MASK)
+ dev_info(dev, "SNVS tamper: Secure RTC rollover\n");
+ if (info->lps & LPS__LPTA__MASK)
+ dev_info(dev, "SNVS tamper: Time alarm\n");
+
+ if (info->lptds & LPTDS__ET10D__MASK)
+ dev_info(dev, "SNVS tamper: Tamper 10\n");
+ if (info->lptds & LPTDS__ET9D__MASK)
+ dev_info(dev, "SNVS tamper: Tamper 9\n");
+ if (info->lptds & LPTDS__ET8D__MASK)
+ dev_info(dev, "SNVS tamper: Tamper 8\n");
+ if (info->lptds & LPTDS__ET7D__MASK)
+ dev_info(dev, "SNVS tamper: Tamper 7\n");
+ if (info->lptds & LPTDS__ET6D__MASK)
+ dev_info(dev, "SNVS tamper: Tamper 6\n");
+ if (info->lptds & LPTDS__ET5D__MASK)
+ dev_info(dev, "SNVS tamper: Tamper 5\n");
+ if (info->lptds & LPTDS__ET4D__MASK)
+ dev_info(dev, "SNVS tamper: Tamper 4\n");
+ if (info->lptds & LPTDS__ET3D__MASK)
+ dev_info(dev, "SNVS tamper: Tamper 3\n");
+
+ return 0;
+}
+
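+/*
+ * Thin wrapper around imx_sc_seco_secvio_config(): @id selects the SNVS
+ * register, @access is SECVIO_CONFIG_READ or SECVIO_CONFIG_WRITE, and up
+ * to five 32-bit words (@size of them used) are transferred to/from the
+ * SCU.
+ */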
+int call_secvio_config(struct device *dev, u8 id, u8 access, u32 *data0,
+ u32 *data1, u32 *data2, u32 *data3, u32 *data4, u8 size)
+{
+ int ret = 0;
+ struct imx_secvio_sc_data *data;
+
+ if (!dev)
+ return -EINVAL;
+
+ data = dev_get_drvdata(dev);
+
+ ret = imx_sc_seco_secvio_config(data->ipc_handle, id, access, data0,
+ data1, data2, data3, data4, size);
+ if (ret)
+ dev_err(dev, "Fail %s secvio config %d",
+ ((access) ? "write" : "read"), ret);
+
+ return ret;
+}
+
+int int_imx_secvio_sc_enable_irq(struct device *dev)
+{
+ int ret = 0, ret2;
+ u32 irq_status;
+ struct imx_secvio_sc_data *data;
+
+ if (!dev)
+ return -EINVAL;
+
+ data = dev_get_drvdata(dev);
+
+ /* Enable the IRQ */
+ ret = imx_scu_irq_group_enable(IMX_SC_IRQ_GROUP_WAKE, IMX_SC_IRQ_SECVIO,
+ true);
+ if (ret) {
+ dev_err(dev, "Cannot enable SCU IRQ: %d\n", ret);
+ goto exit;
+ }
+
+ /* Enable interrupt */
+ ret = imx_sc_seco_secvio_enable(data->ipc_handle);
+ if (ret) {
+ dev_err(dev, "Cannot enable SNVS irq: %d\n", ret);
+ goto exit;
+ }
+
+ /* Unmask interrupt */
+ ret = imx_scu_irq_get_status(IMX_SC_IRQ_GROUP_WAKE, &irq_status);
+ if (ret) {
+ dev_err(dev, "Cannot unmask irq: %d\n", ret);
+ goto exit;
+ }
+
+exit:
+ if (ret) {
+ ret2 = int_imx_secvio_sc_disable_irq(dev);
+ if (ret2)
+ dev_warn(dev, "Failed to disable the IRQ\n");
+ }
+
+ return ret;
+}
+
+int int_imx_secvio_sc_disable_irq(struct device *dev)
+{
+ int ret = 0;
+ struct imx_secvio_sc_data *data;
+
+ if (!dev)
+ return -EINVAL;
+
+ data = dev_get_drvdata(dev);
+
+ /* Disable the IRQ */
+ ret = imx_scu_irq_group_enable(IMX_SC_IRQ_GROUP_WAKE, IMX_SC_IRQ_SECVIO,
+ false);
+ if (ret) {
+ dev_err(dev, "Cannot disable SCU IRQ: %d\n", ret);
+ goto exit;
+ }
+
+exit:
+ return ret;
+}
+
+static void if_imx_secvio_sc_disable_irq(void *dev)
+{
+ int_imx_secvio_sc_disable_irq(dev);
+}
+
+static int imx_secvio_sc_open(struct inode *node, struct file *filp)
+{
+ struct miscdevice *miscdev = filp->private_data;
+
+ /* misc_open() stored the miscdevice here; hand the ioctls our device */
+ filp->private_data = miscdev->parent;
+
+ return 0;
+}
+
+static long imx_secvio_sc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct device *dev = file->private_data;
+ struct secvio_sc_notifier_info info;
+ int ret;
+
+ switch (cmd) {
+ case IMX_SECVIO_SC_GET_STATE:
+ ret = int_imx_secvio_sc_get_state(dev, &info);
+ if (ret) {
+ dev_err(dev, "Fail to get state\n");
+ goto exit;
+ }
+
+ ret = copy_to_user((void __user *)arg, &info, sizeof(info));
+ if (ret) {
+ dev_err(dev, "Fail to copy info to user\n");
+ ret = -EFAULT;
+ goto exit;
+ }
+ break;
+ case IMX_SECVIO_SC_CHECK_STATE:
+ ret = int_imx_secvio_sc_check_state(dev);
+ if (ret) {
+ dev_err(dev, "Fail to check state\n");
+ goto exit;
+ }
+ break;
+ case IMX_SECVIO_SC_CLEAR_STATE:
+ ret = copy_from_user(&info, (void __user *)arg, sizeof(info));
+ if (ret) {
+ dev_err(dev, "Fail to copy info from user\n");
+ ret = -EFAULT;
+ goto exit;
+ }
+
+ ret = int_imx_secvio_sc_clear_state(dev, info.hpsvs, info.lps,
+ info.lptds);
+ if (ret) {
+ dev_err(dev, "Fail to clear state\n");
+ goto exit;
+ }
+ break;
+ default:
+ ret = -ENOIOCTLCMD;
+ }
+
+exit:
+ return ret;
+}
+
+static const struct file_operations imx_secvio_sc_fops = {
+ .owner = THIS_MODULE,
+ .open = imx_secvio_sc_open,
+ .unlocked_ioctl = imx_secvio_sc_ioctl,
+};
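+
+/*
+ * Sketch of the matching userspace usage, assuming the default misc node
+ * name derived from miscdev.name registered below and the ioctl
+ * definitions from soc/imx/imx-secvio-sc.h:
+ *
+ * struct secvio_sc_notifier_info info;
+ * int fd = open("/dev/secvio-sc", O_RDONLY);
+ *
+ * if (fd >= 0 && ioctl(fd, IMX_SECVIO_SC_GET_STATE, &info) == 0)
+ * printf("hpsvs=%x lps=%x lptds=%x\n", info.hpsvs, info.lps, info.lptds);
+ */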
+
+static void if_misc_deregister(void *miscdevice)
+{
+ misc_deregister(miscdevice);
+}
+
+static int imx_secvio_sc_setup(struct device *dev)
+{
+ struct imx_secvio_sc_data *data;
+ u32 seco_version = 0;
+ bool own_secvio;
+ u32 irq_status;
+ int ret = 0;
+
+ if (!devres_open_group(dev, NULL, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ /* Allocate private data */
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto clean;
+ }
+
+ data->dev = dev;
+
+ dev_set_drvdata(dev, data);
+
+ data->nvmem = devm_nvmem_device_get(dev, NULL);
+ if (IS_ERR(data->nvmem)) {
+ ret = PTR_ERR(data->nvmem);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to retrieve nvmem\n");
+
+ goto clean;
+ }
+
+ /* Get a handle */
+ ret = imx_scu_get_handle(&data->ipc_handle);
+ if (ret) {
+ dev_err(dev, "cannot get handle to scu: %d\n", ret);
+ goto clean;
+ }
+
+ /* Check the version of the SECO */
+ ret = imx_sc_seco_build_info(data->ipc_handle, &seco_version, NULL);
+ if (ret) {
+ dev_err(dev, "Failed to get seco version\n");
+ goto clean;
+ }
+
+ if ((seco_version & SECO_VERSION_MINOR_MASK) <
+ SECO_MINOR_VERSION_SUPPORT_SECVIO_TAMPER) {
+ dev_err(dev, "SECO version %.8x doesn't support all secvio\n",
+ seco_version);
+ ret = -EOPNOTSUPP;
+ goto clean;
+ }
+
+ /* Init debug FS */
+ ret = imx_secvio_sc_debugfs(dev);
+ if (ret) {
+ dev_err(dev, "Failed to set debugfs\n");
+ goto clean;
+ }
+
+ /* Check we own the SECVIO */
+ ret = imx_sc_rm_is_resource_owned(data->ipc_handle, IMX_SC_R_SECVIO);
+ if (ret < 0) {
+ dev_err(dev, "Failed to retrieve secvio ownership\n");
+ goto clean;
+ }
+
+ own_secvio = ret > 0;
+ if (!own_secvio) {
+ dev_err(dev, "Secvio resource is not owned\n");
+ ret = -EPERM;
+ goto clean;
+ }
+
+ /* Check IRQ exists and enable it */
+ ret = imx_scu_irq_get_status(IMX_SC_IRQ_GROUP_WAKE, &irq_status);
+ if (ret) {
+ dev_err(dev, "Cannot get IRQ state: %d\n", ret);
+ goto clean;
+ }
+
+ ret = int_imx_secvio_sc_enable_irq(dev);
+ if (ret) {
+ dev_err(dev, "Failed to enable IRQ\n");
+ goto clean;
+ }
+
+ ret = devm_add_action_or_reset(dev, if_imx_secvio_sc_disable_irq, dev);
+ if (ret) {
+ dev_err(dev, "Failed to add managed action to disable IRQ\n");
+ goto clean;
+ }
+
+ /* Register the notifier for IRQ from SNVS */
+ data->irq_nb.notifier_call = imx_secvio_sc_notify;
+ ret = imx_scu_irq_register_notifier(&data->irq_nb);
+ if (ret) {
+ dev_err(dev, "Failed to register IRQ notification handler\n");
+ goto clean;
+ }
+
+ ret = devm_add_action_or_reset(dev, if_imx_scu_irq_unregister_notifier,
+ &data->irq_nb);
+ if (ret) {
+ dev_err(dev, "Failed to add action to remove irq notif\n");
+ goto clean;
+ }
+
+ /* Register the notification for reporting to user */
+ data->report_nb.notifier_call = report_to_user_notify;
+ ret = register_imx_secvio_sc_notifier(&data->report_nb);
+ if (ret) {
+ dev_err(dev, "Failed to register report notif handler\n");
+ goto clean;
+ }
+
+ ret = devm_add_action_or_reset(dev, if_unregister_imx_secvio_sc_notifier,
+ &data->report_nb);
+ if (ret) {
+ dev_err(dev, "Failed to add action to remove report notif\n");
+ goto clean;
+ }
+
+ /* Register the notification to report to audit FW */
+ data->audit_nb.notifier_call = report_to_audit_notify;
+ ret = register_imx_secvio_sc_notifier(&data->audit_nb);
+ if (ret) {
+ dev_err(dev, "Failed to register report audit handler\n");
+ goto clean;
+ }
+
+ ret = devm_add_action_or_reset(dev, if_unregister_imx_secvio_sc_notifier,
+ &data->audit_nb);
+ if (ret) {
+ dev_err(dev, "Failed to add action to remove audit notif\n");
+ goto clean;
+ }
+
+ /* Register misc device for IOCTL */
+ data->miscdev.name = devm_kstrdup(dev, "secvio-sc", GFP_KERNEL);
+ data->miscdev.minor = MISC_DYNAMIC_MINOR;
+ data->miscdev.fops = &imx_secvio_sc_fops;
+ data->miscdev.parent = dev;
+ ret = misc_register(&data->miscdev);
+ if (ret) {
+ dev_err(dev, "failed to register misc device\n");
+ goto clean;
+ }
+
+ ret = devm_add_action_or_reset(dev, if_misc_deregister, &data->miscdev);
+ if (ret) {
+ dev_err(dev, "Failed to add action to unregister miscdev\n");
+ goto clean;
+ }
+
+ gs_imx_secvio_sc_dev = dev;
+
+ /* Process current state of the secvio and tampers */
+ int_imx_secvio_sc_check_state(dev);
+
+ devres_remove_group(dev, NULL);
+
+ goto exit;
+
+clean:
+ devres_release_group(dev, NULL);
+
+exit:
+ return ret;
+}
+
+static int imx_secvio_sc_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct device *dev = &pdev->dev;
+
+ ret = imx_secvio_sc_setup(dev);
+ if (ret && ret != -EPROBE_DEFER)
+ dev_err(dev, "Failed to setup\n");
+
+ return ret;
+}
+
+static const struct of_device_id imx_secvio_sc_dt_ids[] = {
+ { .compatible = "fsl,imx-sc-secvio", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, imx_secvio_sc_dt_ids);
+
+static struct platform_driver imx_secvio_sc_driver = {
+ .driver = {
+ .name = "imx-secvio-sc",
+ .of_match_table = imx_secvio_sc_dt_ids,
+ },
+ .probe = imx_secvio_sc_probe,
+};
+module_platform_driver(imx_secvio_sc_driver);
+
+MODULE_AUTHOR("Franck LENORMAND <franck.lenormand@nxp.com>");
+MODULE_DESCRIPTION("NXP i.MX driver to handle SNVS secvio irq sent by SCFW");
+MODULE_LICENSE("GPL");
diff --git a/drivers/soc/imx/soc-imx.c b/drivers/soc/imx/soc-imx.c
index 77bc12039c3d..b8bdd253aa9c 100644
--- a/drivers/soc/imx/soc-imx.c
+++ b/drivers/soc/imx/soc-imx.c
@@ -99,7 +99,10 @@ static int __init imx_soc_device_init(void)
break;
case MXC_CPU_IMX6Q:
ocotp_compat = "fsl,imx6q-ocotp";
- soc_id = "i.MX6Q";
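+ /* rev 2.0 and later of the i.MX6Q silicon is sold as i.MX6QuadPlus */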
+ if (imx_get_soc_revision() >= IMX_CHIP_REVISION_2_0)
+ soc_id = "i.MX6QP";
+ else
+ soc_id = "i.MX6Q";
break;
case MXC_CPU_IMX6UL:
ocotp_compat = "fsl,imx6ul-ocotp";
diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c
index 32ed9dc88e45..9e869db796a0 100644
--- a/drivers/soc/imx/soc-imx8m.c
+++ b/drivers/soc/imx/soc-imx8m.c
@@ -3,8 +3,10 @@
* Copyright 2019 NXP.
*/
+#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/io.h>
+#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
@@ -13,6 +15,8 @@
#include <linux/of.h>
#include <linux/clk.h>
+#include <soc/imx/src.h>
+
#define REV_B1 0x21
#define IMX8MQ_SW_INFO_B1 0x40
@@ -20,6 +24,15 @@
#define IMX_SIP_GET_SOC_INFO 0xc2000006
+#define IMX_SIP_NOC 0xc2000008
+#define IMX_SIP_NOC_LCDIF 0x0
+#define IMX_SIP_NOC_PRIORITY 0x1
+#define NOC_GPU_PRIORITY 0x10
+#define NOC_DCSS_PRIORITY 0x11
+#define NOC_VPU_PRIORITY 0x12
+#define NOC_CPU_PRIORITY 0x13
+#define NOC_MIX_PRIORITY 0x14
+
#define OCOTP_UID_LOW 0x410
#define OCOTP_UID_HIGH 0x420
@@ -169,6 +182,23 @@ static __maybe_unused const struct of_device_id imx8_soc_match[] = {
{ }
};
+static void imx8mq_noc_init(void)
+{
+ struct arm_smccc_res res;
+
+ pr_info("Config NOC for VPU and CPU\n");
+
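+ /*
+ * SIP call into the secure firmware: a1 selects the priority
+ * sub-command, a2 the NOC master register, a3 the priority value.
+ * The 0x80000300 value is carried over from the vendor BSP; its bit
+ * layout is defined by the TF-A SIP handler, not documented here.
+ */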
+ arm_smccc_smc(IMX_SIP_NOC, IMX_SIP_NOC_PRIORITY, NOC_CPU_PRIORITY,
+ 0x80000300, 0, 0, 0, 0, &res);
+ if (res.a0)
+ pr_err("Config NOC for CPU fail!\n");
+
+ arm_smccc_smc(IMX_SIP_NOC, IMX_SIP_NOC_PRIORITY, NOC_VPU_PRIORITY,
+ 0x80000300, 0, 0, 0, 0, &res);
+ if (res.a0)
+ pr_err("Config NOC for VPU fail!\n");
+}
+
#define imx8_revision(soc_rev) \
soc_rev ? \
kasprintf(GFP_KERNEL, "%d.%d", (soc_rev >> 4) & 0xf, soc_rev & 0xf) : \
@@ -230,6 +260,9 @@ static int __init imx8_soc_init(void)
if (IS_ENABLED(CONFIG_ARM_IMX_CPUFREQ_DT))
platform_device_register_simple("imx-cpufreq-dt", -1, NULL, 0);
+ if (of_machine_is_compatible("fsl,imx8mq"))
+ imx8mq_noc_init();
+
return 0;
free_serial_number:
@@ -241,4 +274,35 @@ free_soc:
kfree(soc_dev_attr);
return ret;
}
+
device_initcall(imx8_soc_init);
+
+#define FSL_SIP_SRC 0xc2000005
+#define FSL_SIP_SRC_M4_START 0x00
+#define FSL_SIP_SRC_M4_STARTED 0x01
+
+/* To indicate M4 enabled or not on i.MX8MQ */
+static bool m4_is_enabled;
+bool imx_src_is_m4_enabled(void)
+{
+ return m4_is_enabled;
+}
+EXPORT_SYMBOL_GPL(imx_src_is_m4_enabled);
+
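+/*
+ * Query the secure firmware (FSL_SIP_SRC service) for whether the
+ * Cortex-M4 core was started; a non-zero res.a0 means it is running.
+ * The result is cached in m4_is_enabled for imx_src_is_m4_enabled().
+ */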
+int check_m4_enabled(void)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_smc(FSL_SIP_SRC, FSL_SIP_SRC_M4_STARTED, 0,
+ 0, 0, 0, 0, 0, &res);
+ m4_is_enabled = !!res.a0;
+
+ if (m4_is_enabled)
+ pr_info("M4 is started\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(check_m4_enabled);
+
+MODULE_DESCRIPTION("i.MX8M SoC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/soc/imx/soc-imx9.c b/drivers/soc/imx/soc-imx9.c
new file mode 100644
index 000000000000..288736b8f756
--- /dev/null
+++ b/drivers/soc/imx/soc-imx9.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2022 NXP
+ */
+
+#include <linux/firmware/imx/ele_base_msg.h>
+#include <linux/module.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/sys_soc.h>
+
+#define DEVICE_ID 0x800
+#define DIGPROG_MAJOR_UPPER(x) (((x) & 0x00f00000) >> 20)
+#define DIGPROG_MAJOR_LOWER(x) (((x) & 0x0000f000) >> 12)
+#define BASE_LAYER_REV(x) (((x) & 0x000000f0) >> 4)
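+
+/*
+ * DEVICE_ID is read from the anatop block: the two DIGPROG nibbles encode
+ * the SoC number (0x9/0x2 for i.MX93 below) and bits 7:4 carry the
+ * base-layer revision.
+ */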
+
+static int imx9_soc_device_register(struct device *dev)
+{
+ struct soc_device_attribute *attr;
+ struct device_node *anaosc_np;
+ struct soc_device *sdev;
+ void __iomem *anaosc;
+ u32 device_id;
+ u64 v;
+ int err;
+
+ attr = kzalloc(sizeof(*attr), GFP_KERNEL);
+ if (!attr)
+ return -ENOMEM;
+
+ err = of_property_read_string(of_root, "model", &attr->machine);
+ if (err)
+ goto attr;
+
+ attr->family = kasprintf(GFP_KERNEL, "Freescale i.MX");
+
+ anaosc_np = of_find_compatible_node(NULL, NULL, "fsl,imx93-anatop");
+ if (!anaosc_np) {
+ err = -ENOENT;
+ goto family;
+ }
+ anaosc = of_iomap(anaosc_np, 0);
+ of_node_put(anaosc_np);
+ if (!anaosc) {
+ err = -ENOMEM;
+ goto family;
+ }
+
+ device_id = readl(anaosc + DEVICE_ID);
+
+ iounmap(anaosc);
+
+ if (BASE_LAYER_REV(device_id) == 0x1)
+ attr->revision = kasprintf(GFP_KERNEL, "1.0");
+ else
+ attr->revision = kasprintf(GFP_KERNEL, "unknown");
+
+ err = nvmem_cell_read_u64(dev, "soc_unique_id", &v);
+ if (err)
+ goto revision;
+ attr->serial_number = kasprintf(GFP_KERNEL, "%016llX", v);
+
+ if (DIGPROG_MAJOR_UPPER(device_id) == 0x9 &&
+ DIGPROG_MAJOR_LOWER(device_id) == 0x2)
+ attr->soc_id = kasprintf(GFP_KERNEL, "i.MX93");
+ else
+ attr->soc_id = kasprintf(GFP_KERNEL, "unknown");
+
+ sdev = soc_device_register(attr);
+ if (IS_ERR(sdev)) {
+ err = PTR_ERR(sdev);
+ goto soc_id;
+ }
+
+ return 0;
+
+soc_id:
+ kfree(attr->soc_id);
+ kfree(attr->serial_number);
+revision:
+ kfree(attr->revision);
+family:
+ kfree(attr->family);
+attr:
+ kfree(attr);
+ return err;
+}
+
+static int imx9_init_soc_probe(struct platform_device *pdev)
+{
+ int ret;
+
+ ret = imx9_soc_device_register(&pdev->dev);
+ if (ret) {
+ pr_err("failed to register SoC device: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id imx9_soc_of_match[] = {
+ { .compatible = "fsl,imx93-soc", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, imx9_soc_of_match);
+
+static struct platform_driver imx9_init_soc_driver = {
+ .driver = {
+ .name = "imx9_init_soc",
+ .of_match_table = of_match_ptr(imx9_soc_of_match),
+ },
+ .probe = imx9_init_soc_probe,
+};
+module_platform_driver(imx9_init_soc_driver);
+
+MODULE_AUTHOR("NXP");
+MODULE_DESCRIPTION("NXP i.MX9 SoC");
+MODULE_LICENSE("GPL v2");