author    Jacky Bai <ping.bai@nxp.com>  2020-03-26 15:24:03 +0800
committer Jacky Bai <ping.bai@nxp.com>  2020-03-27 20:52:50 +0800
commit    c6a3b2479299fc8261fa65b60fa5172d7f7887c7 (patch)
tree      a5f4ea9160887e3ce49c0ac920b8361f1696ec63
parent    6b8249ff58cc8853396498a074535dcd7f81beaf (diff)
plat: imx8mp: imx8mp wait mode workaround
Add the i.MX8MP wait mode workaround for the Alpha release only; this patch will be dropped in the future.

Signed-off-by: Jacky Bai <ping.bai@nxp.com>
-rw-r--r--  plat/imx/imx8m/ddr/dram.c              2
-rw-r--r--  plat/imx/imx8m/imx8mp/gpc.c          204
-rw-r--r--  plat/imx/imx8m/imx8mp/imx8mp_psci.c   46
-rw-r--r--  plat/imx/imx8m/imx8mp/platform.mk      1
-rw-r--r--  plat/imx/imx8m/include/gpc.h           2
5 files changed, 252 insertions, 3 deletions
diff --git a/plat/imx/imx8m/ddr/dram.c b/plat/imx/imx8m/ddr/dram.c
index 7dbaabfe..f785d2a0 100644
--- a/plat/imx/imx8m/ddr/dram.c
+++ b/plat/imx/imx8m/ddr/dram.c
@@ -252,7 +252,7 @@ int dram_dvfs_handler(uint32_t smc_fid, void *handle,
for (int i = 0; i < PLATFORM_CORE_COUNT; i++)
if (cpu_id != i && (online_cores & (0x1 << (i * 8))))
plat_ic_raise_el3_sgi(0x8, i);
-#if defined(PLAT_imx8mq)
+#if defined(PLAT_imx8mq) || defined(PLAT_imx8mp)
for (int i = 0; i < 4; i++) {
if (i != cpu_id && online_cores & (1 << (i * 8)))
imx_gpc_core_wake(1 << i);
diff --git a/plat/imx/imx8m/imx8mp/gpc.c b/plat/imx/imx8m/imx8mp/gpc.c
index 32a40e40..3d53ccb1 100644
--- a/plat/imx/imx8m/imx8mp/gpc.c
+++ b/plat/imx/imx8m/imx8mp/gpc.c
@@ -13,6 +13,7 @@
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
+#include <lib/spinlock.h>
#include <lib/smccc.h>
#include <platform_def.h>
#include <services/std_svc.h>
@@ -21,6 +22,14 @@
#include <imx_sip_svc.h>
#include <plat_imx8.h>
+#define FSL_SIP_CONFIG_GPC_MASK 0x00
+#define FSL_SIP_CONFIG_GPC_UNMASK 0x01
+#define FSL_SIP_CONFIG_GPC_SET_WAKE 0x02
+#define FSL_SIP_CONFIG_GPC_PM_DOMAIN 0x03
+#define FSL_SIP_CONFIG_GPC_SET_AFF 0x04
+#define FSL_SIP_CONFIG_GPC_CORE_WAKE 0x05
+
+
#define CCGR(x) (0x4000 + (x) * 16)
struct imx_noc_setting {
@@ -100,7 +109,158 @@ static struct imx_noc_setting noc_setting[] = {
{ VPU_H1, 0xe80, 0xe80, 0x80000303, 0x0, 0x0}
};
+static uint32_t gpc_saved_imrs[20];
+static uint32_t gpc_wake_irqs[5];
+static uint32_t gpc_imr_offset[] = {
+ IMX_GPC_BASE + IMR1_CORE0_A53,
+ IMX_GPC_BASE + IMR1_CORE1_A53,
+ IMX_GPC_BASE + IMR1_CORE2_A53,
+ IMX_GPC_BASE + IMR1_CORE3_A53,
+ IMX_GPC_BASE + IMR1_CORE0_M4,
+};
+
static unsigned int pu_domain_status;
+spinlock_t gpc_imr_lock[4];
+
+static void gpc_imr_core_spin_lock(unsigned int core_id)
+{
+ spin_lock(&gpc_imr_lock[core_id]);
+}
+
+static void gpc_imr_core_spin_unlock(unsigned int core_id)
+{
+ spin_unlock(&gpc_imr_lock[core_id]);
+}
+
+static void gpc_save_imr_lpm(unsigned int core_id, unsigned int imr_idx)
+{
+ uint32_t reg = gpc_imr_offset[core_id] + imr_idx * 4;
+
+ gpc_imr_core_spin_lock(core_id);
+
+ gpc_saved_imrs[core_id + imr_idx * 4] = mmio_read_32(reg);
+ mmio_write_32(reg, ~gpc_wake_irqs[imr_idx]);
+
+ gpc_imr_core_spin_unlock(core_id);
+}
+
+static void gpc_restore_imr_lpm(unsigned int core_id, unsigned int imr_idx)
+{
+ uint32_t reg = gpc_imr_offset[core_id] + imr_idx * 4;
+ uint32_t val = gpc_saved_imrs[core_id + imr_idx * 4];
+
+ gpc_imr_core_spin_lock(core_id);
+
+ mmio_write_32(reg, val);
+
+ gpc_imr_core_spin_unlock(core_id);
+}
+
+void imx_set_sys_wakeup(unsigned int last_core, bool pdn)
+{
+ unsigned int imr, core;
+
+ if (pdn)
+ for (imr = 0; imr < 5; imr++)
+ for (core = 0; core < 4; core++)
+ gpc_save_imr_lpm(core, imr);
+ else
+ for (imr = 0; imr < 5; imr++)
+ for (core = 0; core < 4; core++)
+ gpc_restore_imr_lpm(core, imr);
+
+ /* enable the MU wakeup */
+ if (imx_m4_lpa_active())
+ mmio_clrbits_32(gpc_imr_offset[last_core] + 0x8, BIT(24));
+}
+
+static void imx_gpc_hwirq_mask(unsigned int hwirq)
+{
+ uintptr_t reg;
+ unsigned int val;
+
+ gpc_imr_core_spin_lock(0);
+ reg = gpc_imr_offset[0] + (hwirq / 32) * 4;
+ val = mmio_read_32(reg);
+ val |= 1 << hwirq % 32;
+ mmio_write_32(reg, val);
+ gpc_imr_core_spin_unlock(0);
+}
+
+static void imx_gpc_hwirq_unmask(unsigned int hwirq)
+{
+ uintptr_t reg;
+ unsigned int val;
+
+ gpc_imr_core_spin_lock(0);
+ reg = gpc_imr_offset[0] + (hwirq / 32) * 4;
+ val = mmio_read_32(reg);
+ val &= ~(1 << hwirq % 32);
+ mmio_write_32(reg, val);
+ gpc_imr_core_spin_unlock(0);
+}
+
+static void imx_gpc_set_wake(uint32_t hwirq, unsigned int on)
+{
+ uint32_t mask, idx;
+
+ mask = 1 << hwirq % 32;
+ idx = hwirq / 32;
+ gpc_wake_irqs[idx] = on ? gpc_wake_irqs[idx] | mask :
+ gpc_wake_irqs[idx] & ~mask;
+}
+
+static void imx_gpc_mask_irq0(uint32_t core_id, uint32_t mask)
+{
+ gpc_imr_core_spin_lock(core_id);
+ if (mask)
+ mmio_setbits_32(gpc_imr_offset[core_id], 1);
+ else
+ mmio_clrbits_32(gpc_imr_offset[core_id], 1);
+ dsb();
+ gpc_imr_core_spin_unlock(core_id);
+}
+
+void imx_gpc_core_wake(uint32_t cpumask)
+{
+ for (int i = 0; i < 4; i++)
+ if (cpumask & (1 << i))
+ imx_gpc_mask_irq0(i, false);
+}
+
+void imx_gpc_set_a53_core_awake(uint32_t core_id)
+{
+ imx_gpc_mask_irq0(core_id, true);
+}
+
+static void imx_gpc_set_affinity(uint32_t hwirq, unsigned int cpu_idx)
+{
+ uintptr_t reg;
+ unsigned int val;
+
+ /*
+ * Use the mask/unmask bit as the affinity function. Unmask the
+ * IMR bit to enable IRQ wakeup for this core.
+ */
+ gpc_imr_core_spin_lock(cpu_idx);
+ reg = gpc_imr_offset[cpu_idx] + (hwirq / 32) * 4;
+ val = mmio_read_32(reg);
+ val &= ~(1 << hwirq % 32);
+ mmio_write_32(reg, val);
+ gpc_imr_core_spin_unlock(cpu_idx);
+
+ /* clear affinity of other core */
+ for (int i = 0; i < 4; i++) {
+ if (cpu_idx != i) {
+ gpc_imr_core_spin_lock(i);
+ reg = gpc_imr_offset[i] + (hwirq / 32) * 4;
+ val = mmio_read_32(reg);
+ val |= (1 << hwirq % 32);
+ mmio_write_32(reg, val);
+ gpc_imr_core_spin_unlock(i);
+ }
+ }
+}
static void imx_config_noc(uint32_t domain_id)
{
@@ -341,9 +501,20 @@ void imx_gpc_init(void)
mmio_write_32(IMX_GPC_BASE + IMR1_CORE0_M4 + i * 4, ~0x0);
}
+ /*
+ * Due to a hardware design requirement, the GPR interrupt (#32)
+ * must be kept unmasked during RUN mode to avoid entering DSM
+ * mode by mistake.
+ */
+ for (i = 0; i < 4; i++)
+ mmio_write_32(gpc_imr_offset[i], ~0x1);
+
+ /* leave the IOMUX_GPC bit 12 on for core wakeup */
+ mmio_setbits_32(IMX_IOMUX_GPR_BASE + 0x4, 1 << 12);
+
+
val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC);
/* use GIC wake_request to wakeup C0~C3 from LPM */
- val |= 0x30c00000;
+ val |= 0x40000000;
/* clear the MASTER0 LPM handshake */
val &= ~(1 << 6);
mmio_write_32(IMX_GPC_BASE + LPCR_A53_BSC, val);
@@ -422,3 +593,34 @@ void imx_gpc_init(void)
mmio_write_32 (0x3270010c, 0x0);
}
+
+int imx_gpc_handler(uint32_t smc_fid,
+ u_register_t x1,
+ u_register_t x2,
+ u_register_t x3)
+{
+ switch (x1) {
+ case FSL_SIP_CONFIG_GPC_PM_DOMAIN:
+ imx_gpc_pm_domain_enable(x2, x3);
+ break;
+ case FSL_SIP_CONFIG_GPC_CORE_WAKE:
+ imx_gpc_core_wake(x2);
+ break;
+ case FSL_SIP_CONFIG_GPC_SET_WAKE:
+ imx_gpc_set_wake(x2, x3);
+ break;
+ case FSL_SIP_CONFIG_GPC_MASK:
+ imx_gpc_hwirq_mask(x2);
+ break;
+ case FSL_SIP_CONFIG_GPC_UNMASK:
+ imx_gpc_hwirq_unmask(x2);
+ break;
+ case FSL_SIP_CONFIG_GPC_SET_AFF:
+ imx_gpc_set_affinity(x2, x3);
+ break;
+ default:
+ return SMC_UNK;
+ }
+
+ return 0;
+}
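
The handler above exposes the GPC mask/unmask, wakeup-source, affinity and core-wake operations to the non-secure world, with the sub-command passed in x1 and its arguments in x2/x3. For illustration only, a minimal sketch of how a non-secure OS driver might invoke two of these sub-commands; it assumes the handler is reached through an i.MX SIP function ID named IMX_SIP_GPC (declared in imx_sip_svc.h but not shown in this patch) and uses the Linux SMCCC helper arm_smccc_smc(), so both the function ID value and the caller side are assumptions, not part of this change:

	/* Hypothetical non-secure caller, sketched against this patch. */
	#include <linux/arm-smccc.h>
	#include <linux/types.h>

	#define IMX_SIP_GPC			0xC2000000	/* assumed SIP function ID */
	#define FSL_SIP_CONFIG_GPC_SET_WAKE	0x02
	#define FSL_SIP_CONFIG_GPC_SET_AFF	0x04

	/* Mark an interrupt as a wakeup source and route it to one core. */
	static void gpc_route_wakeup_irq(u32 hwirq, u32 cpu_idx)
	{
		struct arm_smccc_res res;

		/* x1 = sub-command, x2 = hwirq, x3 = enable / core index */
		arm_smccc_smc(IMX_SIP_GPC, FSL_SIP_CONFIG_GPC_SET_WAKE,
			      hwirq, 1, 0, 0, 0, 0, &res);
		arm_smccc_smc(IMX_SIP_GPC, FSL_SIP_CONFIG_GPC_SET_AFF,
			      hwirq, cpu_idx, 0, 0, 0, 0, &res);
	}
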
diff --git a/plat/imx/imx8m/imx8mp/imx8mp_psci.c b/plat/imx/imx8m/imx8mp/imx8mp_psci.c
index f541fc13..96bed88e 100644
--- a/plat/imx/imx8m/imx8mp/imx8mp_psci.c
+++ b/plat/imx/imx8m/imx8mp/imx8mp_psci.c
@@ -9,13 +9,59 @@
#include <arch.h>
#include <arch_helpers.h>
#include <common/debug.h>
+#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
+#include <dram.h>
#include <gpc.h>
#include <imx8m_psci.h>
#include <plat_imx8.h>
+void imx_pwr_domain_off(const psci_power_state_t *target_state)
+{
+ uint64_t mpidr = read_mpidr_el1();
+ unsigned int core_id = MPIDR_AFFLVL0_VAL(mpidr);
+
+ plat_gic_cpuif_disable();
+ imx_set_cpu_pwr_off(core_id);
+
+ /*
+ * TODO: Find out why this is still needed
+ * in order not to break suspend.
+ */
+ udelay(50);
+}
+
+void imx_domain_suspend_finish(const psci_power_state_t *target_state)
+{
+ uint64_t mpidr = read_mpidr_el1();
+ unsigned int core_id = MPIDR_AFFLVL0_VAL(mpidr);
+
+ if (is_local_state_off(SYSTEM_PWR_STATE(target_state))) {
+ if (!imx_m4_lpa_active()) {
+ imx_noc_wrapper_post_resume(core_id);
+ imx_anamix_override(false);
+ dram_exit_retention();
+ imx_set_sys_lpm(core_id, false);
+ }
+ imx_set_sys_wakeup(core_id, false);
+ }
+
+ if (!is_local_state_run(CLUSTER_PWR_STATE(target_state))) {
+ imx_clear_rbc_count();
+ imx_set_cluster_powerdown(core_id, PSCI_LOCAL_STATE_RUN);
+ }
+
+ if (is_local_state_off(CORE_PWR_STATE(target_state))) {
+ /* mark this core as awake by masking IRQ0 */
+ imx_gpc_set_a53_core_awake(core_id);
+ imx_set_cpu_lpm(core_id, false);
+ plat_gic_cpuif_enable();
+ } else {
+ write_scr_el3(read_scr_el3() & (~SCR_FIQ_BIT));
+ isb();
+ }
+}
+
static const plat_psci_ops_t imx_plat_psci_ops = {
.pwr_domain_on = imx_pwr_domain_on,
.pwr_domain_on_finish = imx_pwr_domain_on_finish,
diff --git a/plat/imx/imx8m/imx8mp/platform.mk b/plat/imx/imx8m/imx8mp/platform.mk
index c668ca70..b5deff8c 100644
--- a/plat/imx/imx8m/imx8mp/platform.mk
+++ b/plat/imx/imx8m/imx8mp/platform.mk
@@ -52,6 +52,7 @@ BL31_SOURCES += plat/imx/common/imx8_helpers.S \
USE_COHERENT_MEM := 1
RESET_TO_BL31 := 1
A53_DISABLE_NON_TEMPORAL_HINT := 0
+WARMBOOT_ENABLE_DCACHE_EARLY := 1
ERRATA_A53_835769 := 1
ERRATA_A53_843419 := 1
diff --git a/plat/imx/imx8m/include/gpc.h b/plat/imx/imx8m/include/gpc.h
index 441c6f98..0968d7f9 100644
--- a/plat/imx/imx8m/include/gpc.h
+++ b/plat/imx/imx8m/include/gpc.h
@@ -84,7 +84,7 @@ void imx_noc_wrapper_pre_suspend(unsigned int proc_num);
void imx_noc_wrapper_post_resume(unsigned int proc_num);
bool imx_m4_lpa_active(void);
-#if defined(PLAT_imx8mq)
+#if defined(PLAT_imx8mq) || defined(PLAT_imx8mp)
void imx_gpc_set_a53_core_awake(uint32_t core_id);
void imx_gpc_core_wake(uint32_t cpumask);
#endif