/*
 * Copyright (c) 2019, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

/*
 * The include targets were lost in extraction; the list below is
 * reconstructed from the APIs this file uses (mmio_*, udelay, tzc380_*,
 * BIT(), SMC_UNK, the GPC register macros and the SiP command IDs).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#include <common/debug.h>
#include <drivers/arm/tzc380.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <lib/psci/psci.h>
#include <lib/smccc.h>
#include <platform_def.h>
#include <services/std_svc.h>

#include <gpc.h>
#include <imx_sip_svc.h>

#define MIPI_PWR_REQ		BIT(0)
#define PCIE_PWR_REQ		BIT(1)
#define OTG1_PWR_REQ		BIT(2)
#define OTG2_PWR_REQ		BIT(3)
#define HSIOMIX_PWR_REQ		BIT(4)
#define GPU2D_PWR_REQ		BIT(6)
#define GPUMIX_PWR_REQ		BIT(7)
#define VPUMIX_PWR_REQ		BIT(8)
#define GPU3D_PWR_REQ		BIT(9)
#define DISPMIX_PWR_REQ		BIT(10)
#define VPU_G1_PWR_REQ		BIT(11)
#define VPU_G2_PWR_REQ		BIT(12)
#define VPU_H1_PWR_REQ		BIT(13)

#define HSIOMIX_ADB400_SYNC	(0x3 << 5)
#define DISPMIX_ADB400_SYNC	BIT(7)
#define VPUMIX_ADB400_SYNC	BIT(8)
#define GPU3D_ADB400_SYNC	BIT(9)
#define GPU2D_ADB400_SYNC	BIT(10)
#define GPUMIX_ADB400_SYNC	BIT(11)
#define HSIOMIX_ADB400_ACK	(0x3 << 23)
#define DISPMIX_ADB400_ACK	BIT(25)
#define VPUMIX_ADB400_ACK	BIT(26)
#define GPU3D_ADB400_ACK	BIT(27)
#define GPU2D_ADB400_ACK	BIT(28)
#define GPUMIX_ADB400_ACK	BIT(29)

#define MIPI_PGC		0xc00
#define PCIE_PGC		0xc40
#define OTG1_PGC		0xc80
#define OTG2_PGC		0xcc0
#define HSIOMIX_PGC		0xd00
#define GPU2D_PGC		0xd80
#define GPUMIX_PGC		0xdc0
#define VPUMIX_PGC		0xe00
#define GPU3D_PGC		0xe40
#define DISPMIX_PGC		0xe80
#define VPU_G1_PGC		0xec0
#define VPU_G2_PGC		0xf00
#define VPU_H1_PGC		0xf40

#define CCGR(x)			(0x4000 + (x) * 16)

enum pu_domain_id {
	HSIOMIX,
	PCIE,
	OTG1,
	OTG2,
	GPUMIX,
	VPUMIX,
	VPU_G1,
	VPU_G2,
	VPU_H1,
	DISPMIX,
	MIPI,
	/* the two domains below are only for ATF internal use */
	GPU2D,
	GPU3D,
};

/* PU domain */
static struct imx_pwr_domain pu_domains[] = {
	IMX_MIX_DOMAIN(HSIOMIX),
	IMX_PD_DOMAIN(PCIE),
	IMX_PD_DOMAIN(OTG1),
	IMX_PD_DOMAIN(OTG2),
	IMX_MIX_DOMAIN(GPUMIX),
	IMX_MIX_DOMAIN(VPUMIX),
	IMX_PD_DOMAIN(VPU_G1),
	IMX_PD_DOMAIN(VPU_G2),
	IMX_PD_DOMAIN(VPU_H1),
	IMX_MIX_DOMAIN(DISPMIX),
	IMX_PD_DOMAIN(MIPI),
	/* the two domains below are only for ATF internal use */
	IMX_MIX_DOMAIN(GPU2D),
	IMX_MIX_DOMAIN(GPU3D),
};

static unsigned int pu_domain_status;

#define GPU_RCR			0x40
#define VPU_RCR			0x44

#define VPU_CTL_BASE		0x38330000
#define BLK_SFT_RSTN_CSR	0x0
#define H1_SFT_RSTN		BIT(2)
#define G1_SFT_RSTN		BIT(1)
#define G2_SFT_RSTN		BIT(0)

#define DISP_CTL_BASE		0x32e28000

void vpu_sft_reset_assert(uint32_t domain_id)
{
	uint32_t val;

	val = mmio_read_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR);

	switch (domain_id) {
	case VPU_G1:
		val &= ~G1_SFT_RSTN;
		mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
		break;
	case VPU_G2:
		val &= ~G2_SFT_RSTN;
		mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
		break;
	case VPU_H1:
		val &= ~H1_SFT_RSTN;
		mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
		break;
	default:
		break;
	}
}

void vpu_sft_reset_deassert(uint32_t domain_id)
{
	uint32_t val;

	val = mmio_read_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR);

	switch (domain_id) {
	case VPU_G1:
		val |= G1_SFT_RSTN;
		mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
		break;
	case VPU_G2:
		val |= G2_SFT_RSTN;
		mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
		break;
	case VPU_H1:
		val |= H1_SFT_RSTN;
		mmio_write_32(VPU_CTL_BASE + BLK_SFT_RSTN_CSR, val);
		break;
	default:
		break;
	}
}
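/*
 * Illustrative sketch only (hypothetical helper, not part of the
 * upstream driver): the PGC power-up handshake that
 * imx_gpc_pm_domain_enable() below open-codes for the main domain and
 * again for GPU2D/GPU3D. 'pgc_offset' and 'pwr_req' correspond to the
 * fields of struct imx_pwr_domain.
 */
static inline void imx_pgc_power_up(uint32_t pgc_offset, uint32_t pwr_req)
{
	/* clear the PGC bit so the domain is allowed to power up */
	mmio_clrbits_32(IMX_GPC_BASE + pgc_offset, 0x1);

	/* trigger the power up request; the TRG bit self-clears when done */
	mmio_setbits_32(IMX_GPC_BASE + PU_PGC_UP_TRG, pwr_req);

	/* wait for the power request to be done */
	while (mmio_read_32(IMX_GPC_BASE + PU_PGC_UP_TRG) & pwr_req)
		;
}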
void imx_gpc_pm_domain_enable(uint32_t domain_id, bool on)
{
	struct imx_pwr_domain *pwr_domain = &pu_domains[domain_id];

	if (on) {
		pu_domain_status |= (1 << domain_id);

		if (domain_id == VPU_G1 || domain_id == VPU_G2 ||
		    domain_id == VPU_H1)
			vpu_sft_reset_assert(domain_id);

		/* HSIOMIX has no PU bit, so skip it */
		if (domain_id != HSIOMIX) {
			/* clear the PGC bit */
			mmio_clrbits_32(IMX_GPC_BASE + pwr_domain->pgc_offset, 0x1);

			/* power up the domain */
			mmio_setbits_32(IMX_GPC_BASE + PU_PGC_UP_TRG, pwr_domain->pwr_req);

			/* wait for the power request to be done */
			while (mmio_read_32(IMX_GPC_BASE + PU_PGC_UP_TRG) & pwr_domain->pwr_req)
				;
		}

		if (domain_id == VPU_G1 || domain_id == VPU_G2 ||
		    domain_id == VPU_H1) {
			vpu_sft_reset_deassert(domain_id);
			/* delay for a while to make sure the reset is done */
			udelay(100);
		}

		if (domain_id == GPUMIX) {
			/* assert reset */
			mmio_write_32(IMX_SRC_BASE + GPU_RCR, 0x1);

			/* power up GPU2D */
			mmio_clrbits_32(IMX_GPC_BASE + GPU2D_PGC, 0x1);
			mmio_setbits_32(IMX_GPC_BASE + PU_PGC_UP_TRG, GPU2D_PWR_REQ);

			/* wait for the power request to be done */
			while (mmio_read_32(IMX_GPC_BASE + PU_PGC_UP_TRG) & GPU2D_PWR_REQ)
				;

			udelay(1);

			/* power up GPU3D */
			mmio_clrbits_32(IMX_GPC_BASE + GPU3D_PGC, 0x1);
			mmio_setbits_32(IMX_GPC_BASE + PU_PGC_UP_TRG, GPU3D_PWR_REQ);

			/* wait for the power request to be done */
			while (mmio_read_32(IMX_GPC_BASE + PU_PGC_UP_TRG) & GPU3D_PWR_REQ)
				;

			udelay(10);
			/* release the GPUMIX reset */
			mmio_write_32(IMX_SRC_BASE + GPU_RCR, 0x0);
			udelay(10);
		}

		/* VPUMIX soft reset and clock enable */
		if (domain_id == VPUMIX) {
			mmio_write_32(IMX_SRC_BASE + VPU_RCR, 0x1);
			udelay(5);
			mmio_write_32(IMX_SRC_BASE + VPU_RCR, 0x0);
			udelay(5);

			/* enable all the clocks */
			mmio_write_32(VPU_CTL_BASE + 0x4, 0x7);
		}

		if (domain_id == DISPMIX) {
			/* special setting for DISPMIX */
			mmio_write_32(DISP_CTL_BASE + 0x4, 0x1fff);
			mmio_write_32(DISP_CTL_BASE, 0x7f);
			mmio_write_32(DISP_CTL_BASE + 0x8, 0x30000);
		}

		/* handle the ADB400 sync */
		if (!pwr_domain->init_on && pwr_domain->need_sync) {
			/* clear the ADB power down request */
			mmio_setbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, pwr_domain->adb400_sync);

			/* wait for the ADB power request ack */
			while (!(mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & pwr_domain->adb400_ack))
				;
		}

		if (domain_id == GPUMIX) {
			/* power up the GPU2D ADB */
			mmio_setbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, GPU2D_ADB400_SYNC);

			/* wait for the ADB power request ack */
			while (!(mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & GPU2D_ADB400_ACK))
				;

			/* power up the GPU3D ADB */
			mmio_setbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, GPU3D_ADB400_SYNC);

			/* wait for the ADB power request ack */
			while (!(mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & GPU3D_ADB400_ACK))
				;
		}
	} else {
		pu_domain_status &= ~(1 << domain_id);

		if (domain_id == OTG1 || domain_id == OTG2)
			return;

		/* GPU2D & GPU3D ADB power down */
		if (domain_id == GPUMIX) {
			mmio_clrbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, GPU2D_ADB400_SYNC);

			/* wait for the ADB power request ack */
			while ((mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & GPU2D_ADB400_ACK))
				;

			mmio_clrbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, GPU3D_ADB400_SYNC);

			/* wait for the ADB power request ack */
			while ((mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & GPU3D_ADB400_ACK))
				;
		}

		/* handle the ADB400 sync */
		if (!pwr_domain->init_on && pwr_domain->need_sync) {
			/* set the ADB power down request */
			mmio_clrbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, pwr_domain->adb400_sync);

			/* wait for the ADB power request ack */
			while ((mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & pwr_domain->adb400_ack))
				;
		}

		if (domain_id == GPUMIX) {
			/* power down GPU2D */
			mmio_setbits_32(IMX_GPC_BASE + GPU2D_PGC, 0x1);
			mmio_setbits_32(IMX_GPC_BASE + PU_PGC_DN_TRG, GPU2D_PWR_REQ);

			/* wait for the power request to be done */
			while (mmio_read_32(IMX_GPC_BASE + PU_PGC_DN_TRG) & GPU2D_PWR_REQ)
				;

			/* power down GPU3D */
			mmio_setbits_32(IMX_GPC_BASE + GPU3D_PGC, 0x1);
			mmio_setbits_32(IMX_GPC_BASE + PU_PGC_DN_TRG, GPU3D_PWR_REQ);

			/* wait for the power request to be done */
			while (mmio_read_32(IMX_GPC_BASE + PU_PGC_DN_TRG) & GPU3D_PWR_REQ)
				;
		}

		/* HSIOMIX has no PU bit, so skip it */
		if (domain_id != HSIOMIX) {
			/* set the PGC bit */
			mmio_setbits_32(IMX_GPC_BASE + pwr_domain->pgc_offset, 0x1);

			/* power down the domain */
			mmio_setbits_32(IMX_GPC_BASE + PU_PGC_DN_TRG, pwr_domain->pwr_req);

			/* wait for the power request to be done */
			while (mmio_read_32(IMX_GPC_BASE + PU_PGC_DN_TRG) & pwr_domain->pwr_req)
				;
		}
	}

	pwr_domain->init_on = false;
}
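/*
 * Illustrative sketch only (hypothetical helper, not upstream code):
 * the ADB400 sync/ack handshake pattern used above. Power-up sets the
 * sync bits in GPC_PU_PWRHSK and waits for the ack to assert;
 * power-down clears the sync bits and waits for the ack to deassert.
 */
static inline void imx_adb400_handshake(uint32_t sync, uint32_t ack, bool up)
{
	if (up) {
		mmio_setbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, sync);
		while (!(mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & ack))
			;
	} else {
		mmio_clrbits_32(IMX_GPC_BASE + GPC_PU_PWRHSK, sync);
		while (mmio_read_32(IMX_GPC_BASE + GPC_PU_PWRHSK) & ack)
			;
	}
}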
static void imx8mm_tz380_init(void)
{
	unsigned int val;

	val = mmio_read_32(IMX_IOMUX_GPR_BASE + 0x28);
	if ((val & GPR_TZASC_EN) != GPR_TZASC_EN)
		return;

	tzc380_init(IMX_TZASC_BASE);

	/* Enable 1G-5G S/NS RW */
	tzc380_configure_region(0, 0x00000000,
				TZC_ATTR_REGION_SIZE(TZC_REGION_SIZE_4G) |
				TZC_ATTR_REGION_EN_MASK | TZC_ATTR_SP_ALL);
}

void imx_noc_wrapper_pre_suspend(unsigned int proc_num)
{
	/* enable MASTER1 & MASTER2 power down in A53 LPM mode */
	mmio_clrbits_32(IMX_GPC_BASE + LPCR_A53_BSC, MASTER1_LPM_HSK | MASTER2_LPM_HSK);
	mmio_setbits_32(IMX_GPC_BASE + MST_CPU_MAPPING, MASTER1_MAPPING | MASTER2_MAPPING);

	/* the NoC can only be powered down when all the PU domains are off */
	if (!pu_domain_status) {
		/* enable NoC power down */
		imx_noc_slot_config(true);

		/*
		 * the below clocks must be enabled to make sure the RDC MRCs
		 * can be successfully reloaded.
		 */
		mmio_setbits_32(IMX_CCM_BASE + 0xa300, (0x1 << 28));
		mmio_write_32(IMX_CCM_BASE + CCGR(5), 0x3);
		mmio_write_32(IMX_CCM_BASE + CCGR(37), 0x3);
		mmio_write_32(IMX_CCM_BASE + CCGR(87), 0x3);
	}

	/*
	 * the GIC redistributor context save must be called when
	 * the GIC CPU interface is disabled and before the distributor save.
	 */
	plat_gic_save(proc_num, &imx_gicv3_ctx);
}

void imx_noc_wrapper_post_resume(unsigned int proc_num)
{
	/* disable MASTER1 & MASTER2 power down in A53 LPM mode */
	mmio_setbits_32(IMX_GPC_BASE + LPCR_A53_BSC, MASTER1_LPM_HSK | MASTER2_LPM_HSK);
	mmio_clrbits_32(IMX_GPC_BASE + MST_CPU_MAPPING, MASTER1_MAPPING | MASTER2_MAPPING);

	/* the NoC can only be powered down when all the PU domains are off */
	if (!pu_domain_status) {
		/* re-init the tz380 if resuming from NoC power down */
		imx8mm_tz380_init();

		/* disable NoC power down */
		imx_noc_slot_config(false);
	}

	/* restore the GIC context */
	plat_gic_restore(proc_num, &imx_gicv3_ctx);
}
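/*
 * Note (added for clarity, not upstream text): the NoC wrapper hooks
 * above are expected to be used as a pair around system sleep, e.g.
 * from the platform PSCI suspend/resume path:
 *
 *	imx_noc_wrapper_pre_suspend(proc_num);	// save GIC, arm NoC power down
 *	// ... A53 domain enters LPM ...
 *	imx_noc_wrapper_post_resume(proc_num);	// re-init TZASC, restore GIC
 *
 * The NoC may only power down when pu_domain_status reads zero, i.e.
 * when no PU domain that could still master NoC traffic is left on.
 */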
void imx_gpc_init(void)
{
	unsigned int val;
	int i;

	/* mask all the wakeup irqs by default */
	for (i = 0; i < 4; i++) {
		mmio_write_32(IMX_GPC_BASE + IMR1_CORE0_A53 + i * 4, ~0x0);
		mmio_write_32(IMX_GPC_BASE + IMR1_CORE1_A53 + i * 4, ~0x0);
		mmio_write_32(IMX_GPC_BASE + IMR1_CORE2_A53 + i * 4, ~0x0);
		mmio_write_32(IMX_GPC_BASE + IMR1_CORE3_A53 + i * 4, ~0x0);
		mmio_write_32(IMX_GPC_BASE + IMR1_CORE0_M4 + i * 4, ~0x0);
	}

	val = mmio_read_32(IMX_GPC_BASE + LPCR_A53_BSC);
	/* use the GIC wake_request to wake up C0~C3 from LPM */
	val |= 0x30c00000;
	/* clear the MASTER0 LPM handshake */
	val &= ~(1 << 6);
	mmio_write_32(IMX_GPC_BASE + LPCR_A53_BSC, val);

	/* clear the MASTER1 & MASTER2 mapping in CPU0(A53) */
	mmio_clrbits_32(IMX_GPC_BASE + MST_CPU_MAPPING,
			(MASTER1_MAPPING | MASTER2_MAPPING));

	/* set all mix/PU in the A53 domain */
	mmio_write_32(IMX_GPC_BASE + PGC_CPU_0_1_MAPPING, 0xffff);

	/*
	 * Set the CORE & SCU power up timing:
	 * SW = 0x1, SW2ISO = 0x8;
	 * the CPU CORE and SCU power up timing counter
	 * is driven by the 32K OSC, so each domain's power up
	 * latency is (SW + SW2ISO) / 32768
	 */
	mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(0) + 0x4, 0x401);
	mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(1) + 0x4, 0x401);
	mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(2) + 0x4, 0x401);
	mmio_write_32(IMX_GPC_BASE + COREx_PGC_PCR(3) + 0x4, 0x401);
	mmio_write_32(IMX_GPC_BASE + PLAT_PGC_PCR + 0x4, 0x401);
	mmio_write_32(IMX_GPC_BASE + PGC_SCU_TIMING,
		      (0x59 << 10) | 0x5B | (0x2 << 20));

	/* set DUMMY PDN/PUP ACK by default for the A53 domain */
	mmio_write_32(IMX_GPC_BASE + PGC_ACK_SEL_A53,
		      A53_DUMMY_PUP_ACK | A53_DUMMY_PDN_ACK);

	/* clear DSM by default */
	val = mmio_read_32(IMX_GPC_BASE + SLPCR);
	val &= ~SLPCR_EN_DSM;
	/* enable the fast wakeup wait/stop mode */
	val |= SLPCR_A53_FASTWUP_WAIT_MODE;
	val |= SLPCR_A53_FASTWUP_STOP_MODE;
	/* clear the RBC */
	val &= ~(0x3f << SLPCR_RBC_COUNT_SHIFT);
	/* set the STBY_COUNT to 0x5, (128 * 30)us */
	val &= ~(0x7 << SLPCR_STBY_COUNT_SHFT);
	val |= (0x5 << SLPCR_STBY_COUNT_SHFT);
	mmio_write_32(IMX_GPC_BASE + SLPCR, val);

	/*
	 * The USB PHY power up needs to make sure the RESET bit in SRC is
	 * clear, otherwise the PU power up bit in GPC will NOT self-clear.
	 * It only needs to be done once.
	 */
	mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG1PHY_SCR, 0x1);
	mmio_clrbits_32(IMX_SRC_BASE + SRC_OTG2PHY_SCR, 0x1);
}

int imx_src_handler(uint32_t smc_fid, u_register_t x1, u_register_t x2,
		    u_register_t x3)
{
	uint32_t val;

	switch (x1) {
	case IMX_SIP_SRC_M4_START:
		val = mmio_read_32(IMX_SRC_BASE + SRC_M4RCR);
		val &= ~SRC_SCR_M4C_NON_SCLR_RST_MASK;
		val |= SRC_SCR_M4_ENABLE_MASK;
		mmio_write_32(IMX_SRC_BASE + SRC_M4RCR, val);
		break;
	case IMX_SIP_SRC_M4_STARTED:
		val = mmio_read_32(IMX_SRC_BASE + SRC_M4RCR);
		return !(val & SRC_SCR_M4C_NON_SCLR_RST_MASK);
	default:
		return SMC_UNK;
	}

	return 0;
}
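/*
 * Illustrative note (assumption, not upstream code): imx_src_handler()
 * is reached through the i.MX SiP runtime service, which decodes the
 * SMC function ID and passes x1 as the sub-command. A rich-OS caller
 * would issue something along the lines of:
 *
 *	arm_smccc_smc(IMX_SIP_SRC, IMX_SIP_SRC_M4_START, 0, 0, 0, 0, 0, 0, &res);
 *
 * to release the Cortex-M4 from reset; see imx_sip_svc.h for the actual
 * function ID definitions.
 */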