[U-Boot] [PATCH V2 1/4] imx: mx7: psci: add cpu hotplug support

This patch adds CPU hotplug support. The previous imx_cpu_off implementation is NOT safe: a CPU can NOT power itself down at runtime, because doing so hangs the system bus due to a pending transaction. Instead, another online CPU must kill the offline CPU once it is ready to be killed.
The SRC parameter register and a magic number of ~0 are used as the handshake for killing an offline CPU: when an online CPU handles psci_affinity_info, it kills the offline CPU if it finds the magic number stored in the SRC parameter register.
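For illustration, below is a minimal C sketch of the two sides of this handshake, condensed from the diff that follows (the helper names, the IMX_CPU_SYNC_* values and the PSCI return codes are the ones introduced by this patch; the SRC_A7RCR1 fallback check and the CPU0 special case are omitted for brevity):

/* Runs on the CPU going offline (PSCI CPU_OFF): only post the request,
 * never gate our own power from here. */
__secure int imx_cpu_off(int cpu)
{
	writel(IMX_CPU_SYNC_OFF, imx_cpu_gpr_para_offset(cpu));
	return 0;
}

/* Runs on an online CPU (PSCI AFFINITY_INFO): if the magic number is
 * found, actually stop the core, gate its power and acknowledge. */
__secure int imx_cpu_affinity(int cpu)
{
	if (readl(imx_cpu_gpr_para_offset(cpu)) == IMX_CPU_SYNC_OFF) {
		imx_enable_cpu_ca7(cpu, false);
		imx_gpcv2_set_core1_power(false);
		writel(IMX_CPU_SYNC_ON, imx_cpu_gpr_para_offset(cpu));
		return PSCI_AFFINITY_LEVEL_OFF;
	}
	return PSCI_AFFINITY_LEVEL_ON;
}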
Signed-off-by: Anson Huang <Anson.Huang@nxp.com>
---
Changes since V1:
- define a magic number for the CPU handshake;
- use macro definitions for the SRC GPR register offsets.
No functional change.

 arch/arm/mach-imx/mx7/psci-mx7.c | 41 ++++++++++++++++++++++++++++++++++++----
 arch/arm/mach-imx/mx7/psci.S     | 14 ++++++++++++++
 2 files changed, 51 insertions(+), 4 deletions(-)
diff --git a/arch/arm/mach-imx/mx7/psci-mx7.c b/arch/arm/mach-imx/mx7/psci-mx7.c
index 7dc49bd..a5cccac 100644
--- a/arch/arm/mach-imx/mx7/psci-mx7.c
+++ b/arch/arm/mach-imx/mx7/psci-mx7.c
@@ -34,6 +34,14 @@
 #define CCM_ROOT_WDOG 0xbb80
 #define CCM_CCGR_WDOG1 0x49c0
 
+#define imx_cpu_gpr_entry_offset(cpu) \
+	(SRC_BASE_ADDR + SRC_GPR1_MX7D + cpu * 8)
+#define imx_cpu_gpr_para_offset(cpu) \
+	(imx_cpu_gpr_entry_offset(cpu) + 4)
+
+#define IMX_CPU_SYNC_OFF ~0
+#define IMX_CPU_SYNC_ON 0
+
 static inline void imx_gpcv2_set_m_core_pgc(bool enable, u32 offset)
 {
 	writel(enable, GPC_IPS_BASE_ADDR + offset);
@@ -69,7 +77,7 @@ __secure void imx_enable_cpu_ca7(int cpu, bool enable)
 
 __secure int imx_cpu_on(int fn, int cpu, int pc)
 {
-	writel(pc, SRC_BASE_ADDR + cpu * 8 + SRC_GPR1_MX7D);
+	writel(pc, imx_cpu_gpr_entry_offset(cpu));
 	imx_gpcv2_set_core1_power(true);
 	imx_enable_cpu_ca7(cpu, true);
 	return 0;
@@ -77,12 +85,37 @@ __secure int imx_cpu_on(int fn, int cpu, int pc)
 
 __secure int imx_cpu_off(int cpu)
 {
-	imx_enable_cpu_ca7(cpu, false);
-	imx_gpcv2_set_core1_power(false);
-	writel(0, SRC_BASE_ADDR + cpu * 8 + SRC_GPR1_MX7D + 4);
+	/*
+	 * We use the cpu jumping argument register to sync with
+	 * imx_cpu_affinity() which is running on cpu0 to kill the cpu.
+	 */
+	writel(IMX_CPU_SYNC_OFF, imx_cpu_gpr_para_offset(cpu));
+
 	return 0;
 }
 
+__secure int imx_cpu_affinity(int cpu)
+{
+	u32 val;
+
+	/* always ON for CPU0 */
+	if (cpu == 0)
+		return PSCI_AFFINITY_LEVEL_ON;
+
+	/* CPU1 is waiting to be killed */
+	if (readl(imx_cpu_gpr_para_offset(cpu)) == IMX_CPU_SYNC_OFF) {
+		imx_enable_cpu_ca7(cpu, false);
+		imx_gpcv2_set_core1_power(false);
+		writel(IMX_CPU_SYNC_ON, imx_cpu_gpr_para_offset(cpu));
+		return PSCI_AFFINITY_LEVEL_OFF;
+	}
+
+	val = readl(SRC_BASE_ADDR + SRC_A7RCR1) &
+		(1 << BP_SRC_A7RCR1_A7_CORE1_ENABLE);
+
+	return val ? PSCI_AFFINITY_LEVEL_ON : PSCI_AFFINITY_LEVEL_OFF;
+}
+
 __secure void imx_system_reset(void)
 {
 	struct wdog_regs *wdog = (struct wdog_regs *)WDOG1_BASE_ADDR;
diff --git a/arch/arm/mach-imx/mx7/psci.S b/arch/arm/mach-imx/mx7/psci.S
index 89dcf88..d6d19d5 100644
--- a/arch/arm/mach-imx/mx7/psci.S
+++ b/arch/arm/mach-imx/mx7/psci.S
@@ -57,4 +57,18 @@ psci_system_off:
 3:	wfi
 	b 3b
 
+.globl	psci_affinity_info
+psci_affinity_info:
+	push	{lr}
+
+	mov	r0, #ARM_PSCI_RET_INVAL
+	cmp	r2, #0
+	bne	out_affinity
+
+	and	r0, r1, #0xff
+	bl	imx_cpu_affinity
+
+out_affinity:
+	pop	{pc}
+
 .popsection

Add i.MX7D GPC initialization for low-power-mode support, such as system suspend/resume from the Linux kernel:

- force the IOMUXC IRQ pending to work around a GPC state machine issue;
- mask all GPC interrupts for M4/C0/C1;
- configure SCU timing;
- configure the time slot ack;
- configure C0/C1 power up/down timing;
- configure the wakeup source mechanism;
- disable DSM/RBC related settings.
Signed-off-by: Anson Huang <Anson.Huang@nxp.com>
---
No change since V1.

 arch/arm/mach-imx/mx7/soc.c | 101 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 101 insertions(+)
diff --git a/arch/arm/mach-imx/mx7/soc.c b/arch/arm/mach-imx/mx7/soc.c
index f1dea66..fb94712 100644
--- a/arch/arm/mach-imx/mx7/soc.c
+++ b/arch/arm/mach-imx/mx7/soc.c
@@ -19,6 +19,37 @@
 #include <fsl_sec.h>
 #include <asm/setup.h>
 
+#define IOMUXC_GPR1 0x4
+#define BM_IOMUXC_GPR1_IRQ 0x1000
+
+#define GPC_LPCR_A7_BSC 0x0
+#define GPC_LPCR_M4 0x8
+#define GPC_SLPCR 0x14
+#define GPC_PGC_ACK_SEL_A7 0x24
+#define GPC_IMR1_CORE0 0x30
+#define GPC_IMR1_CORE1 0x40
+#define GPC_IMR1_M4 0x50
+#define GPC_PGC_CPU_MAPPING 0xec
+#define GPC_PGC_C0_PUPSCR 0x804
+#define GPC_PGC_SCU_TIMING 0x890
+#define GPC_PGC_C1_PUPSCR 0x844
+
+#define BM_LPCR_A7_BSC_IRQ_SRC_A7_WAKEUP 0x70000000
+#define BM_LPCR_A7_BSC_CPU_CLK_ON_LPM 0x4000
+#define BM_LPCR_M4_MASK_DSM_TRIGGER 0x80000000
+#define BM_SLPCR_EN_DSM 0x80000000
+#define BM_SLPCR_RBC_EN 0x40000000
+#define BM_SLPCR_REG_BYPASS_COUNT 0x3f000000
+#define BM_SLPCR_VSTBY 0x4
+#define BM_SLPCR_SBYOS 0x2
+#define BM_SLPCR_BYPASS_PMIC_READY 0x1
+#define BM_SLPCR_EN_A7_FASTWUP_WAIT_MODE 0x10000
+
+#define BM_GPC_PGC_ACK_SEL_A7_DUMMY_PUP_ACK 0x80000000
+#define BM_GPC_PGC_ACK_SEL_A7_DUMMY_PDN_ACK 0x8000
+
+#define BM_GPC_PGC_CORE_PUPSCR 0x7fff80
+
 #if defined(CONFIG_IMX_THERMAL)
 static const struct imx_thermal_plat imx7_thermal_plat = {
 	.regs = (void *)ANATOP_BASE_ADDR,
@@ -160,6 +191,74 @@ static void imx_enet_mdio_fixup(void)
 	}
 }
 
+static void imx_gpcv2_init(void)
+{
+	u32 val, i;
+
+	/*
+	 * Force IOMUXC irq pending, so that the interrupt to GPC can be
+	 * used to deassert dsm_request signal when the signal gets
+	 * asserted unexpectedly.
+	 */
+	val = readl(IOMUXC_GPR_BASE_ADDR + IOMUXC_GPR1);
+	val |= BM_IOMUXC_GPR1_IRQ;
+	writel(val, IOMUXC_GPR_BASE_ADDR + IOMUXC_GPR1);
+
+	/* Initially mask all interrupts */
+	for (i = 0; i < 4; i++) {
+		writel(~0, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4);
+		writel(~0, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE1 + i * 4);
+		writel(~0, GPC_IPS_BASE_ADDR + GPC_IMR1_M4 + i * 4);
+	}
+
+	/* set SCU timing */
+	writel((0x59 << 10) | 0x5B | (0x2 << 20),
+	       GPC_IPS_BASE_ADDR + GPC_PGC_SCU_TIMING);
+
+	/* only external IRQs to wake up LPM and core 0/1 */
+	val = readl(GPC_IPS_BASE_ADDR + GPC_LPCR_A7_BSC);
+	val |= BM_LPCR_A7_BSC_IRQ_SRC_A7_WAKEUP;
+	writel(val, GPC_IPS_BASE_ADDR + GPC_LPCR_A7_BSC);
+
+	/* set C0 power up timing per design requirement */
+	val = readl(GPC_IPS_BASE_ADDR + GPC_PGC_C0_PUPSCR);
+	val &= ~BM_GPC_PGC_CORE_PUPSCR;
+	val |= (0x1A << 7);
+	writel(val, GPC_IPS_BASE_ADDR + GPC_PGC_C0_PUPSCR);
+
+	/* set C1 power up timing per design requirement */
+	val = readl(GPC_IPS_BASE_ADDR + GPC_PGC_C1_PUPSCR);
+	val &= ~BM_GPC_PGC_CORE_PUPSCR;
+	val |= (0x1A << 7);
+	writel(val, GPC_IPS_BASE_ADDR + GPC_PGC_C1_PUPSCR);
+
+	/* dummy ack for time slot by default */
+	writel(BM_GPC_PGC_ACK_SEL_A7_DUMMY_PUP_ACK |
+	       BM_GPC_PGC_ACK_SEL_A7_DUMMY_PDN_ACK,
+	       GPC_IPS_BASE_ADDR + GPC_PGC_ACK_SEL_A7);
+
+	/* mask M4 DSM trigger */
+	writel(readl(GPC_IPS_BASE_ADDR + GPC_LPCR_M4) |
+	       BM_LPCR_M4_MASK_DSM_TRIGGER,
+	       GPC_IPS_BASE_ADDR + GPC_LPCR_M4);
+
+	/* set mega/fast mix in A7 domain */
+	writel(0x1, GPC_IPS_BASE_ADDR + GPC_PGC_CPU_MAPPING);
+	/* DSM related settings */
+	val = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR);
+	val &= ~(BM_SLPCR_EN_DSM | BM_SLPCR_VSTBY | BM_SLPCR_RBC_EN |
+		 BM_SLPCR_SBYOS | BM_SLPCR_BYPASS_PMIC_READY |
+		 BM_SLPCR_REG_BYPASS_COUNT);
+	val |= BM_SLPCR_EN_A7_FASTWUP_WAIT_MODE;
+	writel(val, GPC_IPS_BASE_ADDR + GPC_SLPCR);
+	/*
+	 * disabling RBC needs a delay of at least 2 cycles of CKIL (32K)
+	 * due to hardware design requirement, which is
+	 * ~61us; here we use 65us to be safe
+	 */
+	udelay(65);
+}
+
 int arch_cpu_init(void)
 {
 	init_aips();
@@ -181,6 +280,8 @@ int arch_cpu_init(void)
 
 	init_snvs();
 
+	imx_gpcv2_init();
+
 	return 0;
 }

This patch adds system suspend/resume support. When the Linux kernel enters deep sleep mode, the SoC goes into the following state:

- the CA7 platform goes into STOP mode;
- the SoC goes into DSM mode;
- DDR goes into self-refresh mode;
- CPU0/SCU are powered down.

When a wake-up event arrives:

- the SoC exits DSM mode;
- the CA7 platform exits STOP mode, SCU/CPU0 power up;
- the L1 cache is invalidated;
- DDR exits self-refresh mode (a C-level sketch of the DDRC self-refresh helpers follows this list);
- secure monitor mode related initialization is done;
- the code jumps to the Linux kernel resume entry.
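For reference, here is a C-level sketch of the DDRC self-refresh enter/exit sequence that the assembly helpers in this patch implement (the *_c function names are hypothetical, used only for illustration; DDRC_IPS_BASE_ADDR and the DDRC_* offsets are the ones used and defined in the patch):

/* Request self-refresh, wait until it is entered, then gate the DRAM clock. */
static void ddrc_enter_self_refresh_c(void)
{
	writel(0, DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);

	/* wait until the read/write ports are idle */
	while (readl(DDRC_IPS_BASE_ADDR + DDRC_PSTAT) & ((1 << 16) | 0x1))
		;

	/* request self-refresh (bit 5) and wait until it is entered */
	writel(1 << 5, DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);
	while ((readl(DDRC_IPS_BASE_ADDR + DDRC_STAT) & 0x3) != 0x3)
		;
	while (!(readl(DDRC_IPS_BASE_ADDR + DDRC_STAT) & 0x20))
		;

	/* finally gate the DRAM clock (bit 3) */
	writel(readl(DDRC_IPS_BASE_ADDR + DDRC_PWRCTL) | (1 << 3),
	       DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);
}

/* Leave self-refresh and re-enable auto self-refresh. */
static void ddrc_exit_self_refresh_c(void)
{
	writel(0, DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);

	/* wait until self-refresh is left */
	while ((readl(DDRC_IPS_BASE_ADDR + DDRC_STAT) & 0x3) == 0x3)
		;

	/* re-enable auto self-refresh (bit 0) */
	writel(readl(DDRC_IPS_BASE_ADDR + DDRC_PWRCTL) | 0x1,
	       DDRC_IPS_BASE_ADDR + DDRC_PWRCTL);
}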
Below is the log of one iteration of system suspend/resume:
[ 338.824862] PM: suspend entry (deep)
[ 338.828853] PM: Syncing filesystems ... done.
[ 338.834433] Freezing user space processes ... (elapsed 0.001 seconds) done.
[ 338.842939] OOM killer disabled.
[ 338.846182] Freezing remaining freezable tasks ... (elapsed 0.001 seconds) done.
[ 338.869717] PM: suspend devices took 0.010 seconds
[ 338.877846] Disabling non-boot CPUs ...
[ 338.960301] Retrying again to check for CPU kill
[ 338.964953] CPU1 killed.
[ 338.968104] Enabling non-boot CPUs ...
[ 338.973598] CPU1 is up
[ 339.267155] mmc1: queuing unknown CIS tuple 0x80 (2 bytes)
[ 339.275833] mmc1: queuing unknown CIS tuple 0x80 (7 bytes)
[ 339.284158] mmc1: queuing unknown CIS tuple 0x80 (6 bytes)
[ 339.385065] PM: resume devices took 0.400 seconds
[ 339.389836] OOM killer enabled.
[ 339.392986] Restarting tasks ... done.
[ 339.398990] PM: suspend exit
Signed-off-by: Anson Huang <Anson.Huang@nxp.com>
---
No change since V1.

 arch/arm/mach-imx/mx7/psci-mx7.c | 343 ++++++++++++++++++++++++++++++++++++++-
 arch/arm/mach-imx/mx7/psci.S     | 200 +++++++++++++++++++++++
 2 files changed, 541 insertions(+), 2 deletions(-)
diff --git a/arch/arm/mach-imx/mx7/psci-mx7.c b/arch/arm/mach-imx/mx7/psci-mx7.c
index a5cccac..96fab22 100644
--- a/arch/arm/mach-imx/mx7/psci-mx7.c
+++ b/arch/arm/mach-imx/mx7/psci-mx7.c
@@ -8,16 +8,68 @@
 #include <asm/psci.h>
 #include <asm/secure.h>
 #include <asm/arch/imx-regs.h>
+#include <asm/armv7.h>
+#include <asm/gic.h>
 #include <common.h>
 #include <fsl_wdog.h>
 
-#define GPC_CPU_PGC_SW_PDN_REQ 0xfc
+#define GPC_LPCR_A7_BSC 0x0
+#define GPC_LPCR_A7_AD 0x4
+#define GPC_SLPCR 0x14
+#define GPC_PGC_ACK_SEL_A7 0x24
+#define GPC_IMR1_CORE0 0x30
+#define GPC_SLOT0_CFG 0xb0
 #define GPC_CPU_PGC_SW_PUP_REQ 0xf0
+#define GPC_CPU_PGC_SW_PDN_REQ 0xfc
+#define GPC_PGC_C0 0x800
 #define GPC_PGC_C1 0x840
+#define GPC_PGC_SCU 0x880
+
+#define BM_LPCR_A7_BSC_CPU_CLK_ON_LPM 0x4000
+#define BM_LPCR_A7_BSC_LPM1 0xc
+#define BM_LPCR_A7_BSC_LPM0 0x3
+#define BP_LPCR_A7_BSC_LPM0 0
+#define BM_SLPCR_EN_DSM 0x80000000
+#define BM_SLPCR_RBC_EN 0x40000000
+#define BM_SLPCR_REG_BYPASS_COUNT 0x3f000000
+#define BM_SLPCR_VSTBY 0x4
+#define BM_SLPCR_SBYOS 0x2
+#define BM_SLPCR_BYPASS_PMIC_READY 0x1
+#define BM_LPCR_A7_AD_L2PGE 0x10000
+#define BM_LPCR_A7_AD_EN_C1_PUP 0x800
+#define BM_LPCR_A7_AD_EN_C0_PUP 0x200
+#define BM_LPCR_A7_AD_EN_PLAT_PDN 0x10
+#define BM_LPCR_A7_AD_EN_C1_PDN 0x8
+#define BM_LPCR_A7_AD_EN_C0_PDN 0x2
 
 #define BM_CPU_PGC_SW_PDN_PUP_REQ_CORE1_A7 0x2
 
-/* below is for i.MX7D */
+#define BM_GPC_PGC_ACK_SEL_A7_PD_DUMMY_ACK 0x8000
+#define BM_GPC_PGC_ACK_SEL_A7_PU_DUMMY_ACK 0x80000000
+
+#define MAX_SLOT_NUMBER 10
+#define A7_LPM_WAIT 0x5
+#define A7_LPM_STOP 0xa
+
+#define BM_SYS_COUNTER_CNTCR_FCR1 0x200
+#define BM_SYS_COUNTER_CNTCR_FCR0 0x100
+
+#define REG_SET 0x4
+#define REG_CLR 0x8
+
+#define ANADIG_ARM_PLL 0x60
+#define ANADIG_DDR_PLL 0x70
+#define ANADIG_SYS_PLL 0xb0
+#define ANADIG_ENET_PLL 0xe0
+#define ANADIG_AUDIO_PLL 0xf0
+#define ANADIG_VIDEO_PLL 0x130
+#define BM_ANATOP_ARM_PLL_OVERRIDE BIT(20)
+#define BM_ANATOP_DDR_PLL_OVERRIDE BIT(19)
+#define BM_ANATOP_SYS_PLL_OVERRIDE (0x1ff << 17)
+#define BM_ANATOP_ENET_PLL_OVERRIDE BIT(13)
+#define BM_ANATOP_AUDIO_PLL_OVERRIDE BIT(24)
+#define BM_ANATOP_VIDEO_PLL_OVERRIDE BIT(24)
+
 #define SRC_GPR1_MX7D 0x074
 #define SRC_A7RCR0 0x004
 #define SRC_A7RCR1 0x008
@@ -42,6 +94,172 @@
 #define IMX_CPU_SYNC_OFF ~0
 #define IMX_CPU_SYNC_ON 0
+enum imx_gpc_slot { + CORE0_A7, + CORE1_A7, + SCU_A7, + FAST_MEGA_MIX, + MIPI_PHY, + PCIE_PHY, + USB_OTG1_PHY, + USB_OTG2_PHY, + USB_HSIC_PHY, + CORE0_M4, +}; + +enum mxc_cpu_pwr_mode { + RUN, + WAIT, + STOP, +}; + +inline void imx_pll_suspend(void) +{ + writel(BM_ANATOP_ARM_PLL_OVERRIDE, + ANATOP_BASE_ADDR + ANADIG_ARM_PLL + REG_SET); + writel(BM_ANATOP_DDR_PLL_OVERRIDE, + ANATOP_BASE_ADDR + ANADIG_DDR_PLL + REG_SET); + writel(BM_ANATOP_SYS_PLL_OVERRIDE, + ANATOP_BASE_ADDR + ANADIG_SYS_PLL + REG_SET); + writel(BM_ANATOP_ENET_PLL_OVERRIDE, + ANATOP_BASE_ADDR + ANADIG_ENET_PLL + REG_SET); + writel(BM_ANATOP_AUDIO_PLL_OVERRIDE, + ANATOP_BASE_ADDR + ANADIG_AUDIO_PLL + REG_SET); + writel(BM_ANATOP_VIDEO_PLL_OVERRIDE, + ANATOP_BASE_ADDR + ANADIG_VIDEO_PLL + REG_SET); +} + +inline void imx_pll_resume(void) +{ + writel(BM_ANATOP_ARM_PLL_OVERRIDE, + ANATOP_BASE_ADDR + ANADIG_ARM_PLL + REG_CLR); + writel(BM_ANATOP_DDR_PLL_OVERRIDE, + ANATOP_BASE_ADDR + ANADIG_DDR_PLL + REG_CLR); + writel(BM_ANATOP_SYS_PLL_OVERRIDE, + ANATOP_BASE_ADDR + ANADIG_SYS_PLL + REG_CLR); + writel(BM_ANATOP_ENET_PLL_OVERRIDE, + ANATOP_BASE_ADDR + ANADIG_ENET_PLL + REG_CLR); + writel(BM_ANATOP_AUDIO_PLL_OVERRIDE, + ANATOP_BASE_ADDR + ANADIG_AUDIO_PLL + REG_CLR); + writel(BM_ANATOP_VIDEO_PLL_OVERRIDE, + ANATOP_BASE_ADDR + ANADIG_VIDEO_PLL + REG_CLR); +} + +__secure void imx_gpcv2_set_lpm_mode(enum mxc_cpu_pwr_mode mode) +{ + u32 val1, val2, val3; + + val1 = readl(GPC_IPS_BASE_ADDR + GPC_LPCR_A7_BSC); + val2 = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR); + + /* all cores' LPM settings must be same */ + val1 &= ~(BM_LPCR_A7_BSC_LPM0 | BM_LPCR_A7_BSC_LPM1); + val1 |= BM_LPCR_A7_BSC_CPU_CLK_ON_LPM; + + val2 &= ~(BM_SLPCR_EN_DSM | BM_SLPCR_VSTBY | BM_SLPCR_RBC_EN | + BM_SLPCR_SBYOS | BM_SLPCR_BYPASS_PMIC_READY); + /* + * GPC: When improper low-power sequence is used, + * the SoC enters low power mode before the ARM core executes WFI. + * + * Software workaround: + * 1) Software should trigger IRQ #32 (IOMUX) to be always pending + * by setting IOMUX_GPR1_IRQ. + * 2) Software should then unmask IRQ #32 in GPC before setting GPC + * Low-Power mode. + * 3) Software should mask IRQ #32 right after GPC Low-Power mode + * is set. 
+ */ + switch (mode) { + case RUN: + val3 = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0); + val3 &= ~0x1; + writel(val3, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0); + break; + case WAIT: + val1 |= A7_LPM_WAIT << BP_LPCR_A7_BSC_LPM0; + val1 &= ~BM_LPCR_A7_BSC_CPU_CLK_ON_LPM; + val3 = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0); + val3 &= ~0x1; + writel(val3, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0); + break; + case STOP: + val1 |= A7_LPM_STOP << BP_LPCR_A7_BSC_LPM0; + val1 &= ~BM_LPCR_A7_BSC_CPU_CLK_ON_LPM; + val2 |= BM_SLPCR_EN_DSM; + val2 |= BM_SLPCR_SBYOS; + val2 |= BM_SLPCR_VSTBY; + val2 |= BM_SLPCR_BYPASS_PMIC_READY; + val3 = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0); + val3 |= 0x1; + writel(val3, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0); + break; + default: + return; + } + writel(val1, GPC_IPS_BASE_ADDR + GPC_LPCR_A7_BSC); + writel(val2, GPC_IPS_BASE_ADDR + GPC_SLPCR); +} + +__secure void imx_gpcv2_set_plat_power_gate_by_lpm(bool pdn) +{ + u32 val = readl(GPC_IPS_BASE_ADDR + GPC_LPCR_A7_AD); + + val &= ~(BM_LPCR_A7_AD_EN_PLAT_PDN | BM_LPCR_A7_AD_L2PGE); + if (pdn) + val |= BM_LPCR_A7_AD_EN_PLAT_PDN | BM_LPCR_A7_AD_L2PGE; + + writel(val, GPC_IPS_BASE_ADDR + GPC_LPCR_A7_AD); +} + +__secure void imx_gpcv2_set_cpu_power_gate_by_lpm(u32 cpu, bool pdn) +{ + u32 val; + + val = readl(GPC_IPS_BASE_ADDR + GPC_LPCR_A7_AD); + if (cpu == 0) { + if (pdn) + val |= BM_LPCR_A7_AD_EN_C0_PDN | + BM_LPCR_A7_AD_EN_C0_PUP; + else + val &= ~(BM_LPCR_A7_AD_EN_C0_PDN | + BM_LPCR_A7_AD_EN_C0_PUP); + } + if (cpu == 1) { + if (pdn) + val |= BM_LPCR_A7_AD_EN_C1_PDN | + BM_LPCR_A7_AD_EN_C1_PUP; + else + val &= ~(BM_LPCR_A7_AD_EN_C1_PDN | + BM_LPCR_A7_AD_EN_C1_PUP); + } + writel(val, GPC_IPS_BASE_ADDR + GPC_LPCR_A7_AD); +} + +__secure void imx_gpcv2_set_slot_ack(u32 index, enum imx_gpc_slot m_core, + bool mode, bool ack) +{ + u32 val; + + if (index >= MAX_SLOT_NUMBER) + return; + + /* set slot */ + writel(readl(GPC_IPS_BASE_ADDR + GPC_SLOT0_CFG + index * 4) | + ((mode + 1) << (m_core * 2)), + GPC_IPS_BASE_ADDR + GPC_SLOT0_CFG + index * 4); + + if (ack) { + /* set ack */ + val = readl(GPC_IPS_BASE_ADDR + GPC_PGC_ACK_SEL_A7); + /* clear dummy ack */ + val &= ~(mode ? BM_GPC_PGC_ACK_SEL_A7_PU_DUMMY_ACK : + BM_GPC_PGC_ACK_SEL_A7_PD_DUMMY_ACK); + val |= 1 << (m_core + (mode ? 16 : 0)); + writel(val, GPC_IPS_BASE_ADDR + GPC_PGC_ACK_SEL_A7); + } +} + static inline void imx_gpcv2_set_m_core_pgc(bool enable, u32 offset) { writel(enable, GPC_IPS_BASE_ADDR + offset); @@ -137,3 +355,124 @@ __secure void imx_system_off(void) val |= BP_SNVS_LPCR_DP_EN | BP_SNVS_LPCR_TOP; writel(val, SNVS_BASE_ADDR + SNVS_LPCR); } + +__secure void imx_system_counter_resume(void) +{ + u32 val; + + val = readl(SYSCNT_CTRL_IPS_BASE_ADDR); + val &= ~BM_SYS_COUNTER_CNTCR_FCR1; + val |= BM_SYS_COUNTER_CNTCR_FCR0; + writel(val, SYSCNT_CTRL_IPS_BASE_ADDR); +} + +__secure void imx_system_counter_suspend(void) +{ + u32 val; + + val = readl(SYSCNT_CTRL_IPS_BASE_ADDR); + val &= ~BM_SYS_COUNTER_CNTCR_FCR0; + val |= BM_SYS_COUNTER_CNTCR_FCR1; + writel(val, SYSCNT_CTRL_IPS_BASE_ADDR); +} + +__secure void gic_resume(void) +{ + u32 itlinesnr, i; + u32 gic_dist_addr = GIC400_ARB_BASE_ADDR + GIC_DIST_OFFSET; + + /* enable the GIC distributor */ + writel(readl(gic_dist_addr + GICD_CTLR) | 0x03, + gic_dist_addr + GICD_CTLR); + + /* TYPER[4:0] contains an encoded number of available interrupts */ + itlinesnr = readl(gic_dist_addr + GICD_TYPER) & 0x1f; + + /* set all bits in the GIC group registers to one to allow access + * from non-secure state. 
The first 32 interrupts are private per + * CPU and will be set later when enabling the GIC for each core + */ + for (i = 1; i <= itlinesnr; i++) + writel((u32)-1, gic_dist_addr + GICD_IGROUPRn + 4 * i); +} + +__secure void imx_udelay(u32 usec) +{ + u32 freq; + u64 start, end; + + asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (freq)); + asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (start)); + do { + asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (end)); + if ((end - start) > usec * (freq / 1000000)) + break; + } while (1); +} + +__secure void imx_system_resume(void) +{ + unsigned int i, val, imr[4]; + + imx_pll_resume(); + imx_system_counter_resume(); + imx_gpcv2_set_lpm_mode(RUN); + imx_gpcv2_set_cpu_power_gate_by_lpm(0, false); + imx_gpcv2_set_plat_power_gate_by_lpm(false); + imx_gpcv2_set_m_core_pgc(false, GPC_PGC_C0); + imx_gpcv2_set_m_core_pgc(false, GPC_PGC_SCU); + + /* + * need to mask all interrupts in GPC before + * operating RBC configurations + */ + for (i = 0; i < 4; i++) { + imr[i] = readl(GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4); + writel(~0, GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4); + } + + /* configure RBC enable bit */ + val = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR); + val &= ~BM_SLPCR_RBC_EN; + writel(val, GPC_IPS_BASE_ADDR + GPC_SLPCR); + + /* configure RBC count */ + val = readl(GPC_IPS_BASE_ADDR + GPC_SLPCR); + val &= ~BM_SLPCR_REG_BYPASS_COUNT; + writel(val, GPC_IPS_BASE_ADDR + GPC_SLPCR); + + /* + * need to delay at least 2 cycles of CKIL(32K) + * due to hardware design requirement, which is + * ~61us, here we use 65us for safe + */ + imx_udelay(65); + + /* restore GPC interrupt mask settings */ + for (i = 0; i < 4; i++) + writel(imr[i], GPC_IPS_BASE_ADDR + GPC_IMR1_CORE0 + i * 4); + + /* initialize gic distributor */ + gic_resume(); + _nonsec_init(); +} + +__secure void imx_system_suspend(void) +{ + /* overwrite PLL to be controlled by low power mode */ + imx_pll_suspend(); + imx_system_counter_suspend(); + /* set CA7 platform to enter STOP mode */ + imx_gpcv2_set_lpm_mode(STOP); + /* enable core0/scu power down/up with low power mode */ + imx_gpcv2_set_cpu_power_gate_by_lpm(0, true); + imx_gpcv2_set_plat_power_gate_by_lpm(true); + /* time slot settings for core0 and scu */ + imx_gpcv2_set_slot_ack(0, CORE0_A7, false, false); + imx_gpcv2_set_slot_ack(1, SCU_A7, false, true); + imx_gpcv2_set_slot_ack(5, SCU_A7, true, false); + imx_gpcv2_set_slot_ack(6, CORE0_A7, true, true); + imx_gpcv2_set_m_core_pgc(true, GPC_PGC_C0); + imx_gpcv2_set_m_core_pgc(true, GPC_PGC_SCU); + psci_v7_flush_dcache_all(); +} diff --git a/arch/arm/mach-imx/mx7/psci.S b/arch/arm/mach-imx/mx7/psci.S index d6d19d5..cbe6781 100644 --- a/arch/arm/mach-imx/mx7/psci.S +++ b/arch/arm/mach-imx/mx7/psci.S @@ -10,11 +10,108 @@ #include <asm/armv7.h> #include <asm/arch-armv7/generictimer.h> #include <asm/psci.h> +#include <asm/gic.h> + +#define DDRC_STAT 0x4 +#define DDRC_PWRCTL 0x30 +#define DDRC_PSTAT 0x3fc + +#define SRC_GPR1 0x74 +#define SRC_GPR2 0x78
.pushsection ._secure.text, "ax"
.arch_extension sec
+.global ddrc_enter_self_refresh +ddrc_enter_self_refresh: + /* let DDR out of self-refresh */ + ldr r1, =0x0 + str r1, [r0, #DDRC_PWRCTL] + + /* wait rw port_busy clear */ + ldr r2, =(0x1 << 16) + orr r2, r2, #0x1 +1: + ldr r1, [r0, #DDRC_PSTAT] + ands r1, r1, r2 + bne 1b + + /* enter self-refresh bit 5 */ + ldr r1, =(0x1 << 5) + str r1, [r0, #DDRC_PWRCTL] + + /* wait until self-refresh mode entered */ +2: + ldr r1, [r0, #DDRC_STAT] + and r1, r1, #0x3 + cmp r1, #0x3 + bne 2b +3: + ldr r1, [r0, #DDRC_STAT] + ands r1, r1, #0x20 + beq 3b + + /* disable dram clk */ + ldr r1, [r0, #DDRC_PWRCTL] + orr r1, r1, #(1 << 3) + str r1, [r0, #DDRC_PWRCTL] + + mov pc, lr + +.global ddrc_exit_self_refresh +ddrc_exit_self_refresh: + /* let DDR out of self-refresh */ + ldr r1, =0x0 + str r1, [r0, #DDRC_PWRCTL] + + /* wait until self-refresh mode entered */ +1: + ldr r1, [r0, #DDRC_STAT] + and r1, r1, #0x3 + cmp r1, #0x3 + beq 1b + + /* enable auto self-refresh */ + ldr r1, [r0, #DDRC_PWRCTL] + orr r1, r1, #(1 << 0) + str r1, [r0, #DDRC_PWRCTL] + + mov pc, lr + +.globl v7_invalidate_l1 +v7_invalidate_l1: + mov r0, #0 + mcr p15, 2, r0, c0, c0, 0 + mrc p15, 1, r0, c0, c0, 0 + + movw r1, #0x7fff + and r2, r1, r0, lsr #13 + + movw r1, #0x3ff + + and r3, r1, r0, lsr #3 @ NumWays - 1 + add r2, r2, #1 @ NumSets + + and r0, r0, #0x7 + add r0, r0, #4 @ SetShift + + clz r1, r3 @ WayShift + add r4, r3, #1 @ NumWays +1: sub r2, r2, #1 @ NumSets-- + mov r3, r4 @ Temp = NumWays +2: subs r3, r3, #1 @ Temp-- + mov r5, r3, lsl r1 + mov r6, r2, lsl r0 + orr r5, r5, r6 @ Reg = (Temp<<WayShift)|(NumSets<<SetShift) + mcr p15, 0, r5, c7, c6, 2 + bgt 2b + cmp r2, #0 + bgt 1b + dsb st + isb + mov pc, lr + .globl psci_cpu_on psci_cpu_on: push {r4, r5, lr} @@ -71,4 +168,107 @@ psci_affinity_info: out_affinity: pop {pc}
+.globl psci_system_resume +psci_system_resume: + mov sp, r0 + + /* force DDR exit self-refresh */ + ldr r0, =DDRC_IPS_BASE_ADDR + bl ddrc_exit_self_refresh + + /* invalidate L1 I-cache first */ + mov r6, #0x0 + mcr p15, 0, r6, c7, c5, 0 + mcr p15, 0, r6, c7, c5, 6 + /* enable the Icache and branch prediction */ + mov r6, #0x1800 + mcr p15, 0, r6, c1, c0, 0 + isb + bl v7_invalidate_l1 + + mov r0, #0 + bl psci_get_target_pc @ target PC => r0 + push {r0} @ save cpu_resume since _nonsec_init will clean it + bl imx_system_resume + pop {r1} + /* save cpu0 entry r1 */ + mov r0, #0 + mov r2, #0 + bl psci_save + + ldr lr, =psci_cpu_entry + mov pc, lr + +.globl psci_system_suspend +psci_system_suspend: + /* save cpu0 entry r1 */ + mov r0, #0 + mov r2, #0 + bl psci_save + + bl imx_system_suspend + + ldr r0, =SRC_BASE_ADDR + ldr r1, =psci_system_resume + str r1, [r0, #SRC_GPR1] + str sp, [r0, #SRC_GPR2] + + /* disable GIC distributor */ + ldr r0, =GIC400_ARB_BASE_ADDR + ldr r1, =0x0 + ldr r2, =GIC_DIST_OFFSET + str r1, [r0, r2] + + /* force DDR into self-refresh */ + ldr r0, =DDRC_IPS_BASE_ADDR + bl ddrc_enter_self_refresh + + ldr r11, =GPC_IPS_BASE_ADDR + ldr r4, [r11, #0x30] + ldr r5, [r11, #0x34] + ldr r6, [r11, #0x38] + ldr r7, [r11, #0x3c] + + /* + * enable the RBC bypass counter here + * to hold off the interrupts. RBC counter + * = 8 (240us). With this setting, the latency + * from wakeup interrupt to ARM power up + * is ~250uS. + */ + ldr r8, [r11, #0x14] + bic r8, r8, #(0x3f << 24) + orr r8, r8, #(0x8 << 24) + str r8, [r11, #0x14] + + /* enable the counter. */ + ldr r8, [r11, #0x14] + orr r8, r8, #(0x1 << 30) + str r8, [r11, #0x14] + + /* unmask all the GPC interrupts. */ + str r4, [r11, #0x30] + str r5, [r11, #0x34] + str r6, [r11, #0x38] + str r7, [r11, #0x3c] + + /* + * now delay for a short while (3usec) + * ARM is at 1GHz at this point + * so a short loop should be enough. + * this delay is required to ensure that + * the RBC counter can start counting in + * case an interrupt is already pending + * or in case an interrupt arrives just + * as ARM is about to assert DSM_request. + */ + ldr r7, =2000 +rbc_loop: + subs r7, r7, #0x1 + bne rbc_loop + +dsm: + wfi + b dsm + .popsection

It is necessary to implement the psci_features callback to report the supported features, so that the Linux kernel can decide whether to proceed with a given PSCI call. For example, deep sleep mode is ONLY entered when psci_features reports that SYSTEM_SUSPEND is supported; otherwise the kernel falls back to s2idle.
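As an illustration, a C-level equivalent of the table walk that the psci_features assembly below implements might look like this (sketch only; psci_features_sketch is a hypothetical name, and the table is the same list of <function id, return value> pairs terminated by a zero id):

/* Supported PSCI functions and the value psci_features returns for them. */
static const u32 imx_psci_supported[][2] = {
	{ ARM_PSCI_0_2_FN_CPU_OFF,        ARM_PSCI_RET_SUCCESS },
	{ ARM_PSCI_0_2_FN_CPU_ON,         ARM_PSCI_RET_SUCCESS },
	{ ARM_PSCI_0_2_FN_AFFINITY_INFO,  ARM_PSCI_RET_SUCCESS },
	{ ARM_PSCI_0_2_FN_SYSTEM_OFF,     ARM_PSCI_RET_SUCCESS },
	{ ARM_PSCI_0_2_FN_SYSTEM_RESET,   ARM_PSCI_RET_SUCCESS },
	{ ARM_PSCI_1_0_FN_SYSTEM_SUSPEND, ARM_PSCI_RET_SUCCESS },
	{ 0,                              ARM_PSCI_RET_NI },
};

/* Walk the table; unknown function ids report "not implemented". */
int psci_features_sketch(u32 psci_fid)
{
	int i;

	for (i = 0; imx_psci_supported[i][0]; i++)
		if (imx_psci_supported[i][0] == psci_fid)
			return imx_psci_supported[i][1];

	return ARM_PSCI_RET_NI;
}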
Signed-off-by: Anson Huang <Anson.Huang@nxp.com>
---
No change since V1.

 arch/arm/mach-imx/mx7/psci.S | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)
diff --git a/arch/arm/mach-imx/mx7/psci.S b/arch/arm/mach-imx/mx7/psci.S
index cbe6781..694cef7 100644
--- a/arch/arm/mach-imx/mx7/psci.S
+++ b/arch/arm/mach-imx/mx7/psci.S
@@ -23,6 +23,22 @@
 
 .arch_extension sec
 
+imx_psci_supported_table:
+	.word	ARM_PSCI_0_2_FN_CPU_OFF
+	.word	ARM_PSCI_RET_SUCCESS
+	.word	ARM_PSCI_0_2_FN_CPU_ON
+	.word	ARM_PSCI_RET_SUCCESS
+	.word	ARM_PSCI_0_2_FN_AFFINITY_INFO
+	.word	ARM_PSCI_RET_SUCCESS
+	.word	ARM_PSCI_0_2_FN_SYSTEM_OFF
+	.word	ARM_PSCI_RET_SUCCESS
+	.word	ARM_PSCI_0_2_FN_SYSTEM_RESET
+	.word	ARM_PSCI_RET_SUCCESS
+	.word	ARM_PSCI_1_0_FN_SYSTEM_SUSPEND
+	.word	ARM_PSCI_RET_SUCCESS
+	.word	0
+	.word	ARM_PSCI_RET_NI
+
 .global ddrc_enter_self_refresh
 ddrc_enter_self_refresh:
 	/* let DDR out of self-refresh */
@@ -112,6 +128,20 @@ v7_invalidate_l1:
 	isb
 	mov	pc, lr
 
+.globl	psci_features
+psci_features:
+	adr	r2, imx_psci_supported_table
+1:	ldr	r3, [r2]
+	cmp	r3, #0
+	beq	out_psci_features
+	cmp	r1, r3
+	addne	r2, r2, #8
+	bne	1b
+
+out_psci_features:
+	ldr	r0, [r2, #4]
+	bx	lr
+
 .globl	psci_cpu_on
 psci_cpu_on:
 	push	{r4, r5, lr}