[U-Boot] [PATCH 02/11] Exynos542x: CPU: Power down all secondary cores

This patch adds code to shut down the secondary cores. When U-Boot comes up, all secondary cores appear powered on, which is undesirable and causes side effects while these cores are initialized in the kernel.
Secondary core power-down happens in the following steps:

Step-1: After Exynos power-on, the primary core starts executing first.
Step-2: In the iROM code, every core has to check two flags, i.e. addresses 0x02020028 & 0x02020004.
Step-3: Initially, 0x02020028 is 0 for all cores, and 0x02020004 holds a jump address for the primary core and 0 for all secondary cores.
Step-4: Therefore, the primary core follows normal iROM execution and eventually jumps to BL1, whereas all secondary cores enter WFE.
Step-5: When the primary core reaches the function secondary_cores_configure, it stores a pointer to the function power_down_core at 0x02020004 and issues DSB and SEV for all cores, so that they come out of WFE and jump to the power_down_core function.
Step-6: Ultimately, because of power_down_core, all secondary cores shut down.
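As an illustration of Steps 2 through 5, the per-core decision made in iROM can be rendered in C roughly as follows (a hypothetical sketch for clarity only; the real logic lives in masked iROM and the function name is invented):

#include <stdint.h>

#define JUMP_FLAG_ADDR	0x02020028	/* 0 for all cores at power-on */
#define JUMP_ADDR_ADDR	0x02020004	/* jump target; 0 for secondary cores */

static void irom_boot_decision(void)
{
	for (;;) {
		uint32_t target = *(volatile uint32_t *)JUMP_ADDR_ADDR;

		if (target) {
			/* Primary core: continues on toward BL1.
			 * Secondary cores: jump once U-Boot fills the
			 * slot, i.e. to power_down_core after Step-5. */
			((void (*)(void))target)();
		}
		/* Slot still empty: sleep until the primary issues SEV */
		__asm__ __volatile__("wfe");
	}
}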
Signed-off-by: Kimoon Kim <kimoon.kim@samsung.com>
Signed-off-by: Akshay Saraswat <akshay.s@samsung.com>
---
 arch/arm/cpu/armv7/exynos/lowlevel_init.c | 62 ++++++++++++++++++++++
 arch/arm/include/asm/arch-exynos/cpu.h    | 30 +++++++++++
 arch/arm/include/asm/arch-exynos/system.h | 87 +++++++++++++++++++++++++++++++
 3 files changed, 179 insertions(+)
diff --git a/arch/arm/cpu/armv7/exynos/lowlevel_init.c b/arch/arm/cpu/armv7/exynos/lowlevel_init.c
index 83e1dcf..43c957b 100644
--- a/arch/arm/cpu/armv7/exynos/lowlevel_init.c
+++ b/arch/arm/cpu/armv7/exynos/lowlevel_init.c
@@ -31,6 +31,7 @@
 #include <asm/arch/tzpc.h>
 #include <asm/arch/periph.h>
 #include <asm/arch/pinmux.h>
+#include <asm/arch/system.h>
 #include "common_setup.h"
 /* These are the things we can do during low-level init */

@@ -42,6 +43,62 @@ enum {
 	DO_POWER	= 1 << 4,
 };
+#ifdef CONFIG_EXYNOS5420
+/*
+ * Pointer to this function is stored in iRam which is used
+ * for jump and power down of a specific core.
+ */
+static void power_down_core(void)
+{
+	uint32_t tmp, core_id, core_config;
+
+	/* Get the core id */
+	mrc_mpafr(core_id);
+	tmp = core_id & 0x3;
+	core_id = (core_id >> 6) & ~3;
+	core_id |= tmp;
+
+	/* Set the status of the core to low */
+	core_config = (core_id * CORE_CONFIG_OFFSET);
+	core_config += ARM_CORE0_CONFIG;
+	writel(0x0, core_config);
+
+	/* Core enter WFI */
+	wfi();
+}
+
+/*
+ * Configurations for secondary cores are inapt at this stage.
+ * Reconfigure secondary cores. Shutdown and change the status
+ * of all cores except the primary core.
+ */
+static void secondary_cores_configure(void)
+{
+	uint32_t core_id;
+
+	/* Store jump address for power down of secondary cores */
+	writel((uint32_t)&power_down_core, CONFIG_PHY_IRAM_BASE + 0x4);
+
+	/* Need all core power down check */
+	dsb();
+	sev();
+
+	/*
+	 * Power down all cores(secondary) while primary core must
+	 * wait for all cores to go down.
+	 */
+	for (core_id = 1; core_id != CORE_COUNT; core_id++) {
+		while ((readl(ARM_CORE0_STATUS
+			+ (core_id * CORE_CONFIG_OFFSET))
+			& 0xff) != 0x0) {
+			isb();
+			sev();
+		}
+		isb();
+	}
+}
+#endif
+
 int do_lowlevel_init(void)
 {
 	uint32_t reset_status;
@@ -49,6 +106,11 @@ int do_lowlevel_init(void)
arch_cpu_init();
+#ifdef CONFIG_EXYNOS5420
+	/* Reconfigure secondary cores */
+	secondary_cores_configure();
+#endif
+
 	reset_status = get_reset_status();
 	switch (reset_status) {

diff --git a/arch/arm/include/asm/arch-exynos/cpu.h b/arch/arm/include/asm/arch-exynos/cpu.h
index 29674ad..f1f9994 100644
--- a/arch/arm/include/asm/arch-exynos/cpu.h
+++ b/arch/arm/include/asm/arch-exynos/cpu.h
@@ -177,6 +177,7 @@
 #define EXYNOS5420_GPIO_PART1_BASE	0x14010000
 #define EXYNOS5420_MIPI_DSIM_BASE	0x14500000
 #define EXYNOS5420_DP_BASE		0x145B0000
+#define EXYNOS5420_INF_REG_BASE		0x10040800
 #define EXYNOS5420_USBPHY_BASE		DEVICE_NOT_AVAILABLE
 #define EXYNOS5420_USBOTG_BASE		DEVICE_NOT_AVAILABLE
@@ -186,6 +187,35 @@
 #define EXYNOS5420_USB3PHY_BASE		DEVICE_NOT_AVAILABLE
 #define EXYNOS5420_USB_HOST_XHCI_BASE	DEVICE_NOT_AVAILABLE
+#define ARM_CORE0_CONFIG	(EXYNOS5420_POWER_BASE + 0x2000)
+#define ARM_CORE0_STATUS	(EXYNOS5420_POWER_BASE + 0x2004)
+#define CORE_CONFIG_OFFSET	0x80
+#define CORE_COUNT		0x8
+
+/*
+ * POWER
+ */
+#define PMU_BASE		EXYNOS5420_POWER_BASE
+#define SW_RST_REG_OFFSET	0x400
+
+#define INF_REG_BASE		EXYNOS5420_INF_REG_BASE
+#define INF_REG0_OFFSET		0x00
+#define INF_REG1_OFFSET		0x04
+#define INF_REG2_OFFSET		0x08
+#define INF_REG3_OFFSET		0x0C
+#define INF_REG4_OFFSET		0x10
+#define INF_REG5_OFFSET		0x14
+#define INF_REG6_OFFSET		0x18
+#define INF_REG7_OFFSET		0x1C
+
+#define PMU_SPARE_BASE		(EXYNOS5420_INF_REG_BASE + 0x100)
+#define PMU_SPARE_0		PMU_SPARE_BASE
+#define PMU_SPARE_1		(PMU_SPARE_BASE + 0x4)
+#define PMU_SPARE_2		(PMU_SPARE_BASE + 0x8)
+#define PMU_SPARE_3		(PMU_SPARE_BASE + 0xc)
+#define RST_FLAG_REG		PMU_SPARE_BASE
+#define RST_FLAG_VAL		0xfcba0d10
+
 #ifndef __ASSEMBLY__
 #include <asm/io.h>
 /* CPU detection macros */

diff --git a/arch/arm/include/asm/arch-exynos/system.h b/arch/arm/include/asm/arch-exynos/system.h
index 4968d3d..86903c3 100644
--- a/arch/arm/include/asm/arch-exynos/system.h
+++ b/arch/arm/include/asm/arch-exynos/system.h
@@ -37,6 +37,93 @@ struct exynos5_sysreg {
#define USB20_PHY_CFG_HOST_LINK_EN (1 << 0)
+#ifdef CONFIG_EXYNOS5420
+/*
+ * Data Synchronization Barrier acts as a special kind of memory barrier.
+ * No instruction in program order after this instruction executes until
+ * this instruction completes. This instruction completes when:
+ * - All explicit memory accesses before this instruction complete.
+ * - All Cache, Branch predictor and TLB maintenance operations before
+ *   this instruction complete.
+ */
+#define dsb() __asm__ __volatile__ ("dsb\n\t" : : );
+
+/*
+ * This instruction causes an event to be signaled to all cores
+ * within a multiprocessor system. If SEV is implemented,
+ * WFE must also be implemented.
+ */
+#define sev() __asm__ __volatile__ ("sev\n\t" : : );
+/*
+ * If the Event Register is not set, WFE suspends execution until
+ * one of the following events occurs:
+ * - an IRQ interrupt, unless masked by the CPSR I-bit
+ * - an FIQ interrupt, unless masked by the CPSR F-bit
+ * - an Imprecise Data abort, unless masked by the CPSR A-bit
+ * - a Debug Entry request, if Debug is enabled
+ * - an Event signaled by another processor using the SEV instruction.
+ * If the Event Register is set, WFE clears it and returns immediately.
+ * If WFE is implemented, SEV must also be implemented.
+ */
+#define wfe() __asm__ __volatile__ ("wfe\n\t" : : );
+
+/* Move 0xd3 value to CPSR register to enable SVC mode */
+#define svc32_mode_en() __asm__ __volatile__ \
+			("@ I&F disable, Mode: 0x13 - SVC\n\t" \
+			 "msr cpsr_c, #0x13|0xC0\n\t" : : )
+
+/* Set program counter with the given value */
+#define set_pc(x) __asm__ __volatile__ ("mov pc, %0\n\t" : : "r"(x))
+
+/* Read Main Id register */
+#define mrc_midr(x) __asm__ __volatile__ \
+			("mrc p15, 0, %0, c0, c0, 0\n\t" : "=r"(x) : )
+
+/* Read Multiprocessor Affinity Register */
+#define mrc_mpafr(x) __asm__ __volatile__ \
+			("mrc p15, 0, %0, c0, c0, 5\n\t" : "=r"(x) : )
+
+/* Read System Control Register */
+#define mrc_sctlr(x) __asm__ __volatile__ \
+			("mrc p15, 0, %0, c1, c0, 0\n\t" : "=r"(x) : )
+
+/* Read Auxiliary Control Register */
+#define mrc_auxr(x) __asm__ __volatile__ \
+			("mrc p15, 0, %0, c1, c0, 1\n\t" : "=r"(x) : )
+
+/* Read L2 Control register */
+#define mrc_l2_ctlr(x) __asm__ __volatile__ \
+			("mrc p15, 1, %0, c9, c0, 2\n\t" : "=r"(x) : )
+
+/* Read L2 Auxilliary Control register */
+#define mrc_l2_aux_ctlr(x) __asm__ __volatile__ \
+			("mrc p15, 1, %0, c15, c0, 0\n\t" : "=r"(x) : )
+
+/* Write System Control Register */
+#define mcr_sctlr(x) __asm__ __volatile__ \
+			("mcr p15, 0, %0, c1, c0, 0\n\t" : : "r"(x))
+
+/* Write Auxiliary Control Register */
+#define mcr_auxr(x) __asm__ __volatile__ \
+			("mcr p15, 0, %0, c1, c0, 1\n\t" : : "r"(x))
+
+/* Invalidate all instruction caches to PoU */
+#define mcr_icache(x) __asm__ __volatile__ \
+			("mcr p15, 0, %0, c7, c5, 0\n\t" : : "r"(x))
+
+/* Invalidate unified TLB */
+#define mcr_tlb(x) __asm__ __volatile__ \
+			("mcr p15, 0, %0, c8, c7, 0\n\t" : : "r"(x))
+
+/* Write L2 Control register */
+#define mcr_l2_ctlr(x) __asm__ __volatile__ \
+			("mcr p15, 1, %0, c9, c0, 2\n\t" : : "r"(x))
+
+/* Write L2 Auxilliary Control register */
+#define mcr_l2_aux_ctlr(x) __asm__ __volatile__ \
+			("mcr p15, 1, %0, c15, c0, 0\n\t" : : "r"(x))
+#endif
+
 void set_usbhost_mode(unsigned int mode);
 void set_system_display_ctrl(void);
 int exynos_lcd_early_init(const void *blob);

This patch adds a workaround for ARM errata 798870, which says: "If back-to-back speculative cache line fills (fill A and fill B) are issued from the L1 data cache of a CPU to the L2 cache, the second request (fill B) is then cancelled, and the second request would have detected a hazard against a recent write or eviction (write B) to the same cache line as fill B, then the L2 logic might deadlock."
Signed-off-by: Kimoon Kim <kimoon.kim@samsung.com>
Signed-off-by: Akshay Saraswat <akshay.s@samsung.com>
---
 arch/arm/cpu/armv7/exynos/lowlevel_init.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)
diff --git a/arch/arm/cpu/armv7/exynos/lowlevel_init.c b/arch/arm/cpu/armv7/exynos/lowlevel_init.c
index 43c957b..7073c5c 100644
--- a/arch/arm/cpu/armv7/exynos/lowlevel_init.c
+++ b/arch/arm/cpu/armv7/exynos/lowlevel_init.c
@@ -45,6 +45,28 @@ enum {
 #ifdef CONFIG_EXYNOS5420
 /*
+ * Set L2ACTLR[7] to reissue any memory transaction in the L2 that has been
+ * stalled for 1024 cycles to verify that its hazard condition still exists.
+ */
+void set_l2cache(void)
+{
+	uint32_t val;
+
+	/* Read MIDR for Primary Part Number*/
+	mrc_midr(val);
+	val = (val >> 4);
+	val &= 0xf;
+
+	/* L2ACTLR[7]: Enable hazard detect timeout for A15 */
+	if (val == 0xf) {
+		mrc_l2_aux_ctlr(val);
+		val |= (1 << 7);
+		mcr_l2_aux_ctlr(val);
+		mrc_l2_ctlr(val);
+	}
+}
+
+/*
  * Pointer to this function is stored in iRam which is used
  * for jump and power down of a specific core.
  */

Hi Akshay,
On 15 January 2015 at 06:42, Akshay Saraswat <akshay.s@samsung.com> wrote:
> [snip]
>
> +	/* Read MIDR for Primary Part Number*/
Nit: Space before */
> [snip]
Reviewed-by: Simon Glass <sjg@chromium.org>

Tested on snow, pit, pi
Tested-by: Simon Glass <sjg@chromium.org>

Regards,
Simon

This patch adds a workaround for the ARM errata 799270, which says: "If the L2 cache logic clock is stopped because of L2 inactivity, setting or clearing the ACTLR.SMP bit might not be effective. The bit is modified in the ACTLR, meaning a read of the register returns the updated value. However, the logic that uses that bit retains the previous value."
Signed-off-by: Kimoon Kim <kimoon.kim@samsung.com>
Signed-off-by: Akshay Saraswat <akshay.s@samsung.com>
---
 arch/arm/cpu/armv7/exynos/lowlevel_init.c | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)
diff --git a/arch/arm/cpu/armv7/exynos/lowlevel_init.c b/arch/arm/cpu/armv7/exynos/lowlevel_init.c
index 7073c5c..3097382 100644
--- a/arch/arm/cpu/armv7/exynos/lowlevel_init.c
+++ b/arch/arm/cpu/armv7/exynos/lowlevel_init.c
@@ -45,6 +45,28 @@ enum {
 #ifdef CONFIG_EXYNOS5420
 /*
+ * Ensure that the L2 logic has been used within the previous 256 cycles
+ * before modifying the ACTLR.SMP bit. This is required during boot before
+ * MMU has been enabled, or during a specified reset or power down sequence.
+ */
+void enable_smp(void)
+{
+	uint32_t temp, val;
+
+	/* Enable SMP mode */
+	mrc_auxr(temp);
+	temp |= (1 << 6);
+
+	/* Dummy read to assure L2 access */
+	val = readl(INF_REG_BASE);
+	val &= 0;
+	temp |= val;
+	mcr_auxr(temp);
+	dsb();
+	isb();
+}
+
+/*
  * Set L2ACTLR[7] to reissue any memory transaction in the L2 that has been
  * stalled for 1024 cycles to verify that its hazard condition still exists.
  */

On 15 January 2015 at 06:42, Akshay Saraswat <akshay.s@samsung.com> wrote:
> [snip]
Reviewed-by: Simon Glass <sjg@chromium.org>

Tested on snow, pit, pi
Tested-by: Simon Glass <sjg@chromium.org>

iROM logic provides an undesired jump address for CPU2. This patch adds a programmable substitute for the part of the iROM logic that wakes up cores and provides jump addresses. It makes all secondary cores jump to one particular address, which removes the possibility of CPU2 jumping to a wrong address and producing undesired results.
Logic of the workaround:
Step-1: iROM code checks the value at address 0x2020028.
Step-2: If the value is 0xc9cfcfcf, it jumps to the address (0x2020000 + CPUid*4); else, it continues executing normally.
Step-3: The primary core puts the secondary cores in WFE, stores 0xc9cfcfcf at 0x2020028 and the jump address (a pointer to the function low_power_start) at (0x2020000 + CPUid*4).
Step-4: When the secondary cores receive the event signal, they jump to this address and continue execution.
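The primary core's half of Step-3 might be sketched in C as below (a sketch only; the helper name and loop structure are assumptions — the actual implementation is secondary_cores_configure() together with the relocated sec_boot.S code):

#include <stdint.h>

#define JUMP_FLAG_ADDR	0x02020028	/* magic checked by iROM */
#define JUMP_TABLE_BASE	0x02020000	/* per-CPU slots at CPUid * 4 */
#define JUMP_MAGIC	0xc9cfcfcf

static void publish_secondary_entry(void (*entry)(void), unsigned int ncpus)
{
	unsigned int cpu;

	/* Fill every secondary core's jump slot */
	for (cpu = 1; cpu < ncpus; cpu++)
		*(volatile uint32_t *)(JUMP_TABLE_BASE + cpu * 4) =
				(uint32_t)entry;

	/* Arm the magic flag so iROM takes the jump path */
	*(volatile uint32_t *)JUMP_FLAG_ADDR = JUMP_MAGIC;

	__asm__ __volatile__("dsb");	/* make the stores visible */
	__asm__ __volatile__("sev");	/* wake cores parked in WFE */
}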
Signed-off-by: Kimoon Kim <kimoon.kim@samsung.com>
Signed-off-by: Akshay Saraswat <akshay.s@samsung.com>
---
 arch/arm/cpu/armv7/exynos/Makefile        |   2 +
 arch/arm/cpu/armv7/exynos/lowlevel_init.c |  90 +++++++++++++++----
 arch/arm/cpu/armv7/exynos/sec_boot.S      | 145 ++++++++++++++++++++++++++++++
 3 files changed, 219 insertions(+), 18 deletions(-)
 create mode 100644 arch/arm/cpu/armv7/exynos/sec_boot.S
diff --git a/arch/arm/cpu/armv7/exynos/Makefile b/arch/arm/cpu/armv7/exynos/Makefile
index e207bd6..8542f89 100644
--- a/arch/arm/cpu/armv7/exynos/Makefile
+++ b/arch/arm/cpu/armv7/exynos/Makefile
@@ -7,6 +7,8 @@
obj-y += clock.o power.o soc.o system.o pinmux.o tzpc.o
+obj-$(CONFIG_EXYNOS5420) += sec_boot.o
+
 ifdef CONFIG_SPL_BUILD
 obj-$(CONFIG_EXYNOS5) += clock_init_exynos5.o
 obj-$(CONFIG_EXYNOS5) += dmc_common.o dmc_init_ddr3.o

diff --git a/arch/arm/cpu/armv7/exynos/lowlevel_init.c b/arch/arm/cpu/armv7/exynos/lowlevel_init.c
index 3097382..d3c466e 100644
--- a/arch/arm/cpu/armv7/exynos/lowlevel_init.c
+++ b/arch/arm/cpu/armv7/exynos/lowlevel_init.c
@@ -49,7 +49,7 @@ enum {
  * before modifying the ACTLR.SMP bit. This is required during boot before
  * MMU has been enabled, or during a specified reset or power down sequence.
  */
-void enable_smp(void)
+static void enable_smp(void)
 {
 	uint32_t temp, val;
@@ -70,7 +70,7 @@ void enable_smp(void)
  * Set L2ACTLR[7] to reissue any memory transaction in the L2 that has been
  * stalled for 1024 cycles to verify that its hazard condition still exists.
  */
-void set_l2cache(void)
+static void set_l2cache(void)
 {
 	uint32_t val;
@@ -89,6 +89,62 @@ void set_l2cache(void)
 }
 /*
+ * Power up secondary CPUs.
+ */
+static void secondary_cpu_start(void)
+{
+	enable_smp();
+	svc32_mode_en();
+	set_pc(CONFIG_EXYNOS_RELOCATE_CODE_BASE);
+}
+
+/*
+ * This is the entry point of hotplug-in and
+ * cluster switching.
+ */
+static void low_power_start(void)
+{
+	uint32_t val, reg_val;
+
+	reg_val = readl(RST_FLAG_REG);
+	if (reg_val != RST_FLAG_VAL) {
+		writel(0x0, CONFIG_LOWPOWER_FLAG);
+		set_pc(0x0);
+	}
+
+	reg_val = readl(CONFIG_PHY_IRAM_BASE + 0x4);
+	if (reg_val != (uint32_t)&low_power_start) {
+		/* Store jump address as low_power_start if not present */
+		writel((uint32_t)&low_power_start, CONFIG_PHY_IRAM_BASE + 0x4);
+		dsb();
+		sev();
+	}
+
+	/* Set the CPU to SVC32 mode */
+	svc32_mode_en();
+	set_l2cache();
+
+	/* Invalidate L1 & TLB */
+	val = 0x0;
+	mcr_tlb(val);
+	mcr_icache(val);
+
+	/* Disable MMU stuff and caches */
+	mrc_sctlr(val);
+
+	val &= ~((0x2 << 12) | 0x7);
+	val |= ((0x1 << 12) | (0x8 << 8) | 0x2);
+	mcr_sctlr(val);
+
+	/* CPU state is hotplug or reset */
+	secondary_cpu_start();
+
+	/* Core should not enter into WFI here */
+	wfi();
+
+}
+
+/*
  * Pointer to this function is stored in iRam which is used
  * for jump and power down of a specific core.
  */
@@ -118,29 +174,25 @@ static void power_down_core(void)
  */
 static void secondary_cores_configure(void)
 {
-	uint32_t core_id;
+	/* Setup L2 cache */
+	set_l2cache();
+
+	/* Clear secondary boot iRAM base */
+	writel(0x0, (CONFIG_EXYNOS_RELOCATE_CODE_BASE + 0x1C));
-	/* Store jump address for power down of secondary cores */
+	/* set lowpower flag and address */
+	writel(RST_FLAG_VAL, CONFIG_LOWPOWER_FLAG);
+	writel((uint32_t)&low_power_start, CONFIG_LOWPOWER_ADDR);
+	writel(RST_FLAG_VAL, RST_FLAG_REG);
+
+	/* Store jump address for power down */
 	writel((uint32_t)&power_down_core, CONFIG_PHY_IRAM_BASE + 0x4);
 	/* Need all core power down check */
 	dsb();
 	sev();
-
-	/*
-	 * Power down all cores(secondary) while primary core must
-	 * wait for all cores to go down.
-	 */
-	for (core_id = 1; core_id != CORE_COUNT; core_id++) {
-		while ((readl(ARM_CORE0_STATUS
-			+ (core_id * CORE_CONFIG_OFFSET))
-			& 0xff) != 0x0) {
-			isb();
-			sev();
-		}
-		isb();
-	}
 }
+
+extern void relocate_wait_code(void);
 #endif
 int do_lowlevel_init(void)
@@ -151,6 +203,8 @@ int do_lowlevel_init(void)
 	arch_cpu_init();
 #ifdef CONFIG_EXYNOS5420
+	relocate_wait_code();
+
 	/* Reconfigure secondary cores */
 	secondary_cores_configure();
 #endif

diff --git a/arch/arm/cpu/armv7/exynos/sec_boot.S b/arch/arm/cpu/armv7/exynos/sec_boot.S
new file mode 100644
index 0000000..e818cf1
--- /dev/null
+++ b/arch/arm/cpu/armv7/exynos/sec_boot.S
@@ -0,0 +1,145 @@
+/*
+ * Lowlevel setup for EXYNOS5
+ *
+ * Copyright (C) 2013 Samsung Electronics
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <config.h>
+#include <asm/arch/cpu.h>
+
+	.globl relocate_wait_code
+relocate_wait_code:
+	adr	r0, code_base		@ r0: source address (start)
+	adr	r1, code_end		@ r1: source address (end)
+	ldr	r2, =0x02073000		@ r2: target address
+1:
+	ldmia	r0!, {r3-r6}
+	stmia	r2!, {r3-r6}
+	cmp	r0, r1
+	blt	1b
+	b	code_end
+	.ltorg
+/*
+ * Secondary core waits here until Primary wakes it up.
+ * Below code is copied to CONFIG_EXYNOS_RELOCATE_CODE_BASE.
+ * This is a workaround code which is supposed to act as a
+ * substitute/supplement to the iROM code.
+ *
+ * This workaround code is relocated to the address 0x02073000
+ * because that comes out to be the last 4KB of the iRAM
+ * (Base Address - 0x02020000, Limit Address - 0x02074000).
+ *
+ * U-Boot and kernel are aware of this code and flags by the simple
+ * fact that we are implementing a workaround in the last 4KB
+ * of the iRAM and we have already defined these flag and address
+ * values in both kernel and U-Boot for our use.
+ */
+code_base:
+	b	1f
+/*
+ * These addresses are being used as flags in U-Boot and kernel.
+ *
+ * Jump address for resume and flag to check for resume/reset:
+ * Resume address - 0x2073008
+ * Resume flag - 0x207300C
+ *
+ * Jump address for cluster switching:
+ * Switch address - 0x2073018
+ *
+ * Jump address for core hotplug:
+ * Hotplug address - 0x207301C
+ *
+ * Jump address for C2 state (reserved for future use, not used right now):
+ * C2 address - 0x2073024
+ *
+ * Managed per core status for the active cluster:
+ * CPU0 state - 0x2073028
+ * CPU1 state - 0x207302C
+ * CPU2 state - 0x2073030
+ * CPU3 state - 0x2073034
+ *
+ * Managed per core GIC status for the active cluster:
+ * CPU0 gic state - 0x2073038
+ * CPU1 gic state - 0x207303C
+ * CPU2 gic state - 0x2073040
+ * CPU3 gic state - 0x2073044
+ *
+ * Logic of the code:
+ * Step-1: Read current CPU status.
+ * Step-2: If it's a resume then continue, else jump to step 4.
+ * Step-3: Clear inform1 PMU register and jump to inform0 value.
+ * Step-4: If it's a switch, C2 or reset, get the hotplug address.
+ * Step-5: If address is not available, enter WFE.
+ * Step-6: If address is available, jump to that address.
+ */
+	nop			@ for backward compatibility
+	.word	0x0		@ REG0: RESUME_ADDR
+	.word	0x0		@ REG1: RESUME_FLAG
+	.word	0x0		@ REG2
+	.word	0x0		@ REG3
+_switch_addr:
+	.word	0x0		@ REG4: SWITCH_ADDR
+_hotplug_addr:
+	.word	0x0		@ REG5: CPU1_BOOT_REG
+	.word	0x0		@ REG6
+_c2_addr:
+	.word	0x0		@ REG7: REG_C2_ADDR
+_cpu_state:
+	.word	0x1		@ CPU0_STATE : RESET
+	.word	0x2		@ CPU1_STATE : SECONDARY RESET
+	.word	0x2		@ CPU2_STATE : SECONDARY RESET
+	.word	0x2		@ CPU3_STATE : SECONDARY RESET
+_gic_state:
+	.word	0x0		@ CPU0 - GICD_IGROUPR0
+	.word	0x0		@ CPU1 - GICD_IGROUPR0
+	.word	0x0		@ CPU2 - GICD_IGROUPR0
+	.word	0x0		@ CPU3 - GICD_IGROUPR0
+1:
+	adr	r0, _cpu_state
+	mrc	p15, 0, r7, c0, c0, 5	@ read MPIDR
+	and	r7, r7, #0xf		@ r7 = cpu id
+/* Read the current cpu state */
+	ldr	r10, [r0, r7, lsl #2]
+svc_entry:
+	tst	r10, #(1 << 4)
+	adrne	r0, _switch_addr
+	bne	wait_for_addr
+/* Clear INFORM1 */
+	ldr	r0, =(0x10040000 + 0x804)
+	ldr	r1, [r0]
+	cmp	r1, #0x0
+	movne	r1, #0x0
+	strne	r1, [r0]
+/* Get INFORM0 */
+	ldrne	r1, =(0x10040000 + 0x800)
+	ldrne	pc, [r1]
+	tst	r10, #(1 << 0)
+	ldrne	pc, =0x23e00000
+	adr	r0, _hotplug_addr
+wait_for_addr:
+	ldr	r1, [r0]
+	cmp	r1, #0x0
+	bxne	r1
+	wfe
+	b	wait_for_addr
+	.ltorg
+code_end:
+	mov	pc, lr

Hi Akshay,
On 15 January 2015 at 06:42, Akshay Saraswat <akshay.s@samsung.com> wrote:
> [snip]
>
> + * See file CREDITS for list of people who contributed to this
> + * project.

Can we please use SPDX header here?

> [snip]
Reviewed-by: Simon Glass <sjg@chromium.org>

Tested on snow, pit, pi
Tested-by: Simon Glass <sjg@chromium.org>

Regards,
Simon

This patch does two things:
1. Renames set_l2cache to configure_l2actlr to avoid misleading the reader: the old name suggests that the L2 cache is being set up or initialized, which is not what the code in this function does.
2. Removes a stray mrc of the L2 control register that was missed earlier.
Signed-off-by: Akshay Saraswat <akshay.s@samsung.com>
---
 arch/arm/cpu/armv7/exynos/lowlevel_init.c | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/arch/arm/cpu/armv7/exynos/lowlevel_init.c b/arch/arm/cpu/armv7/exynos/lowlevel_init.c
index d3c466e..688972b 100644
--- a/arch/arm/cpu/armv7/exynos/lowlevel_init.c
+++ b/arch/arm/cpu/armv7/exynos/lowlevel_init.c
@@ -70,7 +70,7 @@ static void enable_smp(void)
  * Set L2ACTLR[7] to reissue any memory transaction in the L2 that has been
  * stalled for 1024 cycles to verify that its hazard condition still exists.
  */
-static void set_l2cache(void)
+static void configure_l2actlr(void)
 {
 	uint32_t val;
@@ -84,7 +84,6 @@ static void set_l2cache(void)
 		mrc_l2_aux_ctlr(val);
 		val |= (1 << 7);
 		mcr_l2_aux_ctlr(val);
-		mrc_l2_ctlr(val);
 	}
 }
@@ -122,7 +121,7 @@ static void low_power_start(void)
 	/* Set the CPU to SVC32 mode */
 	svc32_mode_en();
-	set_l2cache();
+	configure_l2actlr();
 	/* Invalidate L1 & TLB */
 	val = 0x0;
@@ -175,7 +174,7 @@ static void power_down_core(void)
 static void secondary_cores_configure(void)
 {
 	/* Setup L2 cache */
-	set_l2cache();
+	configure_l2actlr();
 	/* Clear secondary boot iRAM base */
 	writel(0x0, (CONFIG_EXYNOS_RELOCATE_CODE_BASE + 0x1C));

On 15 January 2015 at 06:42, Akshay Saraswat <akshay.s@samsung.com> wrote:
> [snip]
Reviewed-by: Simon Glass <sjg@chromium.org>

Tested on snow, pit, pi
Tested-by: Simon Glass <sjg@chromium.org>

The L2 Auxiliary Control Register provides configuration and control options for the L2 memory system. Bit 3 of L2ACTLR controls clean/evict push to external; setting it disables clean/evict push to external, which is what this patch does.
Signed-off-by: Akshay Saraswat <akshay.s@samsung.com>
---
 arch/arm/cpu/armv7/exynos/soc.c | 9 +++++++++
 1 file changed, 9 insertions(+)
diff --git a/arch/arm/cpu/armv7/exynos/soc.c b/arch/arm/cpu/armv7/exynos/soc.c
index 8c7d7d8..7268b9b 100644
--- a/arch/arm/cpu/armv7/exynos/soc.c
+++ b/arch/arm/cpu/armv7/exynos/soc.c
@@ -45,6 +45,15 @@ static void exynos5_set_l2cache_params(void)
 		CACHE_DATA_RAM_LATENCY;
 	asm volatile("mcr p15, 1, %0, c9, c0, 2\n" : : "r"(val));
+
+#ifdef CONFIG_EXYNOS5420
+	/* Read CP15 L2ACTLR value */
+	asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
+	/* Disable clean/evict push to external */
+	val |= (0x1 << 3);
+	/* Write new vlaue to L2ACTLR */
+	asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
+#endif
 }
/*

On 15 January 2015 at 06:42, Akshay Saraswat <akshay.s@samsung.com> wrote:
> [snip]
Reviewed-by: Simon Glass <sjg@chromium.org>

Tested on snow, pit, pi
Tested-by: Simon Glass <sjg@chromium.org>

This patch does 3 things:
1. Enables ECC by setting bit 21 of L2CTLR.
2. Restores data and tag RAM latencies to 3 cycles, because iROM sets the L2CTLR value to 0x3000400 during switching.
3. Disables clean/evict push to external by setting bit 3 of L2ACTLR. We need to restore this here due to switching.
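The bit choices in points 1 and 2 can be checked with a small sketch (assuming, consistently with the enum values used later in this series, that the latency fields sit at data [2:0] and tag [8:6] and encode cycles-minus-one):

#include <stdint.h>

static inline uint32_t l2ctlr_bits(void)
{
	uint32_t ecc  = 1 << 21;	/* ECC/parity enable */
	uint32_t tag  = (3 - 1) << 6;	/* 3-cycle tag latency  == bit 7 */
	uint32_t data = (3 - 1) << 0;	/* 3-cycle data latency == bit 1 */

	return ecc | tag | data;	/* == 0x200082 */
}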
Signed-off-by: Abhilash Kesavan <a.kesavan@samsung.com>
Signed-off-by: Akshay Saraswat <akshay.s@samsung.com>
---
 arch/arm/cpu/armv7/exynos/lowlevel_init.c | 53 +++++++++++++++++++++++--------
 arch/arm/cpu/armv7/exynos/soc.c           |  7 ++++
 2 files changed, 46 insertions(+), 14 deletions(-)
diff --git a/arch/arm/cpu/armv7/exynos/lowlevel_init.c b/arch/arm/cpu/armv7/exynos/lowlevel_init.c
index 688972b..57b4c66 100644
--- a/arch/arm/cpu/armv7/exynos/lowlevel_init.c
+++ b/arch/arm/cpu/armv7/exynos/lowlevel_init.c
@@ -67,24 +67,40 @@ static void enable_smp(void)
 }
 /*
+ * Enable ECC by setting L2CTLR[21].
+ * Set L2CTLR[7] to make tag ram latency 3 cycles and
+ * set L2CTLR[1] to make data ram latency 3 cycles.
+ * We need to make RAM latency of 3 cycles here because cores
+ * power ON and OFF while switching. And everytime a core powers
+ * ON, iROM provides it a default L2CTLR value 0x400 which stands
+ * for TAG RAM setup of 1 cycle. Hence, we face a need of
+ * restoring data and tag latency values.
+ */
+static void configure_l2_ctlr(void)
+{
+	uint32_t val;
+
+	mrc_l2_ctlr(val);
+	val |= (1 << 21);
+	val |= (1 << 7);
+	val |= (1 << 1);
+	mcr_l2_ctlr(val);
+}
+
+/*
  * Set L2ACTLR[7] to reissue any memory transaction in the L2 that has been
  * stalled for 1024 cycles to verify that its hazard condition still exists.
+ * Disable clean/evict push to external by setting L2ACTLR[3].
  */
-static void configure_l2actlr(void)
+static void configure_l2_actlr(void)
 {
 	uint32_t val;
-	/* Read MIDR for Primary Part Number*/
-	mrc_midr(val);
-	val = (val >> 4);
-	val &= 0xf;
-
-	/* L2ACTLR[7]: Enable hazard detect timeout for A15 */
-	if (val == 0xf) {
-		mrc_l2_aux_ctlr(val);
-		val |= (1 << 7);
-		mcr_l2_aux_ctlr(val);
-	}
+	mrc_l2_aux_ctlr(val);
+	val |= (1 << 27);
+	val |= (1 << 7);
+	val |= (1 << 3);
+	mcr_l2_aux_ctlr(val);
 }
 /*
@@ -121,7 +137,16 @@ static void low_power_start(void)
 	/* Set the CPU to SVC32 mode */
 	svc32_mode_en();
-	configure_l2actlr();
+
+	/* Read MIDR for Primary Part Number*/
+	mrc_midr(val);
+	val = (val >> 4);
+	val &= 0xf;
+
+	if (val == 0xf) {
+		configure_l2_ctlr();
+		configure_l2_actlr();
+	}
 	/* Invalidate L1 & TLB */
 	val = 0x0;
@@ -174,7 +199,7 @@ static void power_down_core(void)
 static void secondary_cores_configure(void)
 {
 	/* Setup L2 cache */
-	configure_l2actlr();
+	configure_l2_ctlr();
 	/* Clear secondary boot iRAM base */
 	writel(0x0, (CONFIG_EXYNOS_RELOCATE_CODE_BASE + 0x1C));

diff --git a/arch/arm/cpu/armv7/exynos/soc.c b/arch/arm/cpu/armv7/exynos/soc.c
index 7268b9b..ea201e7 100644
--- a/arch/arm/cpu/armv7/exynos/soc.c
+++ b/arch/arm/cpu/armv7/exynos/soc.c
@@ -10,8 +10,10 @@
 #include <asm/system.h>
 enum l2_cache_params {
+#ifndef CONFIG_EXYNOS5420
 	CACHE_TAG_RAM_SETUP = (1 << 9),
 	CACHE_DATA_RAM_SETUP = (1 << 5),
+#endif
 	CACHE_TAG_RAM_LATENCY = (2 << 6),
 	CACHE_DATA_RAM_LATENCY = (2 << 0)
 };
@@ -39,10 +41,15 @@ static void exynos5_set_l2cache_params(void)
asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r"(val));
+#ifndef CONFIG_EXYNOS5420
 	val |= CACHE_TAG_RAM_SETUP |
 		CACHE_DATA_RAM_SETUP |
 		CACHE_TAG_RAM_LATENCY |
 		CACHE_DATA_RAM_LATENCY;
+#else
+	val |= CACHE_TAG_RAM_LATENCY |
+		CACHE_DATA_RAM_LATENCY;
+#endif
asm volatile("mcr p15, 1, %0, c9, c0, 2\n" : : "r"(val));

On 15 January 2015 at 06:42, Akshay Saraswat <akshay.s@samsung.com> wrote:
> [snip]
Reviewed-by: Simon Glass <sjg@chromium.org>

Tested on snow, pit, pi
Tested-by: Simon Glass <sjg@chromium.org>

When the SPL was compiled for Thumb, secondary cores failed to boot at kernel boot-up; only one core out of 4 came up. This happened because the code relocated to address 0x02073000 by the primary core is ARM code, which the secondary cores executed as if it were Thumb code. This patch fixes the issue of secondary cores treating the relocated code as Thumb instead of ARM instructions by jumping to it with the ARM "bx" instruction. "bx" updates the T bit (bit 5 of the CPSR) from bit 0 of the target address, which makes the execution unit treat the following instructions as ARM instructions.
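The difference between the two jump macros can be illustrated as follows (the macros mirror set_pc() and branch_bx() from the patch below; the commentary is the illustration):

/*
 * "mov pc, rX" keeps the current instruction-set state, so an SPL built
 * for Thumb would make the cores execute the relocated ARM code as Thumb.
 * "bx rX" switches state based on bit 0 of the target address: jumping
 * to a 4-byte-aligned address (bit 0 clear) selects ARM state.
 */
#define set_pc(x)	__asm__ __volatile__ ("mov pc, %0\n\t" : : "r"(x))
#define branch_bx(x)	__asm__ __volatile__ ("bx %0\n\t" : : "r"(x))

/* branch_bx(CONFIG_EXYNOS_RELOCATE_CODE_BASE) therefore enters the
 * relocated wait code in ARM state regardless of the caller's state. */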
Signed-off-by: Akshay Saraswat <akshay.s@samsung.com>
---
 arch/arm/cpu/armv7/exynos/lowlevel_init.c | 2 +-
 arch/arm/include/asm/arch-exynos/system.h | 3 +++
 2 files changed, 4 insertions(+), 1 deletion(-)
diff --git a/arch/arm/cpu/armv7/exynos/lowlevel_init.c b/arch/arm/cpu/armv7/exynos/lowlevel_init.c
index 57b4c66..d9f3f4b 100644
--- a/arch/arm/cpu/armv7/exynos/lowlevel_init.c
+++ b/arch/arm/cpu/armv7/exynos/lowlevel_init.c
@@ -110,7 +110,7 @@ static void secondary_cpu_start(void)
 {
 	enable_smp();
 	svc32_mode_en();
-	set_pc(CONFIG_EXYNOS_RELOCATE_CODE_BASE);
+	branch_bx(CONFIG_EXYNOS_RELOCATE_CODE_BASE);
 }
 /*

diff --git a/arch/arm/include/asm/arch-exynos/system.h b/arch/arm/include/asm/arch-exynos/system.h
index 86903c3..a9fd5e6 100644
--- a/arch/arm/include/asm/arch-exynos/system.h
+++ b/arch/arm/include/asm/arch-exynos/system.h
@@ -75,6 +75,9 @@ struct exynos5_sysreg {
 /* Set program counter with the given value */
 #define set_pc(x) __asm__ __volatile__ ("mov pc, %0\n\t" : : "r"(x))
+/* Branch to the given location */
+#define branch_bx(x) __asm__ __volatile__ ("bx %0\n\t" : : "r"(x))
+
 /* Read Main Id register */
 #define mrc_midr(x) __asm__ __volatile__ \
 			("mrc p15, 0, %0, c0, c0, 0\n\t" : "=r"(x) : )

On 15 January 2015 at 06:42, Akshay Saraswat <akshay.s@samsung.com> wrote:
> [snip]
Reviewed-by: Simon Glass <sjg@chromium.org>

Tested on snow, pit, pi
Tested-by: Simon Glass <sjg@chromium.org>

On warm reset, all cores jump to the low_power_start function, because iRAM data is retained and, while executing the iROM code, all cores find the jump flag 0x02020028 set. In low_power_start, the cores check the reset status and, if a reset is detected, clear the jump flag and jump back to 0x0.

The A7 cores do jump to 0x0, but they treat the following instructions as Thumb instructions, which in turn makes them loop inside the iROM code instead of jumping to power_down_core.

This issue is fixed by replacing the "mov pc" instruction with a "bx" instruction, which switches state along with the jump, so that the execution unit treats the instructions at the branch target as ARM instructions.
Signed-off-by: Akshay Saraswat <akshay.s@samsung.com>
---
 arch/arm/cpu/armv7/exynos/lowlevel_init.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/arm/cpu/armv7/exynos/lowlevel_init.c b/arch/arm/cpu/armv7/exynos/lowlevel_init.c
index d9f3f4b..a459432 100644
--- a/arch/arm/cpu/armv7/exynos/lowlevel_init.c
+++ b/arch/arm/cpu/armv7/exynos/lowlevel_init.c
@@ -124,7 +124,7 @@ static void low_power_start(void)
 	reg_val = readl(RST_FLAG_REG);
 	if (reg_val != RST_FLAG_VAL) {
 		writel(0x0, CONFIG_LOWPOWER_FLAG);
-		set_pc(0x0);
+		branch_bx(0x0);
 	}
reg_val = readl(CONFIG_PHY_IRAM_BASE + 0x4);

On 15 January 2015 at 06:42, Akshay Saraswat <akshay.s@samsung.com> wrote:
> [snip]
Reviewed-by: Simon Glass <sjg@chromium.org>

Tested on snow, pit, pi
Tested-by: Simon Glass <sjg@chromium.org>

From: Doug Anderson <dianders@chromium.org>
It was found that the L2 cache timings that we had before could cause freezes and hangs. We should make things more robust with better timings. Currently the production ChromeOS kernel applies these timings, but it's nice to fix up the firmware too (and upstream probably won't take our kernel hacks).
This also provides a big cleanup of the L2 cache init code, avoiding some duplication. The way things used to work:
* low_power_start() was installed by the SPL (both at boot and resume time) and left resident in iRAM for the kernel to use when bringing up additional CPUs. It used configure_l2_ctlr() and configure_l2_actlr() when it detected it was on an A15. This was needed (despite the L2 cache registers being shared among all A15s) because we might have been the first man in after the whole A15 cluster was shut down.
* secondary_cores_configure() was called at boot time and at resume time. Strangely, this called configure_l2_ctlr() but not configure_l2_actlr(), which was almost certainly wrong. Given that we'll call both (see next bullet) later in the boot process, it didn't matter for normal boot, but I guess this is how L2 cache settings got set on 5420/5800 (but not 5250?) at resume time.
* exynos5_set_l2cache_params() was called as part of cache enablement. This should happen at boot time (normally in the SPL, except for USB boot where it happens in main U-Boot).
Note that the old code wasn't setting ECC/parity in the cache enablement code but we happened to get it anyway because we'd call secondary_cores_configure() at boot time. For resume time we'd get it anyway when the 2nd A15 core came up.
Let's make this a whole lot simpler. Now we always set these parameters in the same place for all boots and use the same code for setting up secondary CPUs.
Intended net effects of this change (other than cleanup):
* Timings go from
  before: data: 0 cycle setup, 3 cycles (0x2) latency
          tag:  0 cycle setup, 3 cycles (0x2) latency
  after:  data: 1 cycle setup, 4 cycles (0x3) latency
          tag:  1 cycle setup, 4 cycles (0x3) latency
* L2ACTLR is properly initted on 5420/5800 in all cases.
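Concretely, the new 5420/5800 value ORed into L2CTLR can be computed as below (a sketch assuming the latency fields encode cycles-minus-one, which is what the before/after numbers above imply; the enum values come from the patch that follows):

#include <assert.h>
#include <stdint.h>

enum l2_cache_params {
	CACHE_ECC_AND_PARITY	= (1 << 21),
	CACHE_TAG_RAM_SETUP	= (1 << 9),	/* 1 cycle setup */
	CACHE_DATA_RAM_SETUP	= (1 << 5),	/* 1 cycle setup */
	CACHE_TAG_RAM_LATENCY	= (3 << 6),	/* 4 cycles (0x3) */
	CACHE_DATA_RAM_LATENCY	= (3 << 0),	/* 4 cycles (0x3) */
};

int main(void)
{
	uint32_t val = CACHE_ECC_AND_PARITY | CACHE_TAG_RAM_SETUP |
		       CACHE_DATA_RAM_SETUP | CACHE_TAG_RAM_LATENCY |
		       CACHE_DATA_RAM_LATENCY;

	assert(val == 0x2002e3);	/* value ORed into L2CTLR */
	return 0;
}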
One note is that we're still relying on luck to keep low_power_start() working. The compiler is being nice and not storing anything on the stack.
Another note is that on its own this patch won't help to fix cache settings in an RW U-Boot update where we still have the RO SPL. The plan for that is:
* Have RW U-Boot re-init the cache right before calling the kernel (after it has turned the L2 cache off). This is why the functions are in a header file instead of lowlevel_init.c.
* Have the kernel save the L2 cache settings of the boot CPU and apply them to all other CPUs.

We get a little lucky here because the old code was using "|=" to modify the registers and all of the bits that it's setting are also present in the new settings (!). That means that when the 2nd CPU in the A15 cluster comes up it doesn't actually mess up the settings of the 1st CPU in the A15 cluster. An alternative option is to have the kernel write its own low_power_start() code.
Signed-off-by: Doug Anderson <dianders@chromium.org>
Signed-off-by: Akshay Saraswat <akshay.s@samsung.com>
---
 arch/arm/cpu/armv7/exynos/common_setup.h  | 55 +++++++++++++++++++++++++++
 arch/arm/cpu/armv7/exynos/lowlevel_init.c | 55 +++++++++----------------------
 arch/arm/cpu/armv7/exynos/soc.c           | 51 ----------------------------
 3 files changed, 70 insertions(+), 91 deletions(-)
diff --git a/arch/arm/cpu/armv7/exynos/common_setup.h b/arch/arm/cpu/armv7/exynos/common_setup.h
index e6318c0..7fa9683 100644
--- a/arch/arm/cpu/armv7/exynos/common_setup.h
+++ b/arch/arm/cpu/armv7/exynos/common_setup.h
@@ -23,6 +23,8 @@
  * MA 02111-1307 USA
  */

+#include <asm/arch/system.h>
+
 #define DMC_OFFSET	0x10000
 /*

@@ -43,3 +45,56 @@ void system_clock_init(void);
 int do_lowlevel_init(void);
 void sdelay(unsigned long);
+
+enum l2_cache_params {
+	CACHE_ECC_AND_PARITY	= (1 << 21),
+	CACHE_TAG_RAM_SETUP	= (1 << 9),
+	CACHE_DATA_RAM_SETUP	= (1 << 5),
+#ifndef CONFIG_EXYNOS5420
+	CACHE_TAG_RAM_LATENCY	= (2 << 6),	/* 5250 */
+	CACHE_DATA_RAM_LATENCY	= (2 << 0),
+#else
+	CACHE_TAG_RAM_LATENCY	= (3 << 6),	/* 5420 and 5422 */
+	CACHE_DATA_RAM_LATENCY	= (3 << 0),
+#endif
+};
+
+#ifndef CONFIG_SYS_L2CACHE_OFF
+/*
+ * Configure L2CTLR to get timings that keep us from hanging/crashing.
+ *
+ * Must be inline here since low_power_start() is called without a
+ * stack (!).
+ */
+static inline void configure_l2_ctlr(void)
+{
+	uint32_t val;
+
+	mrc_l2_ctlr(val);
+	val |= CACHE_TAG_RAM_SETUP |
+		CACHE_DATA_RAM_SETUP |
+		CACHE_TAG_RAM_LATENCY |
+		CACHE_DATA_RAM_LATENCY |
+		CACHE_ECC_AND_PARITY;
+	mcr_l2_ctlr(val);
+}
+
+/*
+ * Configure L2ACTLR.
+ *
+ * Must be inline here since low_power_start() is called without a
+ * stack (!).
+ */
+static inline void configure_l2_actlr(void)
+{
+#ifdef CONFIG_EXYNOS5420
+	uint32_t val;
+
+	mrc_l2_aux_ctlr(val);
+	val |= (1 << 27) |	/* Prevents stopping the L2 logic clock */
+	       (1 << 7) |	/* Enable hazard detect timeout for A15 */
+	       (1 << 3);	/* Disable clean/evict push to external */
+	mcr_l2_aux_ctlr(val);
+#endif
+}
+#endif

diff --git a/arch/arm/cpu/armv7/exynos/lowlevel_init.c b/arch/arm/cpu/armv7/exynos/lowlevel_init.c
index a459432..40d3e3a 100644
--- a/arch/arm/cpu/armv7/exynos/lowlevel_init.c
+++ b/arch/arm/cpu/armv7/exynos/lowlevel_init.c
@@ -67,43 +67,6 @@ static void enable_smp(void)
 }
/* - * Enable ECC by setting L2CTLR[21]. - * Set L2CTLR[7] to make tag ram latency 3 cycles and - * set L2CTLR[1] to make data ram latency 3 cycles. - * We need to make RAM latency of 3 cycles here because cores - * power ON and OFF while switching. And everytime a core powers - * ON, iROM provides it a default L2CTLR value 0x400 which stands - * for TAG RAM setup of 1 cycle. Hence, we face a need of - * restoring data and tag latency values. - */ -static void configure_l2_ctlr(void) -{ - uint32_t val; - - mrc_l2_ctlr(val); - val |= (1 << 21); - val |= (1 << 7); - val |= (1 << 1); - mcr_l2_ctlr(val); -} - -/* - * Set L2ACTLR[7] to reissue any memory transaction in the L2 that has been - * stalled for 1024 cycles to verify that its hazard condition still exists. - * Disable clean/evict push to external by setting L2ACTLR[3]. - */ -static void configure_l2_actlr(void) -{ - uint32_t val; - - mrc_l2_aux_ctlr(val); - val |= (1 << 27); - val |= (1 << 7); - val |= (1 << 3); - mcr_l2_aux_ctlr(val); -} - -/* * Power up secondary CPUs. */ static void secondary_cpu_start(void) @@ -198,9 +161,6 @@ static void power_down_core(void) */ static void secondary_cores_configure(void) { - /* Setup L2 cache */ - configure_l2_ctlr(); - /* Clear secondary boot iRAM base */ writel(0x0, (CONFIG_EXYNOS_RELOCATE_CODE_BASE + 0x1C));
@@ -226,6 +186,21 @@ int do_lowlevel_init(void)
arch_cpu_init();
+#ifndef CONFIG_SYS_L2CACHE_OFF + /* + * Init L2 cache parameters here for use by boot and resume + * + * These are here instead of in v7_outer_cache_enable() so that the + * L2 cache settings get properly set even at resume time or if we're + * running U-Boot with the cache off. The kernel still needs us to + * set these for it. + */ + configure_l2_ctlr(); + configure_l2_actlr(); + dsb(); + isb(); +#endif + #ifdef CONFIG_EXYNOS5420 relocate_wait_code();
diff --git a/arch/arm/cpu/armv7/exynos/soc.c b/arch/arm/cpu/armv7/exynos/soc.c index ea201e7..0f116b1 100644 --- a/arch/arm/cpu/armv7/exynos/soc.c +++ b/arch/arm/cpu/armv7/exynos/soc.c @@ -9,15 +9,6 @@ #include <asm/io.h> #include <asm/system.h>
-enum l2_cache_params { -#ifndef CONFIG_EXYNOS5420 - CACHE_TAG_RAM_SETUP = (1 << 9), - CACHE_DATA_RAM_SETUP = (1 << 5), -#endif - CACHE_TAG_RAM_LATENCY = (2 << 6), - CACHE_DATA_RAM_LATENCY = (2 << 0) -}; - void reset_cpu(ulong addr) { writel(0x1, samsung_get_base_swreset()); @@ -30,45 +21,3 @@ void enable_caches(void) dcache_enable(); } #endif - -#ifndef CONFIG_SYS_L2CACHE_OFF -/* - * Set L2 cache parameters - */ -static void exynos5_set_l2cache_params(void) -{ - unsigned int val = 0; - - asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r"(val)); - -#ifndef CONFIG_EXYNOS5420 - val |= CACHE_TAG_RAM_SETUP | - CACHE_DATA_RAM_SETUP | - CACHE_TAG_RAM_LATENCY | - CACHE_DATA_RAM_LATENCY; -#else - val |= CACHE_TAG_RAM_LATENCY | - CACHE_DATA_RAM_LATENCY; -#endif - - asm volatile("mcr p15, 1, %0, c9, c0, 2\n" : : "r"(val)); - -#ifdef CONFIG_EXYNOS5420 - /* Read CP15 L2ACTLR value */ - asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val)); - /* Disable clean/evict push to external */ - val |= (0x1 << 3); - /* Write new vlaue to L2ACTLR */ - asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val)); -#endif -} - -/* - * Sets L2 cache related parameters before enabling data cache - */ -void v7_outer_cache_enable(void) -{ - if (cpu_is_exynos5()) - exynos5_set_l2cache_params(); -} -#endif
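For reference, the mrc_l2_ctlr()/mcr_l2_ctlr() helpers used above come from <asm/arch/system.h>; presumably they wrap the same CP15 encodings the removed soc.c code issued inline. A sketch under that assumption:

/*
 * Assumed CP15 encodings, taken from the soc.c code deleted above:
 * L2CTLR  is p15, opc1=1, c9,  c0, opc2=2
 * L2ACTLR is p15, opc1=1, c15, c0, opc2=0
 */
#define mrc_l2_ctlr(x)		asm volatile("mrc p15, 1, %0, c9, c0, 2" : "=r"(x))
#define mcr_l2_ctlr(x)		asm volatile("mcr p15, 1, %0, c9, c0, 2" : : "r"(x))
#define mrc_l2_aux_ctlr(x)	asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r"(x))
#define mcr_l2_aux_ctlr(x)	asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r"(x))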

Hi Akshay,
On 15 January 2015 at 06:42, Akshay Saraswat akshay.s@samsung.com wrote:
This causes a compilation error on snow, so needs to be adjusted.
Tested on pit, pi
Tested-by: Simon Glass sjg@chromium.org
Regards, Simon

On 15/01/15 22:41, Akshay Saraswat wrote:
+static void power_down_core(void)
+{
+	uint32_t tmp, core_id, core_config;
+
+	/* Get the core id */
+	mrc_mpafr(core_id);
+	tmp = core_id & 0x3;
+	core_id = (core_id >> 6) & ~3;
Please explain what each bit means.
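For what it's worth, the usual reading of those lines (assuming the standard MPIDR affinity layout on the 5420: Aff0 bits [1:0] hold the CPU number within a cluster, Aff1 bits [9:8] hold the cluster number) is sketched below; the helper name is hypothetical:

#include <stdint.h>

static inline uint32_t linear_core_id(uint32_t mpidr)
{
	uint32_t cpu = mpidr & 0x3;		/* Aff0: CPU 0..3 in cluster */
	uint32_t cluster = (mpidr >> 6) & ~3u;	/* Aff1 [9:8] -> bits [3:2] */

	return cluster | cpu;			/* cluster * 4 + cpu = 0..7 */
}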
+#define ARM_CORE0_CONFIG	(EXYNOS5420_POWER_BASE + 0x2000)
+#define ARM_CORE0_STATUS	(EXYNOS5420_POWER_BASE + 0x2004)
+#define CORE_CONFIG_OFFSET	0x80
+#define CORE_COUNT		0x8
Are they for all Exynos SoCs?
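For illustration, these defines imply a 0x80 stride between the per-core PMU register pairs; hypothetical helper macros (not in the patch; the base and stride come from the defines above) would read:

/* Core n's CONFIG/STATUS registers sit CORE_CONFIG_OFFSET (0x80) apart */
#define ARM_CORE_CONFIG(n)	(ARM_CORE0_CONFIG + (n) * CORE_CONFIG_OFFSET)
#define ARM_CORE_STATUS(n)	(ARM_CORE0_STATUS + (n) * CORE_CONFIG_OFFSET)

so that powering down core n is writel(0x0, ARM_CORE_CONFIG(n)), which is what the patch does with open-coded arithmetic.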
+/*
+ * POWER
+ */
If these are for power, then they should go into power.h.
+#define PMU_SPARE_BASE		(EXYNOS5420_INF_REG_BASE + 0x100)
+#define PMU_SPARE_0		PMU_SPARE_BASE
+#define PMU_SPARE_1		(PMU_SPARE_BASE + 0x4)
+#define PMU_SPARE_2		(PMU_SPARE_BASE + 0x8)
+#define PMU_SPARE_3		(PMU_SPARE_BASE + 0xc)
We do NOT allow such an accessor.
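For context, the arch-exynos headers generally describe register blocks with C structs rather than raw address #defines; a hypothetical struct-style equivalent of these spare registers would look like:

/* Hypothetical layout, offsets per the patch (INF_REG base + 0x100) */
struct exynos5420_pmu_spare {
	unsigned int spare[4];	/* PMU_SPARE_0 .. PMU_SPARE_3 */
};

accessed as ((struct exynos5420_pmu_spare *)PMU_SPARE_BASE)->spare[0] and so on.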
Thanks, Minkyu Kang.

Hi,
On 15 January 2015 at 23:35, Minkyu Kang mk7.kang@samsung.com wrote:
+#define PMU_SPARE_BASE		(EXYNOS5420_INF_REG_BASE + 0x100)
+#define PMU_SPARE_0		PMU_SPARE_BASE
+#define PMU_SPARE_1		(PMU_SPARE_BASE + 0x4)
+#define PMU_SPARE_2		(PMU_SPARE_BASE + 0x8)
+#define PMU_SPARE_3		(PMU_SPARE_BASE + 0xc)
We do NOT allow such an accessor.
Are these actually used? Maybe just remove them?
Tested on snow, pit, pi
Tested-by: Simon Glass sjg@chromium.org
Regards, Simon