[U-Boot] [PATCH 0/2] armv8: Support loading 32-bit OS in AArch32 execution state

This series is to support loading a 32-bit OS; the execution state will change from AArch64 to AArch32 when jumping to the kernel. The architecture information will be obtained by checking the FIT image, and U-Boot will then load a 32-bit or 64-bit OS automatically.
The spin-table method is used for the secondary cores to load a 32-bit OS. The architecture information will be obtained by checking the FIT image and saved in the os_arch element of the spin-table; the secondary cores will then check os_arch and jump to a 32-bit or 64-bit OS automatically.
This series is tested on LS1043ARDB board. ---------------------------------------------------------------- Alison Wang (2): armv8: Support loading 32-bit OS in AArch32 execution state armv8: fsl-layerscape: SMP support for loading 32-bit OS
arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S | 21 +++++++++++++++++++ arch/arm/cpu/armv8/fsl-layerscape/mp.c | 10 +++++++++ arch/arm/cpu/armv8/transition.S | 100 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ arch/arm/include/asm/arch-fsl-layerscape/mp.h | 6 ++++++ arch/arm/include/asm/system.h | 2 ++ arch/arm/lib/bootm.c | 25 +++++++++++++++++++++-- common/image-fit.c | 12 ++++++++++- 7 files changed, 173 insertions(+), 3 deletions(-)

To support loading a 32-bit OS, the execution state will change from AArch64 to AArch32 when jumping to kernel.
The architecture information will be got through checking FIT image, then U-Boot will load 32-bit OS or 64-bit OS automatically.
Signed-off-by: Ebony Zhu ebony.zhu@nxp.com Signed-off-by: Alison Wang alison.wang@nxp.com Signed-off-by: Chenhui Zhao chenhui.zhao@nxp.com --- arch/arm/cpu/armv8/transition.S | 100 ++++++++++++++++++++++++++++++++++++++++ arch/arm/include/asm/system.h | 2 + arch/arm/lib/bootm.c | 20 +++++++- common/image-fit.c | 12 ++++- 4 files changed, 131 insertions(+), 3 deletions(-)
diff --git a/arch/arm/cpu/armv8/transition.S b/arch/arm/cpu/armv8/transition.S index 253a39b..9d7a17a 100644 --- a/arch/arm/cpu/armv8/transition.S +++ b/arch/arm/cpu/armv8/transition.S @@ -21,3 +21,103 @@ ENTRY(armv8_switch_to_el1) 0: ret 1: armv8_switch_to_el1_m x0, x1 ENDPROC(armv8_switch_to_el1) + +/* + * x0: kernel entry point + * x1: machine nr + * x2: fdt address + */ +ENTRY(armv8_switch_to_el2_aarch32) + switch_el x3, 1f, 0f, 0f +0: ret +1: + mov x7, x0 + mov x8, x1 + mov x9, x2 + + /* 32bit EL2 | HCE | SMD | RES1 (Bits[5:4]) | Non-secure EL0/EL1 */ + mov x1, 0x1b1 + msr scr_el3, x1 + msr cptr_el3, xzr /* Disable coprocessor traps to EL3 */ + mov x1, 0x33ff + msr cptr_el2, x1 /* Disable coprocessor traps to EL2 */ + + /* Initialize Generic Timers */ + msr cntvoff_el2, xzr + + mov x1, #0x0830 + movk x1, #0x30c5, lsl #16 + msr sctlr_el2, x1 + + /* Return to AArch32 Hypervisor mode */ + mov x1, sp + msr sp_el2, x1 + mrs x1, vbar_el3 + msr vbar_el2, x1 /* Migrate VBAR */ + mov x1, #0x1da + msr spsr_el3, x1 + msr elr_el3, x7 + + mov x0, #0 + mov x1, x8 + mov x2, x9 + + eret +ENDPROC(armv8_switch_to_el2_aarch32) + +/* + * x0: kernel entry point + * x1: machine nr + * x2: fdt address + */ +ENTRY(armv8_switch_to_el1_aarch32) + switch_el x3, 0f, 1f, 0f +0: ret +1: + mov x7, x0 + mov x8, x1 + mov x9, x2 + + /* Initialize Generic Timers */ + mrs x0, cnthctl_el2 + orr x0, x0, #0x3 /* Enable EL1 access to timers */ + msr cnthctl_el2, x0 + msr cntvoff_el2, xzr + + /* Initialize MPID/MPIDR registers */ + mrs x0, midr_el1 + mrs x1, mpidr_el1 + msr vpidr_el2, x0 + msr vmpidr_el2, x1 + + /* Disable coprocessor traps */ + mov x0, #0x33ff + msr cptr_el2, x0 /* Disable coprocessor traps to EL2 */ + msr hstr_el2, xzr /* Disable coprocessor traps to EL2 */ + mov x0, #3 << 20 + msr cpacr_el1, x0 /* Enable FP/SIMD at EL1 */ + + /* Initialize HCR_EL2 */ + mov x0, #(0 << 31) /* 32bit EL1 */ + orr x0, x0, #(1 << 29) /* Disable HVC */ + msr hcr_el2, x0 + + mov x0, #0x0800 + movk x0, 
#0x30d0, lsl #16 + msr sctlr_el1, x0 + + /* Return to AArch32 Supervisor mode */ + mov x0, sp + msr sp_el1, x0 /* Migrate SP */ + mrs x0, vbar_el2 + msr vbar_el1, x0 /* Migrate VBAR */ + mov x0, #0x1d3 + msr spsr_el2, x0 + msr elr_el2, x7 + + mov x0, #0 + mov x1, x8 + mov x2, x9 + + eret +ENDPROC(armv8_switch_to_el1_aarch32) diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 9ae890a..bb87cf0 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -102,6 +102,8 @@ void __asm_switch_ttbr(u64 new_ttbr);
void armv8_switch_to_el2(void); void armv8_switch_to_el1(void); +void armv8_switch_to_el2_aarch32(u64 entry_point, u64 mach_nr, u64 fdt_addr); +void armv8_switch_to_el1_aarch32(u64 entry_point, u64 mach_nr, u64 fdt_addr); void gic_init(void); void gic_send_sgi(unsigned long sgino); void wait_for_wakeup(void); diff --git a/arch/arm/lib/bootm.c b/arch/arm/lib/bootm.c index 0838d89..a39c3d2 100644 --- a/arch/arm/lib/bootm.c +++ b/arch/arm/lib/bootm.c @@ -286,8 +286,24 @@ static void boot_jump_linux(bootm_headers_t *images, int flag) announce_and_cleanup(fake);
if (!fake) { - do_nonsec_virt_switch(); - kernel_entry(images->ft_addr, NULL, NULL, NULL); + if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) && + (images->os.arch == IH_ARCH_ARM)) { + smp_kick_all_cpus(); + dcache_disable(); +#ifdef CONFIG_ARMV8_SWITCH_TO_EL1 + armv8_switch_to_el2(); + armv8_switch_to_el1_aarch32((u64)images->ep, + (u64)gd->bd->bi_arch_number, + (u64)images->ft_addr); +#else + armv8_switch_to_el2_aarch32((u64)images->ep, + (u64)gd->bd->bi_arch_number, + (u64)images->ft_addr); +#endif + } else { + do_nonsec_virt_switch(); + kernel_entry(images->ft_addr, NULL, NULL, NULL); + } } #else unsigned long machid = gd->bd->bi_arch_number; diff --git a/common/image-fit.c b/common/image-fit.c index 25f8a11..2986469 100644 --- a/common/image-fit.c +++ b/common/image-fit.c @@ -1163,7 +1163,8 @@ int fit_image_check_arch(const void *fit, int noffset, uint8_t arch) if (fit_image_get_arch(fit, noffset, &image_arch)) return 0; return (arch == image_arch) || - (arch == IH_ARCH_I386 && image_arch == IH_ARCH_X86_64); + (arch == IH_ARCH_I386 && image_arch == IH_ARCH_X86_64) || + (arch == IH_ARCH_ARM64 && image_arch == IH_ARCH_ARM); }
/** @@ -1586,6 +1587,9 @@ int fit_image_load(bootm_headers_t *images, ulong addr, int type_ok, os_ok; ulong load, data, len; uint8_t os; +#ifndef USE_HOSTCC + uint8_t os_arch; +#endif const char *prop_name; int ret;
@@ -1669,6 +1673,12 @@ int fit_image_load(bootm_headers_t *images, ulong addr, return -ENOEXEC; } #endif + +#ifndef USE_HOSTCC + fit_image_get_arch(fit, noffset, &os_arch); + images->os.arch = os_arch; +#endif + if (image_type == IH_TYPE_FLATDT && !fit_image_check_comp(fit, noffset, IH_COMP_NONE)) { puts("FDT image is compressed");

On 13.05.16 10:40, Alison Wang wrote:
To support loading a 32-bit OS, the execution state will change from AArch64 to AArch32 when jumping to kernel.
The architecture information will be got through checking FIT image, then U-Boot will load 32-bit OS or 64-bit OS automatically.
Signed-off-by: Ebony Zhu ebony.zhu@nxp.com Signed-off-by: Alison Wang alison.wang@nxp.com Signed-off-by: Chenhui Zhao chenhui.zhao@nxp.com
arch/arm/cpu/armv8/transition.S | 100 ++++++++++++++++++++++++++++++++++++++++ arch/arm/include/asm/system.h | 2 + arch/arm/lib/bootm.c | 20 +++++++- common/image-fit.c | 12 ++++- 4 files changed, 131 insertions(+), 3 deletions(-)
diff --git a/arch/arm/cpu/armv8/transition.S b/arch/arm/cpu/armv8/transition.S index 253a39b..9d7a17a 100644 --- a/arch/arm/cpu/armv8/transition.S +++ b/arch/arm/cpu/armv8/transition.S @@ -21,3 +21,103 @@ ENTRY(armv8_switch_to_el1) 0: ret 1: armv8_switch_to_el1_m x0, x1 ENDPROC(armv8_switch_to_el1)
+/*
- x0: kernel entry point
- x1: machine nr
- x2: fdt address
- */
+ENTRY(armv8_switch_to_el2_aarch32)
- switch_el x3, 1f, 0f, 0f
+0: ret +1:
- mov x7, x0
- mov x8, x1
- mov x9, x2
- /* 32bit EL2 | HCE | SMD | RES1 (Bits[5:4]) | Non-secure EL0/EL1 */
- mov x1, 0x1b1
- msr scr_el3, x1
- msr cptr_el3, xzr /* Disable coprocessor traps to EL3 */
- mov x1, 0x33ff
- msr cptr_el2, x1 /* Disable coprocessor traps to EL2 */
- /* Initialize Generic Timers */
- msr cntvoff_el2, xzr
- mov x1, #0x0830
- movk x1, #0x30c5, lsl #16
- msr sctlr_el2, x1
Why is this necessary?
- /* Return to AArch32 Hypervisor mode */
- mov x1, sp
- msr sp_el2, x1
- mrs x1, vbar_el3
- msr vbar_el2, x1 /* Migrate VBAR */
- mov x1, #0x1da
- msr spsr_el3, x1
- msr elr_el3, x7
- mov x0, #0
- mov x1, x8
- mov x2, x9
- eret
+ENDPROC(armv8_switch_to_el2_aarch32)
This whole thing looks like a copy of armv8_switch_to_el2_m. Just parameterize that one and put the few bits that are different in macro ifs.
+/*
- x0: kernel entry point
- x1: machine nr
- x2: fdt address
- */
+ENTRY(armv8_switch_to_el1_aarch32)
- switch_el x3, 0f, 1f, 0f
+0: ret +1:
- mov x7, x0
- mov x8, x1
- mov x9, x2
- /* Initialize Generic Timers */
- mrs x0, cnthctl_el2
- orr x0, x0, #0x3 /* Enable EL1 access to timers */
- msr cnthctl_el2, x0
- msr cntvoff_el2, xzr
/* Initialize MPID/MPIDR registers */
- mrs x0, midr_el1
- mrs x1, mpidr_el1
- msr vpidr_el2, x0
- msr vmpidr_el2, x1
/* Disable coprocessor traps */
- mov x0, #0x33ff
- msr cptr_el2, x0 /* Disable coprocessor traps to EL2 */
msr hstr_el2, xzr /* Disable coprocessor traps to EL2 */
mov x0, #3 << 20
msr cpacr_el1, x0 /* Enable FP/SIMD at EL1 */
- /* Initialize HCR_EL2 */
- mov x0, #(0 << 31) /* 32bit EL1 */
- orr x0, x0, #(1 << 29) /* Disable HVC */
- msr hcr_el2, x0
- mov x0, #0x0800
- movk x0, #0x30d0, lsl #16
- msr sctlr_el1, x0
- /* Return to AArch32 Supervisor mode */
- mov x0, sp
- msr sp_el1, x0 /* Migrate SP */
- mrs x0, vbar_el2
- msr vbar_el1, x0 /* Migrate VBAR */
- mov x0, #0x1d3
- msr spsr_el2, x0
- msr elr_el2, x7
- mov x0, #0
- mov x1, x8
- mov x2, x9
- eret
+ENDPROC(armv8_switch_to_el1_aarch32)
Does anybody really care about jumping to el1?
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 9ae890a..bb87cf0 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -102,6 +102,8 @@ void __asm_switch_ttbr(u64 new_ttbr);
void armv8_switch_to_el2(void); void armv8_switch_to_el1(void); +void armv8_switch_to_el2_aarch32(u64 entry_point, u64 mach_nr, u64 fdt_addr); +void armv8_switch_to_el1_aarch32(u64 entry_point, u64 mach_nr, u64 fdt_addr); void gic_init(void); void gic_send_sgi(unsigned long sgino); void wait_for_wakeup(void); diff --git a/arch/arm/lib/bootm.c b/arch/arm/lib/bootm.c index 0838d89..a39c3d2 100644 --- a/arch/arm/lib/bootm.c +++ b/arch/arm/lib/bootm.c @@ -286,8 +286,24 @@ static void boot_jump_linux(bootm_headers_t *images, int flag) announce_and_cleanup(fake);
if (!fake) {
do_nonsec_virt_switch();
kernel_entry(images->ft_addr, NULL, NULL, NULL);
if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) &&
(images->os.arch == IH_ARCH_ARM)) {
smp_kick_all_cpus();
dcache_disable();
+#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
armv8_switch_to_el2();
armv8_switch_to_el1_aarch32((u64)images->ep,
(u64)gd->bd->bi_arch_number,
(u64)images->ft_addr);
+#else
armv8_switch_to_el2_aarch32((u64)images->ep,
(u64)gd->bd->bi_arch_number,
(u64)images->ft_addr);
+#endif
Does this compile on 32bit targets?
Alex

On 13.05.16 10:40, Alison Wang wrote:
To support loading a 32-bit OS, the execution state will change from AArch64 to AArch32 when jumping to kernel.
The architecture information will be got through checking FIT image, then U-Boot will load 32-bit OS or 64-bit OS automatically.
Signed-off-by: Ebony Zhu ebony.zhu@nxp.com Signed-off-by: Alison Wang alison.wang@nxp.com Signed-off-by: Chenhui Zhao chenhui.zhao@nxp.com
arch/arm/cpu/armv8/transition.S | 100
++++++++++++++++++++++++++++++++++++++++
arch/arm/include/asm/system.h | 2 + arch/arm/lib/bootm.c | 20 +++++++- common/image-fit.c | 12 ++++- 4 files changed, 131 insertions(+), 3 deletions(-)
diff --git a/arch/arm/cpu/armv8/transition.S b/arch/arm/cpu/armv8/transition.S index 253a39b..9d7a17a 100644 --- a/arch/arm/cpu/armv8/transition.S +++ b/arch/arm/cpu/armv8/transition.S @@ -21,3 +21,103 @@ ENTRY(armv8_switch_to_el1) 0: ret 1: armv8_switch_to_el1_m x0, x1 ENDPROC(armv8_switch_to_el1)
+/*
- x0: kernel entry point
- x1: machine nr
- x2: fdt address
- */
+ENTRY(armv8_switch_to_el2_aarch32)
- switch_el x3, 1f, 0f, 0f
+0: ret +1:
- mov x7, x0
- mov x8, x1
- mov x9, x2
- /* 32bit EL2 | HCE | SMD | RES1 (Bits[5:4]) | Non-secure EL0/EL1
*/
- mov x1, 0x1b1
- msr scr_el3, x1
- msr cptr_el3, xzr /* Disable coprocessor traps to EL3 */
- mov x1, 0x33ff
- msr cptr_el2, x1 /* Disable coprocessor traps to EL2 */
- /* Initialize Generic Timers */
- msr cntvoff_el2, xzr
- mov x1, #0x0830
- movk x1, #0x30c5, lsl #16
- msr sctlr_el2, x1
Why is this necessary?
[Alison Wang] SCTLR_EL2 is architecturally mapped to AArch32 register HSCTLR. HSCTLR will provide control of the system operation in Hyp mode.
- /* Return to AArch32 Hypervisor mode */
- mov x1, sp
- msr sp_el2, x1
- mrs x1, vbar_el3
- msr vbar_el2, x1 /* Migrate VBAR */
- mov x1, #0x1da
- msr spsr_el3, x1
- msr elr_el3, x7
- mov x0, #0
- mov x1, x8
- mov x2, x9
- eret
+ENDPROC(armv8_switch_to_el2_aarch32)
This whole thing looks like a copy of armv8_switch_to_el2_m. Just parameterize that one and put the few bits that are different in macro ifs.
[Alison Wang] Yes, they are similar because they both switch from EL3 to EL2. But some bits are different because one switch from AArch64 EL3 to AArch64 EL2 and the other switch from AArch64 EL3 to AArch32 EL2. The parameters need to use too.
+/*
- x0: kernel entry point
- x1: machine nr
- x2: fdt address
- */
+ENTRY(armv8_switch_to_el1_aarch32)
- switch_el x3, 0f, 1f, 0f
+0: ret +1:
- mov x7, x0
- mov x8, x1
- mov x9, x2
- /* Initialize Generic Timers */
- mrs x0, cnthctl_el2
- orr x0, x0, #0x3 /* Enable EL1 access to timers */
- msr cnthctl_el2, x0
- msr cntvoff_el2, xzr
/* Initialize MPID/MPIDR registers */
- mrs x0, midr_el1
- mrs x1, mpidr_el1
- msr vpidr_el2, x0
- msr vmpidr_el2, x1
/* Disable coprocessor traps */
- mov x0, #0x33ff
- msr cptr_el2, x0 /* Disable coprocessor traps to EL2 */
msr hstr_el2, xzr /* Disable coprocessor traps
to EL2 */
mov x0, #3 << 20
msr cpacr_el1, x0 /* Enable FP/SIMD at EL1 */
- /* Initialize HCR_EL2 */
- mov x0, #(0 << 31) /* 32bit EL1 */
- orr x0, x0, #(1 << 29) /* Disable HVC */
- msr hcr_el2, x0
- mov x0, #0x0800
- movk x0, #0x30d0, lsl #16
- msr sctlr_el1, x0
- /* Return to AArch32 Supervisor mode */
- mov x0, sp
- msr sp_el1, x0 /* Migrate SP */
- mrs x0, vbar_el2
- msr vbar_el1, x0 /* Migrate VBAR */
- mov x0, #0x1d3
- msr spsr_el2, x0
- msr elr_el2, x7
- mov x0, #0
- mov x1, x8
- mov x2, x9
- eret
+ENDPROC(armv8_switch_to_el1_aarch32)
Does anybody really care about jumping to el1?
[Alison Wang] I am not sure if anybody will jump to el1. Anyway, I provide this support. If anybody want to jump to el1, he can just define CONFIG_ARMV8_SWITCH_TO_EL1.
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 9ae890a..bb87cf0 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -102,6 +102,8 @@ void __asm_switch_ttbr(u64 new_ttbr);
void armv8_switch_to_el2(void); void armv8_switch_to_el1(void); +void armv8_switch_to_el2_aarch32(u64 entry_point, u64 mach_nr, u64 +fdt_addr); void armv8_switch_to_el1_aarch32(u64 entry_point, u64 +mach_nr, u64 fdt_addr); void gic_init(void); void gic_send_sgi(unsigned long sgino); void wait_for_wakeup(void); diff --git a/arch/arm/lib/bootm.c b/arch/arm/lib/bootm.c index 0838d89..a39c3d2 100644 --- a/arch/arm/lib/bootm.c +++ b/arch/arm/lib/bootm.c @@ -286,8 +286,24 @@ static void boot_jump_linux(bootm_headers_t
*images, int flag)
announce_and_cleanup(fake);
if (!fake) {
do_nonsec_virt_switch();
kernel_entry(images->ft_addr, NULL, NULL, NULL);
if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) &&
(images->os.arch == IH_ARCH_ARM)) {
smp_kick_all_cpus();
dcache_disable();
+#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
armv8_switch_to_el2();
armv8_switch_to_el1_aarch32((u64)images->ep,
(u64)gd->bd->bi_arch_number,
(u64)images->ft_addr);
+#else
armv8_switch_to_el2_aarch32((u64)images->ep,
(u64)gd->bd->bi_arch_number,
(u64)images->ft_addr);
+#endif
Does this compile on 32bit targets?
[Alison Wang] This compiles for 64-bit targets, to support a 64-bit U-Boot loading a 32-bit kernel. It will not affect 32-bit targets.
Best Regards, Alison Wang

On 16.05.16 07:28, Huan Wang wrote:
On 13.05.16 10:40, Alison Wang wrote:
To support loading a 32-bit OS, the execution state will change from AArch64 to AArch32 when jumping to kernel.
The architecture information will be got through checking FIT image, then U-Boot will load 32-bit OS or 64-bit OS automatically.
Signed-off-by: Ebony Zhu ebony.zhu@nxp.com Signed-off-by: Alison Wang alison.wang@nxp.com Signed-off-by: Chenhui Zhao chenhui.zhao@nxp.com
arch/arm/cpu/armv8/transition.S | 100
++++++++++++++++++++++++++++++++++++++++
arch/arm/include/asm/system.h | 2 + arch/arm/lib/bootm.c | 20 +++++++- common/image-fit.c | 12 ++++- 4 files changed, 131 insertions(+), 3 deletions(-)
diff --git a/arch/arm/cpu/armv8/transition.S b/arch/arm/cpu/armv8/transition.S index 253a39b..9d7a17a 100644 --- a/arch/arm/cpu/armv8/transition.S +++ b/arch/arm/cpu/armv8/transition.S @@ -21,3 +21,103 @@ ENTRY(armv8_switch_to_el1) 0: ret 1: armv8_switch_to_el1_m x0, x1 ENDPROC(armv8_switch_to_el1)
+/*
- x0: kernel entry point
- x1: machine nr
- x2: fdt address
- */
+ENTRY(armv8_switch_to_el2_aarch32)
- switch_el x3, 1f, 0f, 0f
+0: ret +1:
- mov x7, x0
- mov x8, x1
- mov x9, x2
- /* 32bit EL2 | HCE | SMD | RES1 (Bits[5:4]) | Non-secure EL0/EL1
*/
- mov x1, 0x1b1
- msr scr_el3, x1
- msr cptr_el3, xzr /* Disable coprocessor traps to EL3 */
- mov x1, 0x33ff
- msr cptr_el2, x1 /* Disable coprocessor traps to EL2 */
- /* Initialize Generic Timers */
- msr cntvoff_el2, xzr
- mov x1, #0x0830
- movk x1, #0x30c5, lsl #16
- msr sctlr_el2, x1
Why is this necessary?
[Alison Wang] SCTLR_EL2 is architecturally mapped to AArch32 register HSCTLR. HSCTLR will provide control of the system operation in Hyp mode.
It still doesn't explain why you move magical values into a random register that is not set in the 64-bit path.
Please make this code more readable :). Try to #define values for the bits that you set. Add comments explaining why you do what you do.
- /* Return to AArch32 Hypervisor mode */
- mov x1, sp
- msr sp_el2, x1
- mrs x1, vbar_el3
- msr vbar_el2, x1 /* Migrate VBAR */
- mov x1, #0x1da
- msr spsr_el3, x1
- msr elr_el3, x7
- mov x0, #0
- mov x1, x8
- mov x2, x9
- eret
+ENDPROC(armv8_switch_to_el2_aarch32)
This whole thing looks like a copy of armv8_switch_to_el2_m. Just parameterize that one and put the few bits that are different in macro ifs.
[Alison Wang] Yes, they are similar because they both switch from EL3 to EL2. But some bits are different because one switch from AArch64 EL3 to AArch64 EL2 and the other switch from AArch64 EL3 to AArch32 EL2. The parameters need to use too.
Yes, so I think it makes a lot of sense to combine the jump-to-64-bit-el2 and jump-to-32-bit-el2 functions be a single implementation. That way there's less chance an accidental difference creeps in.
+/*
- x0: kernel entry point
- x1: machine nr
- x2: fdt address
- */
+ENTRY(armv8_switch_to_el1_aarch32)
- switch_el x3, 0f, 1f, 0f
+0: ret +1:
- mov x7, x0
- mov x8, x1
- mov x9, x2
- /* Initialize Generic Timers */
- mrs x0, cnthctl_el2
- orr x0, x0, #0x3 /* Enable EL1 access to timers */
- msr cnthctl_el2, x0
- msr cntvoff_el2, xzr
/* Initialize MPID/MPIDR registers */
- mrs x0, midr_el1
- mrs x1, mpidr_el1
- msr vpidr_el2, x0
- msr vmpidr_el2, x1
/* Disable coprocessor traps */
- mov x0, #0x33ff
- msr cptr_el2, x0 /* Disable coprocessor traps to EL2 */
msr hstr_el2, xzr /* Disable coprocessor traps
to EL2 */
mov x0, #3 << 20
msr cpacr_el1, x0 /* Enable FP/SIMD at EL1 */
- /* Initialize HCR_EL2 */
- mov x0, #(0 << 31) /* 32bit EL1 */
- orr x0, x0, #(1 << 29) /* Disable HVC */
- msr hcr_el2, x0
- mov x0, #0x0800
- movk x0, #0x30d0, lsl #16
- msr sctlr_el1, x0
- /* Return to AArch32 Supervisor mode */
- mov x0, sp
- msr sp_el1, x0 /* Migrate SP */
- mrs x0, vbar_el2
- msr vbar_el1, x0 /* Migrate VBAR */
- mov x0, #0x1d3
- msr spsr_el2, x0
- msr elr_el2, x7
- mov x0, #0
- mov x1, x8
- mov x2, x9
- eret
+ENDPROC(armv8_switch_to_el1_aarch32)
Does anybody really care about jumping to el1?
[Alison Wang] I am not sure if anybody will jump to el1. Anyway, I provide this support. If anybody want to jump to el1, he can just define CONFIG_ARMV8_SWITCH_TO_EL1.
Well, yes, I'm actually questioning the existence of the define. Why did it get introduced? Is there any case where it's actually sensible?
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 9ae890a..bb87cf0 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -102,6 +102,8 @@ void __asm_switch_ttbr(u64 new_ttbr);
void armv8_switch_to_el2(void); void armv8_switch_to_el1(void); +void armv8_switch_to_el2_aarch32(u64 entry_point, u64 mach_nr, u64 +fdt_addr); void armv8_switch_to_el1_aarch32(u64 entry_point, u64 +mach_nr, u64 fdt_addr); void gic_init(void); void gic_send_sgi(unsigned long sgino); void wait_for_wakeup(void); diff --git a/arch/arm/lib/bootm.c b/arch/arm/lib/bootm.c index 0838d89..a39c3d2 100644 --- a/arch/arm/lib/bootm.c +++ b/arch/arm/lib/bootm.c @@ -286,8 +286,24 @@ static void boot_jump_linux(bootm_headers_t
*images, int flag)
announce_and_cleanup(fake);
if (!fake) {
do_nonsec_virt_switch();
kernel_entry(images->ft_addr, NULL, NULL, NULL);
if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) &&
(images->os.arch == IH_ARCH_ARM)) {
smp_kick_all_cpus();
dcache_disable();
+#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
armv8_switch_to_el2();
armv8_switch_to_el1_aarch32((u64)images->ep,
(u64)gd->bd->bi_arch_number,
(u64)images->ft_addr);
+#else
armv8_switch_to_el2_aarch32((u64)images->ep,
(u64)gd->bd->bi_arch_number,
(u64)images->ft_addr);
+#endif
Does this compile on 32bit targets?
[Alison Wang] This compiles for 64-bit targets, to support a 64-bit U-Boot loading a 32-bit kernel. It will not affect 32-bit targets.
Ah, we're inside an #ifdef CONFIG_ARM64.
Alex

On 16.05.16 07:28, Huan Wang wrote:
On 13.05.16 10:40, Alison Wang wrote:
To support loading a 32-bit OS, the execution state will change from AArch64 to AArch32 when jumping to kernel.
The architecture information will be got through checking FIT image, then U-Boot will load 32-bit OS or 64-bit OS automatically.
Signed-off-by: Ebony Zhu ebony.zhu@nxp.com Signed-off-by: Alison Wang alison.wang@nxp.com Signed-off-by: Chenhui Zhao chenhui.zhao@nxp.com
arch/arm/cpu/armv8/transition.S | 100
++++++++++++++++++++++++++++++++++++++++
arch/arm/include/asm/system.h | 2 + arch/arm/lib/bootm.c | 20 +++++++- common/image-fit.c | 12 ++++- 4 files changed, 131 insertions(+), 3 deletions(-)
diff --git a/arch/arm/cpu/armv8/transition.S b/arch/arm/cpu/armv8/transition.S index 253a39b..9d7a17a 100644 --- a/arch/arm/cpu/armv8/transition.S +++ b/arch/arm/cpu/armv8/transition.S @@ -21,3 +21,103 @@ ENTRY(armv8_switch_to_el1) 0: ret 1: armv8_switch_to_el1_m x0, x1 ENDPROC(armv8_switch_to_el1)
+/*
- x0: kernel entry point
- x1: machine nr
- x2: fdt address
- */
+ENTRY(armv8_switch_to_el2_aarch32)
- switch_el x3, 1f, 0f, 0f
+0: ret +1:
- mov x7, x0
- mov x8, x1
- mov x9, x2
- /* 32bit EL2 | HCE | SMD | RES1 (Bits[5:4]) | Non-secure EL0/EL1
*/
- mov x1, 0x1b1
- msr scr_el3, x1
- msr cptr_el3, xzr /* Disable coprocessor traps to EL3 */
- mov x1, 0x33ff
- msr cptr_el2, x1 /* Disable coprocessor traps to EL2 */
- /* Initialize Generic Timers */
- msr cntvoff_el2, xzr
- mov x1, #0x0830
- movk x1, #0x30c5, lsl #16
- msr sctlr_el2, x1
Why is this necessary?
[Alison Wang] SCTLR_EL2 is architecturally mapped to AArch32 register
HSCTLR.
HSCTLR will provide control of the system operation in Hyp mode.
It still doesn't explain why you move magical values into a random register that is not set in the 64-bit path.
Please make this code more readable :). Try to #define values for the bits that you set. Add comments explaining why you do what you do.
[Alison Wang] This setting is copied from armv8_switch_to_el2_m. The comment will be added.
/* Initialize SCTLR_EL2 * * setting RES1 bits (29,28,23,22,18,16,11,5,4) to 1 * and RES0 bits (31,30,27,26,24,21,20,17,15-13,10-6) + * EE,WXN,I,SA,C,A,M to 0 */
- /* Return to AArch32 Hypervisor mode */
- mov x1, sp
- msr sp_el2, x1
- mrs x1, vbar_el3
- msr vbar_el2, x1 /* Migrate VBAR */
- mov x1, #0x1da
- msr spsr_el3, x1
- msr elr_el3, x7
- mov x0, #0
- mov x1, x8
- mov x2, x9
- eret
+ENDPROC(armv8_switch_to_el2_aarch32)
This whole thing looks like a copy of armv8_switch_to_el2_m. Just parameterize that one and put the few bits that are different in macro ifs.
[Alison Wang] Yes, they are similar because they both switch from EL3
to EL2.
But some bits are different because one switch from AArch64 EL3 to AArch64 EL2 and the other switch from AArch64 EL3 to AArch32 EL2. The parameters need to use too.
Yes, so I think it makes a lot of sense to combine the jump-to-64-bit-el2 and jump-to-32-bit-el2 functions be a single implementation. That way there's less chance an accidental difference creeps in.
[Alison Wang] Ok, I agree it makes sense. I will try to realize it in the next version.
+/*
- x0: kernel entry point
- x1: machine nr
- x2: fdt address
- */
+ENTRY(armv8_switch_to_el1_aarch32)
- switch_el x3, 0f, 1f, 0f
+0: ret +1:
- mov x7, x0
- mov x8, x1
- mov x9, x2
- /* Initialize Generic Timers */
- mrs x0, cnthctl_el2
- orr x0, x0, #0x3 /* Enable EL1 access to timers */
- msr cnthctl_el2, x0
- msr cntvoff_el2, xzr
/* Initialize MPID/MPIDR registers */
- mrs x0, midr_el1
- mrs x1, mpidr_el1
- msr vpidr_el2, x0
- msr vmpidr_el2, x1
/* Disable coprocessor traps */
- mov x0, #0x33ff
- msr cptr_el2, x0 /* Disable coprocessor traps to EL2 */
msr hstr_el2, xzr /* Disable coprocessor traps
to EL2 */
mov x0, #3 << 20
msr cpacr_el1, x0 /* Enable FP/SIMD at EL1 */
- /* Initialize HCR_EL2 */
- mov x0, #(0 << 31) /* 32bit EL1 */
- orr x0, x0, #(1 << 29) /* Disable HVC */
- msr hcr_el2, x0
- mov x0, #0x0800
- movk x0, #0x30d0, lsl #16
- msr sctlr_el1, x0
- /* Return to AArch32 Supervisor mode */
- mov x0, sp
- msr sp_el1, x0 /* Migrate SP */
- mrs x0, vbar_el2
- msr vbar_el1, x0 /* Migrate VBAR */
- mov x0, #0x1d3
- msr spsr_el2, x0
- msr elr_el2, x7
- mov x0, #0
- mov x1, x8
- mov x2, x9
- eret
+ENDPROC(armv8_switch_to_el1_aarch32)
Does anybody really care about jumping to el1?
[Alison Wang] I am not sure if anybody will jump to el1. Anyway, I provide this support. If anybody want to jump to el1, he can just define CONFIG_ARMV8_SWITCH_TO_EL1.
Well, yes, I'm actually questioning the existence of the define. Why did it get introduced? Is there any case where it's actually sensible?
[Alison Wang] This define is introduced a long time ago and there is armv8_switch_to_el1_m for it. I think it makes sense, because some users want to switch from EL3 to EL1 in U-Boot.
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index 9ae890a..bb87cf0 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -102,6 +102,8 @@ void __asm_switch_ttbr(u64 new_ttbr);
void armv8_switch_to_el2(void); void armv8_switch_to_el1(void); +void armv8_switch_to_el2_aarch32(u64 entry_point, u64 mach_nr, u64 +fdt_addr); void armv8_switch_to_el1_aarch32(u64 entry_point, u64 +mach_nr, u64 fdt_addr); void gic_init(void); void gic_send_sgi(unsigned long sgino); void wait_for_wakeup(void); diff --git a/arch/arm/lib/bootm.c b/arch/arm/lib/bootm.c index 0838d89..a39c3d2 100644 --- a/arch/arm/lib/bootm.c +++ b/arch/arm/lib/bootm.c @@ -286,8 +286,24 @@ static void boot_jump_linux(bootm_headers_t
*images, int flag)
announce_and_cleanup(fake);
if (!fake) {
do_nonsec_virt_switch();
kernel_entry(images->ft_addr, NULL, NULL, NULL);
if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) &&
(images->os.arch == IH_ARCH_ARM)) {
smp_kick_all_cpus();
dcache_disable();
+#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
armv8_switch_to_el2();
armv8_switch_to_el1_aarch32((u64)images->ep,
(u64)gd->bd->bi_arch_number,
(u64)images->ft_addr);
+#else
armv8_switch_to_el2_aarch32((u64)images->ep,
(u64)gd->bd->bi_arch_number,
(u64)images->ft_addr);
+#endif
Does this compile on 32bit targets?
[Alison Wang] This compiles for 64-bit targets, to support a 64-bit U-Boot loading a 32-bit kernel. It will not affect 32-bit targets.
Ah, we're inside an #ifdef CONFIG_ARM64.
[Alison Wang] Yes.
Best Regards, Alison Wang

On 05/13/2016 01:50 AM, Alison Wang wrote:
To support loading a 32-bit OS, the execution state will change from AArch64 to AArch32 when jumping to kernel.
The architecture information will be got through checking FIT image, then U-Boot will load 32-bit OS or 64-bit OS automatically.
Signed-off-by: Ebony Zhu ebony.zhu@nxp.com Signed-off-by: Alison Wang alison.wang@nxp.com Signed-off-by: Chenhui Zhao chenhui.zhao@nxp.com
arch/arm/cpu/armv8/transition.S | 100 ++++++++++++++++++++++++++++++++++++++++ arch/arm/include/asm/system.h | 2 + arch/arm/lib/bootm.c | 20 +++++++- common/image-fit.c | 12 ++++- 4 files changed, 131 insertions(+), 3 deletions(-)
If you respin the patch for any reason, please remember to add the correct version number and a change log.
York

On 05/13/2016 01:50 AM, Alison Wang wrote:
To support loading a 32-bit OS, the execution state will change from AArch64 to AArch32 when jumping to kernel.
The architecture information will be got through checking FIT image, then U-Boot will load 32-bit OS or 64-bit OS automatically.
Signed-off-by: Ebony Zhu ebony.zhu@nxp.com Signed-off-by: Alison Wang alison.wang@nxp.com Signed-off-by: Chenhui Zhao chenhui.zhao@nxp.com
arch/arm/cpu/armv8/transition.S | 100
++++++++++++++++++++++++++++++++++++++++
arch/arm/include/asm/system.h | 2 + arch/arm/lib/bootm.c | 20 +++++++- common/image-fit.c | 12 ++++- 4 files changed, 131 insertions(+), 3 deletions(-)
If you respin the patch for any reason, please remember to add the correct version number and a change log.
[Alison Wang] Yes. This is the first version I sent to upstream, I will add Version number and change log when I send the next version.
Thanks.
Best Regards, Alison Wang

Spin-table method is used for secondary cores to load 32-bit OS. The architecture information will be got through checking FIT image and saved in the os_arch element of spin-table, then the secondary cores will check os_arch and jump to 32-bit OS or 64-bit OS automatically.
Signed-off-by: Alison Wang alison.wang@nxp.com Signed-off-by: Chenhui Zhao chenhui.zhao@nxp.com --- arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S | 21 +++++++++++++++++++++ arch/arm/cpu/armv8/fsl-layerscape/mp.c | 10 ++++++++++ arch/arm/include/asm/arch-fsl-layerscape/mp.h | 6 ++++++ arch/arm/lib/bootm.c | 5 +++++ 4 files changed, 42 insertions(+)
diff --git a/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S b/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S index 04831ca..85d1d4b 100644 --- a/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S +++ b/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S @@ -13,6 +13,7 @@ #ifdef CONFIG_MP #include <asm/arch/mp.h> #endif +#include <asm/u-boot.h>
ENTRY(lowlevel_init) mov x29, lr /* Save LR */ @@ -320,6 +321,11 @@ ENTRY(secondary_boot_func) gic_wait_for_interrupt_m x0, w1 #endif
+ ldr x5, [x11, #24] + ldr x6, =IH_ARCH_DEFAULT + cmp x6, x5 + b.ne slave_cpu + bl secondary_switch_to_el2 #ifdef CONFIG_ARMV8_SWITCH_TO_EL1 bl secondary_switch_to_el1 @@ -337,6 +343,21 @@ slave_cpu: tbz x1, #25, cpu_is_le rev x0, x0 /* BE to LE conversion */ cpu_is_le: + + ldr x5, [x11, #24] + ldr x6, =IH_ARCH_DEFAULT + cmp x6, x5 + b.eq 1f + +#ifdef CONFIG_ARMV8_SWITCH_TO_EL1 + bl secondary_switch_to_el2 + ldr x0, [x11] + bl armv8_switch_to_el1_aarch32 +#else + bl armv8_switch_to_el2_aarch32 +#endif + +1: br x0 /* branch to the given address */ ENDPROC(secondary_boot_func)
diff --git a/arch/arm/cpu/armv8/fsl-layerscape/mp.c b/arch/arm/cpu/armv8/fsl-layerscape/mp.c index df7ffb8..dd91550 100644 --- a/arch/arm/cpu/armv8/fsl-layerscape/mp.c +++ b/arch/arm/cpu/armv8/fsl-layerscape/mp.c @@ -22,6 +22,16 @@ phys_addr_t determine_mp_bootpg(void) return (phys_addr_t)&secondary_boot_code; }
+void update_os_arch_secondary_cores(uint8_t os_arch) +{ + u64 *table = get_spin_tbl_addr(); + int i; + + for (i = 1; i < CONFIG_MAX_CPUS; i++) + table[i * WORDS_PER_SPIN_TABLE_ENTRY + + SPIN_TABLE_ELEM_OS_ARCH_IDX] = os_arch; +} + int fsl_layerscape_wake_seconday_cores(void) { struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR); diff --git a/arch/arm/include/asm/arch-fsl-layerscape/mp.h b/arch/arm/include/asm/arch-fsl-layerscape/mp.h index e46e076..55f0e0c 100644 --- a/arch/arm/include/asm/arch-fsl-layerscape/mp.h +++ b/arch/arm/include/asm/arch-fsl-layerscape/mp.h @@ -13,6 +13,7 @@ * uint64_t entry_addr; * uint64_t status; * uint64_t lpid; +* uint64_t os_arch; * }; * we pad this struct to 64 bytes so each entry is in its own cacheline * the actual spin table is an array of these structures @@ -20,6 +21,7 @@ #define SPIN_TABLE_ELEM_ENTRY_ADDR_IDX 0 #define SPIN_TABLE_ELEM_STATUS_IDX 1 #define SPIN_TABLE_ELEM_LPID_IDX 2 +#define SPIN_TABLE_ELEM_OS_ARCH_IDX 3 #define WORDS_PER_SPIN_TABLE_ENTRY 8 /* pad to 64 bytes */ #define SPIN_TABLE_ELEM_SIZE 64
@@ -35,4 +37,8 @@ phys_addr_t determine_mp_bootpg(void); void secondary_boot_func(void); int is_core_online(u64 cpu_id); #endif + +#define IH_ARCH_ARM 2 /* ARM */ +#define IH_ARCH_ARM64 22 /* ARM64 */ + #endif /* _FSL_LAYERSCAPE_MP_H */ diff --git a/arch/arm/lib/bootm.c b/arch/arm/lib/bootm.c index a39c3d2..0e7e321 100644 --- a/arch/arm/lib/bootm.c +++ b/arch/arm/lib/bootm.c @@ -268,6 +268,10 @@ bool armv7_boot_nonsec(void) } #endif
+__weak void update_os_arch_secondary_cores(uint8_t os_arch) +{ +} + /* Subcommand: GO */ static void boot_jump_linux(bootm_headers_t *images, int flag) { @@ -286,6 +290,7 @@ static void boot_jump_linux(bootm_headers_t *images, int flag) announce_and_cleanup(fake);
if (!fake) { + update_os_arch_secondary_cores(images->os.arch); if ((IH_ARCH_DEFAULT == IH_ARCH_ARM64) && (images->os.arch == IH_ARCH_ARM)) { smp_kick_all_cpus();

On 13.05.16 10:40, Alison Wang wrote:
The spin-table method is used for the secondary cores to load a 32-bit OS. The architecture information will be obtained by checking the FIT image and saved in the os_arch element of the spin-table; the secondary cores will then check os_arch and jump to the 32-bit or 64-bit OS automatically.
Signed-off-by: Alison Wang alison.wang@nxp.com Signed-off-by: Chenhui Zhao chenhui.zhao@nxp.com
arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S | 21 +++++++++++++++++++++ arch/arm/cpu/armv8/fsl-layerscape/mp.c | 10 ++++++++++ arch/arm/include/asm/arch-fsl-layerscape/mp.h | 6 ++++++ arch/arm/lib/bootm.c | 5 +++++ 4 files changed, 42 insertions(+)
diff --git a/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S b/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S index 04831ca..85d1d4b 100644 --- a/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S +++ b/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S @@ -13,6 +13,7 @@ #ifdef CONFIG_MP #include <asm/arch/mp.h> #endif +#include <asm/u-boot.h>
ENTRY(lowlevel_init) mov x29, lr /* Save LR */ @@ -320,6 +321,11 @@ ENTRY(secondary_boot_func) gic_wait_for_interrupt_m x0, w1 #endif
- ldr x5, [x11, #24]
- ldr x6, =IH_ARCH_DEFAULT
- cmp x6, x5
- b.ne slave_cpu
- bl secondary_switch_to_el2
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1 bl secondary_switch_to_el1 @@ -337,6 +343,21 @@ slave_cpu: tbz x1, #25, cpu_is_le rev x0, x0 /* BE to LE conversion */ cpu_is_le:
- ldr x5, [x11, #24]
- ldr x6, =IH_ARCH_DEFAULT
- cmp x6, x5
- b.eq 1f
+#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
- bl secondary_switch_to_el2
- ldr x0, [x11]
- bl armv8_switch_to_el1_aarch32
+#else
- bl armv8_switch_to_el2_aarch32
+#endif
Ah, so conditionally you also need to invoke the aarch64 variant. Why not just make it an actual runtime parameter to the existing function?
Also as a side remark, the "clean" solution would obviously be to support PSCI and just check which mode the caller was in.
Alex

On 05/13/2016 04:15 AM, Alexander Graf wrote:
On 13.05.16 10:40, Alison Wang wrote:
The spin-table method is used for the secondary cores to load a 32-bit OS. The architecture information will be obtained by checking the FIT image and saved in the os_arch element of the spin-table; the secondary cores will then check os_arch and jump to the 32-bit or 64-bit OS automatically.
Signed-off-by: Alison Wang alison.wang@nxp.com Signed-off-by: Chenhui Zhao chenhui.zhao@nxp.com
arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S | 21 +++++++++++++++++++++ arch/arm/cpu/armv8/fsl-layerscape/mp.c | 10 ++++++++++ arch/arm/include/asm/arch-fsl-layerscape/mp.h | 6 ++++++ arch/arm/lib/bootm.c | 5 +++++ 4 files changed, 42 insertions(+)
diff --git a/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S b/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S index 04831ca..85d1d4b 100644 --- a/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S +++ b/arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S @@ -13,6 +13,7 @@ #ifdef CONFIG_MP #include <asm/arch/mp.h> #endif +#include <asm/u-boot.h>
ENTRY(lowlevel_init) mov x29, lr /* Save LR */ @@ -320,6 +321,11 @@ ENTRY(secondary_boot_func) gic_wait_for_interrupt_m x0, w1 #endif
- ldr x5, [x11, #24]
- ldr x6, =IH_ARCH_DEFAULT
- cmp x6, x5
- b.ne slave_cpu
- bl secondary_switch_to_el2
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1 bl secondary_switch_to_el1 @@ -337,6 +343,21 @@ slave_cpu: tbz x1, #25, cpu_is_le rev x0, x0 /* BE to LE conversion */ cpu_is_le:
- ldr x5, [x11, #24]
- ldr x6, =IH_ARCH_DEFAULT
- cmp x6, x5
- b.eq 1f
+#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
- bl secondary_switch_to_el2
- ldr x0, [x11]
- bl armv8_switch_to_el1_aarch32
+#else
- bl armv8_switch_to_el2_aarch32
+#endif
Ah, so conditionally you also need to invoke the aarch64 variant. Why not just make it an actual runtime parameter to the existing function?
Also as a side remark, the "clean" solution would obviously be to support PSCI and just check which mode the caller was in.
Alex,
Let's do it one step at a time. The patches for PSCI are under review.
York
participants (4)
-
Alexander Graf
-
Alison Wang
-
Huan Wang
-
York Sun