
Hi,

On 8/19/21 4:53 PM, Peter Hoyes wrote:

From: Peter Hoyes <Peter.Hoyes@arm.com>
Armv8r64 is the first Armv8 platform that has only a PMSA (Protected Memory System Architecture) at the current exception level. The architecture supplement for Armv8r64 describes new fields in ID_AA64MMFR0_EL1 which can be used to detect whether a VMSA (Virtual Memory System Architecture) or a PMSA is present. These fields are RES0 on Armv8-A.
Add logic to read these fields and, to protect the memory used by U-Boot, initialize the MPU instead of the MMU during init, then clear the MPU regions before the transition to the next boot stage.
Provide a default (blank) MPU memory map, which can be overridden by board configurations.
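For illustration, a board could override the weak mpu_mem_map pointer along these lines (the region values below are made up for the example and are not part of this patch; MT_NORMAL is the normal-memory MAIR index from asm/armv8/mmu.h):

#include <asm/armv8/mpu.h>

/* Example only: one normal-memory region covering the first 4 GiB */
static struct mpu_region board_mpu_mem_map[] = {
        {
                .start = 0x0UL,
                .end   = 0xffffffffUL,             /* inclusive limit */
                .attrs = PRLAR_ATTRIDX(MT_NORMAL), /* MAIR index */
        },
        { 0, } /* terminator: end == 0 and attrs == 0 */
};

struct mpu_region *mpu_mem_map = board_mpu_mem_map;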
So while this MPU feature is indeed architecturally discoverable, and that works fine even on existing ARMv8-A cores, this adds some code to the generic ARMv8 code path, which almost no one needs. So to avoid code bloat, I was wondering if we should move this code to a separate file and only include it when a new Kconfig symbol (for v8-R64) is defined? The new board can then select this symbol. We can then have either weak functions or static inline versions of the new functions (el_has_mmu(), mpu_setup()), to keep the existing code path.
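Something like this sketch (untested, and the Kconfig symbol name is only a suggestion):

/* in asm/armv8/mpu.h or similar */
#ifdef CONFIG_ARMV8_MPU /* hypothetical new symbol, selected by the v8-R64 board */
bool el_has_mmu(void);
void mpu_setup(void);
void mpu_clear_regions(void);
#else
/* ARMv8-A path: an MMU is always present, the MPU hooks compile away */
static inline bool el_has_mmu(void) { return true; }
static inline void mpu_setup(void) {}
static inline void mpu_clear_regions(void) {}
#endif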
Cheers, Andre
Signed-off-by: Peter Hoyes <Peter.Hoyes@arm.com>
 arch/arm/cpu/armv8/cache_v8.c    | 96 +++++++++++++++++++++++++++++++-
 arch/arm/include/asm/armv8/mpu.h | 61 ++++++++++++++++++++
 2 files changed, 154 insertions(+), 3 deletions(-)
 create mode 100644 arch/arm/include/asm/armv8/mpu.h
diff --git a/arch/arm/cpu/armv8/cache_v8.c b/arch/arm/cpu/armv8/cache_v8.c
index 3de18c7675..46625675bd 100644
--- a/arch/arm/cpu/armv8/cache_v8.c
+++ b/arch/arm/cpu/armv8/cache_v8.c
@@ -15,6 +15,7 @@
 #include <asm/global_data.h>
 #include <asm/system.h>
 #include <asm/armv8/mmu.h>
+#include <asm/armv8/mpu.h>
 
 DECLARE_GLOBAL_DATA_PTR;
 
@@ -365,6 +366,86 @@ __weak u64 get_page_table_size(void)
         return size;
 }
 
+static void mpu_clear_regions(void)
+{
+        int i;
+
+        for (i = 0; mpu_mem_map[i].end || mpu_mem_map[i].attrs; i++)
+                setup_el2_mpu_region(i, 0, 0);
+}
+
+static struct mpu_region default_mpu_mem_map[] = { {0,} };
+__weak struct mpu_region *mpu_mem_map = default_mpu_mem_map;
+
+static void mpu_setup(void)
+{
+        int i;
+
+        if (current_el() != 2)
+                panic("MPU configuration is only supported at EL2");
+
+        set_sctlr(get_sctlr() & ~(CR_M | CR_WXN));
+        asm volatile("msr MAIR_EL2, %0" : : "r" (MEMORY_ATTRIBUTES));
+
+        for (i = 0; mpu_mem_map[i].end || mpu_mem_map[i].attrs; i++) {
+                setup_el2_mpu_region(i,
+                                     PRBAR_ADDRESS(mpu_mem_map[i].start)
+                                     | PRBAR_OUTER_SH | PRBAR_AP_RW_ANY,
+                                     PRLAR_ADDRESS(mpu_mem_map[i].end)
+                                     | mpu_mem_map[i].attrs | PRLAR_EN_BIT);
+        }
+
+        set_sctlr(get_sctlr() | CR_M);
+}
+
+static bool el_has_mmu(void)
+{
+        uint64_t id_aa64mmfr0;
+        uint64_t msa;
+        uint64_t msa_frac;
+
+        asm volatile("mrs %0, id_aa64mmfr0_el1"
+                     : "=r" (id_aa64mmfr0) : : "cc");
+        msa = id_aa64mmfr0 & ID_AA64MMFR0_EL1_MSA_MASK;
+        msa_frac = id_aa64mmfr0 & ID_AA64MMFR0_EL1_MSA_FRAC_MASK;
+
+        switch (msa) {
+        case ID_AA64MMFR0_EL1_MSA_VMSA:
+                /*
+                 * VMSA supported in all translation regimes.
+                 * No support for PMSA.
+                 */
+                return true;
+        case ID_AA64MMFR0_EL1_MSA_USE_FRAC:
+                /* See MSA_frac for the supported MSAs. */
+                switch (msa_frac) {
+                case ID_AA64MMFR0_EL1_MSA_FRAC_NO_PMSA:
+                        /*
+                         * PMSA not supported in any translation
+                         * regime.
+                         */
+                        return true;
+                case ID_AA64MMFR0_EL1_MSA_FRAC_VMSA:
+                        /*
+                         * PMSA supported in all translation
+                         * regimes. No support for VMSA.
+                         */
+                        /* Fall through */
+                case ID_AA64MMFR0_EL1_MSA_FRAC_PMSA:
+                        /*
+                         * PMSA supported in all translation
+                         * regimes.
+                         */
+                        return false;
+                default:
+                        panic("Unsupported id_aa64mmfr0_el1 MSA_frac value");
+                }
+        default:
+                panic("Unsupported id_aa64mmfr0_el1 MSA value");
+        }
+}
+
 void setup_pgtables(void)
 {
         int i;
@@ -479,8 +560,13 @@ void dcache_enable(void)
         /* The data cache is not active unless the mmu is enabled */
         if (!(get_sctlr() & CR_M)) {
                 invalidate_dcache_all();
-                __asm_invalidate_tlb_all();
-                mmu_setup();
+
+                if (el_has_mmu()) {
+                        __asm_invalidate_tlb_all();
+                        mmu_setup();
+                } else {
+                        mpu_setup();
+                }
         }
 
         set_sctlr(get_sctlr() | CR_C);
@@ -499,7 +585,11 @@ void dcache_disable(void)
         set_sctlr(sctlr & ~(CR_C|CR_M));
 
         flush_dcache_all();
-        __asm_invalidate_tlb_all();
+
+        if (el_has_mmu())
+                __asm_invalidate_tlb_all();
+        else
+                mpu_clear_regions();
 }
 
 int dcache_status(void)
diff --git a/arch/arm/include/asm/armv8/mpu.h b/arch/arm/include/asm/armv8/mpu.h
new file mode 100644
index 0000000000..c6c8828325
--- /dev/null
+++ b/arch/arm/include/asm/armv8/mpu.h
@@ -0,0 +1,61 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0+
+ *
+ * (C) Copyright 2021 Arm Limited
+ */
+
+#ifndef _ASM_ARMV8_MPU_H_
+#define _ASM_ARMV8_MPU_H_
+
+#include <asm/armv8/mmu.h>
+#include <asm/barriers.h>
+#include <linux/stringify.h>
+
+#define PRSELR_EL2              S3_4_c6_c2_1
+#define PRBAR_EL2               S3_4_c6_c8_0
+#define PRLAR_EL2               S3_4_c6_c8_1
+#define MPUIR_EL2               S3_4_c0_c0_4
+
+#define PRBAR_ADDRESS(addr)     ((addr) & ~(0x3fULL))
+
+/* Access permissions */
+#define PRBAR_AP(val)           (((val) & 0x3) << 2)
+#define PRBAR_AP_RW_HYP         PRBAR_AP(0x0)
+#define PRBAR_AP_RW_ANY         PRBAR_AP(0x1)
+#define PRBAR_AP_RO_HYP         PRBAR_AP(0x2)
+#define PRBAR_AP_RO_ANY         PRBAR_AP(0x3)
+
+/* Shareability */
+#define PRBAR_SH(val)           (((val) & 0x3) << 4)
+#define PRBAR_NON_SH            PRBAR_SH(0x0)
+#define PRBAR_OUTER_SH          PRBAR_SH(0x2)
+#define PRBAR_INNER_SH          PRBAR_SH(0x3)
+
+/* Memory attributes (MAIR idx) */
+#define PRLAR_ATTRIDX(val)      (((val) & 0x7) << 1)
+#define PRLAR_EN_BIT            (0x1)
+#define PRLAR_ADDRESS(addr)     ((addr) & ~(0x3fULL))
+
+#ifndef __ASSEMBLY__
+
+static inline void setup_el2_mpu_region(uint8_t region, uint64_t base, uint64_t limit)
+{
+        /* Select the region to program, and make the selection visible */
+        asm volatile("msr " __stringify(PRSELR_EL2) ", %0" : : "r" (region));
+        isb();
+        /* Program base/attributes and limit/enable for the selected region */
+        asm volatile("msr " __stringify(PRBAR_EL2) ", %0" : : "r" (base));
+        asm volatile("msr " __stringify(PRLAR_EL2) ", %0" : : "r" (limit));
+        /* Ensure the writes complete before translation can depend on them */
+        dsb();
+        isb();
+}
+
+#endif
+
+struct mpu_region {
+        u64 start;
+        u64 end;
+        u64 attrs;
+};
+
+extern struct mpu_region *mpu_mem_map;
+
+#endif /* _ASM_ARMV8_MPU_H_ */
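As a worked example of the encoding above (the addresses are illustrative; MT_NORMAL is the normal-memory MAIR index from asm/armv8/mmu.h), a map entry { .start = 0x0, .end = 0xffffffff, .attrs = PRLAR_ATTRIDX(MT_NORMAL) } makes mpu_setup() program:

/* PRBAR_EL2: base 0x0 | outer shareable (0x2 << 4) | RW at any EL (0x1 << 2) */
base  = PRBAR_ADDRESS(0x0) | PRBAR_OUTER_SH | PRBAR_AP_RW_ANY; /* 0x24 */
/* PRLAR_EL2: limit rounded to 0xffffffc0 | AttrIdx << 1 | enable bit */
limit = PRLAR_ADDRESS(0xffffffffULL) | PRLAR_ATTRIDX(MT_NORMAL)
        | PRLAR_EN_BIT; /* 0xffffffc9, assuming MT_NORMAL is index 4 */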