
Using the E6500 L1 cache as initram requires the L2 cache to be enabled. Add support for enabling the L2 cache clusters.
Signed-off-by: York Sun <yorksun@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
---
 arch/powerpc/cpu/mpc85xx/cpu_init.c   |   39 ++++++++++++++++++++++-
 arch/powerpc/cpu/mpc85xx/start.S      |   57 +++++++++++++++++++++++++++++++++
 arch/powerpc/include/asm/immap_85xx.h |   43 +++++++++++++++++++++++++
 3 files changed, 138 insertions(+), 1 deletions(-)
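Note for reviewers (not part of the patch): the per-cluster enable sequence added in cpu_init.c and start.S below boils down to a flash invalidate plus lock flash clear through L2CSR0, polling until the hardware clears both bits, and only then setting L2E. A minimal standalone C sketch of that sequence, using a hypothetical enable_one_cluster_l2() helper; the bit values shown should be checked against asm/cache.h and the core reference manual:

	#include <stdint.h>

	#define L2CSR0_L2E	0x80000000u	/* L2 cache enable */
	#define L2CSR0_L2FI	0x00200000u	/* L2 flash invalidate */
	#define L2CSR0_L2LFC	0x00000400u	/* L2 lock flash clear */

	/*
	 * l2csr0 points at the cluster's L2CSR0 register (big-endian MMIO);
	 * the patch itself uses out_be32()/in_be32() on &l2cache->l2csr0.
	 */
	static void enable_one_cluster_l2(volatile uint32_t *l2csr0)
	{
		/* 1. Flash-invalidate the array and clear any flash locks */
		*l2csr0 = L2CSR0_L2FI | L2CSR0_L2LFC;

		/* 2. Hardware clears both bits when the operation completes */
		while (*l2csr0 & (L2CSR0_L2FI | L2CSR0_L2LFC))
			;

		/* 3. Only then set L2E to enable the cache */
		*l2csr0 = L2CSR0_L2E;
	}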
diff --git a/arch/powerpc/cpu/mpc85xx/cpu_init.c b/arch/powerpc/cpu/mpc85xx/cpu_init.c
index 2c78905..1a2858a 100644
--- a/arch/powerpc/cpu/mpc85xx/cpu_init.c
+++ b/arch/powerpc/cpu/mpc85xx/cpu_init.c
@@ -309,6 +309,34 @@ static void __fsl_serdes__init(void)
 }
 __attribute__((weak, alias("__fsl_serdes__init"))) void fsl_serdes_init(void);
 
+#ifdef CONFIG_E6500
+int enable_cluster_l2(void)
+{
+	int i = 0;
+	u32 cluster;
+	ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR);
+	struct ccsr_cluster_l2 *l2cache;
+
+	cluster = in_be32(&gur->tp_cluster[i++].lower);
+	if (cluster & TP_CLUSTER_EOC)
+		return 0;
+
+	do {
+		l2cache = (void *)(CONFIG_SYS_FSL_CLUSTER_1_L2 + i * 0x40000);
+		cluster = in_be32(&gur->tp_cluster[i++].lower);
+
+		printf("enable l2 for cluster %d %p\n", i, l2cache);
+
+		out_be32(&l2cache->l2csr0, L2CSR0_L2FI|L2CSR0_L2LFC);
+		while ((in_be32(&l2cache->l2csr0) & (L2CSR0_L2FI|L2CSR0_L2LFC)) != 0)
+			;
+		out_be32(&l2cache->l2csr0, L2CSR0_L2E);
+	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);
+
+	return 0;
+}
+#endif
+
 /*
  * Initialize L2 as cache.
  *
@@ -322,6 +350,11 @@ int cpu_init_r(void)
 #ifdef CONFIG_SYS_LBC_LCRR
 	volatile fsl_lbc_t *lbc = LBC_BASE_ADDR;
 #endif
+#ifdef CONFIG_L2_CACHE
+	volatile ccsr_l2cache_t *l2cache = (void *)CONFIG_SYS_MPC85xx_L2_ADDR;
+#elif defined(CONFIG_E6500)
+	struct ccsr_cluster_l2 *l2cache = (void *)CONFIG_SYS_FSL_CLUSTER_1_L2;
+#endif
 
 #if defined(CONFIG_SYS_P4080_ERRATUM_CPU22) || \
 	defined(CONFIG_SYS_FSL_ERRATUM_NMG_CPU_A011)
@@ -363,7 +396,6 @@ int cpu_init_r(void)
 	puts ("L2: ");
 
 #if defined(CONFIG_L2_CACHE)
-	volatile ccsr_l2cache_t *l2cache = (void *)CONFIG_SYS_MPC85xx_L2_ADDR;
 	volatile uint cache_ctl;
 	uint ver;
 	u32 l2siz_field;
@@ -474,6 +506,11 @@ int cpu_init_r(void)
 	}
 
 skip_l2:
+#elif defined(CONFIG_E6500)
+	if (l2cache->l2csr0 & L2CSR0_L2E)
+		printf("%d KB enabled\n", (l2cache->l2cfg0 & 0x3fff) * 64);
+
+	enable_cluster_l2();
 #else
 	puts("disabled\n");
 #endif
diff --git a/arch/powerpc/cpu/mpc85xx/start.S b/arch/powerpc/cpu/mpc85xx/start.S
index 2e1d265..739127f 100644
--- a/arch/powerpc/cpu/mpc85xx/start.S
+++ b/arch/powerpc/cpu/mpc85xx/start.S
@@ -762,6 +762,63 @@ delete_temp_tlbs:
 	tlbwe
 #endif /* #if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS) */
 
+#ifdef CONFIG_E6500
+create_ccsr_l2_tlb:
+	/*
+	 * Create a TLB for the MMR location of CCSR
+	 * to access L2CSR0 register
+	 */
+	lis	r0, FSL_BOOKE_MAS0(0, 0, 0)@h
+	ori	r0, r0, FSL_BOOKE_MAS0(0, 0, 0)@l
+	lis	r1, FSL_BOOKE_MAS1(1, 0, 0, 0, BOOKE_PAGESZ_4K)@h
+	ori	r1, r1, FSL_BOOKE_MAS1(1, 0, 0, 0, BOOKE_PAGESZ_4K)@l
+	lis	r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR + 0xC20000, (MAS2_I|MAS2_G))@h
+	ori	r2, r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR + 0xC20000, (MAS2_I|MAS2_G))@l
+	lis	r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW + 0xC20000, 0, (MAS3_SW|MAS3_SR))@h
+	ori	r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW + 0xC20000, 0, (MAS3_SW|MAS3_SR))@l
+	lis	r7, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
+	ori	r7, r7, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
+	mtspr	MAS0, r0
+	mtspr	MAS1, r1
+	mtspr	MAS2, r2
+	mtspr	MAS3, r3
+	mtspr	MAS7, r7
+	isync
+	msync
+	tlbwe
+
+enable_l2_e6500:
+	/* enable L2 cache */
+	lis	r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@h
+	ori	r3, r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@l
+	lis	r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@h
+	ori	r4, r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@l
+	sync
+	stw	r4, 0(r3)	/* invalidate L2 */
+1:	sync
+	lwz	r0, 0(r3)
+	twi	0, r0, 0
+	isync
+	and.	r1, r0, r4
+	bne	1b
+	lis	r4, L2CSR0_L2E@h
+	sync
+	stw	r4, 0(r3)	/* enable L2 */
+delete_ccsr_l2_tlb:
+	lis	r0, FSL_BOOKE_MAS0(0, 0, 0)@h
+	ori	r0, r0, FSL_BOOKE_MAS0(0, 0, 0)@l
+	li	r1, 0
+	lis	r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR, (MAS2_I|MAS2_G))@h
+	ori	r2, r2, FSL_BOOKE_MAS2(CONFIG_SYS_CCSRBAR, (MAS2_I|MAS2_G))@l
+	mtspr	MAS0, r0
+	mtspr	MAS1, r1
+	mtspr	MAS2, r2
+	isync
+	msync
+	tlbwe
+	li	r3, 0
+	mtspr	MAS7, r3
+#endif
+
 #ifdef CONFIG_SYS_FSL_ERRATUM_A004510
 #define DCSR_LAWBARH0	(CONFIG_SYS_CCSRBAR + 0x1000)
 #define LAW_SIZE_1M	0x13
diff --git a/arch/powerpc/include/asm/immap_85xx.h b/arch/powerpc/include/asm/immap_85xx.h
index 7ce27db..62d8d0b 100644
--- a/arch/powerpc/include/asm/immap_85xx.h
+++ b/arch/powerpc/include/asm/immap_85xx.h
@@ -2789,6 +2789,7 @@ typedef struct ccsr_snvs_regs {
 #define CONFIG_SYS_FSL_FM2_RX4_1G_OFFSET	0x58c000
 #define CONFIG_SYS_FSL_FM2_RX0_10G_OFFSET	0x590000
 #define CONFIG_SYS_FSL_PAMU1_OFFSET		0x21000
+#define CONFIG_SYS_FSL_CLUSTER_1_L2_OFFSET	0xC20000
 #else
 #define CONFIG_SYS_MPC85xx_ECM_OFFSET		0x0000
 #define CONFIG_SYS_MPC85xx_DDR_OFFSET		0x2000
@@ -2946,4 +2947,46 @@ typedef struct ccsr_snvs_regs {
 #define TSEC_BASE_ADDR		(CONFIG_SYS_IMMR + CONFIG_SYS_TSEC1_OFFSET)
 #define MDIO_BASE_ADDR		(CONFIG_SYS_IMMR + CONFIG_SYS_MDIO1_OFFSET)
 
+#ifdef CONFIG_E6500
+struct ccsr_cluster_l2 {
+	u32 l2csr0;	/* 0x000 L2 cache control and status register 0 */
+	u32 l2csr1;	/* 0x004 L2 cache control and status register 1 */
+	u32 l2cfg0;	/* 0x008 L2 cache configuration register 0 */
+	u8  res_0c[500];/* 0x00c - 0x1ff */
+	u32 l2pir0;	/* 0x200 L2 cache partitioning ID register 0 */
+	u8  res_204[4];
+	u32 l2par0;	/* 0x208 L2 cache partitioning allocation register 0 */
+	u32 l2pwr0;	/* 0x20c L2 cache partitioning way register 0 */
+	u32 l2pir1;	/* 0x210 L2 cache partitioning ID register 1 */
+	u8  res_214[4];
+	u32 l2par1;	/* 0x218 L2 cache partitioning allocation register 1 */
+	u32 l2pwr1;	/* 0x21c L2 cache partitioning way register 1 */
+	u32 l2pir2;	/* 0x220 L2 cache partitioning ID register 2 */
+	u8  res_224[4];
+	u32 l2par2;	/* 0x228 L2 cache partitioning allocation register 2 */
+	u32 l2pwr2;	/* 0x22c L2 cache partitioning way register 2 */
+	u32 l2pir3;	/* 0x230 L2 cache partitioning ID register 3 */
+	u8  res_234[4];
+	u32 l2par3;	/* 0x238 L2 cache partitioning allocation register 3 */
+	u32 l2pwr3;	/* 0x23c L2 cache partitioning way register 3 */
+	u8  res_240[3008]; /* 0x240 - 0xdff */
+	u32 l2errinjhi;	/* 0xe00 L2 cache error injection mask high */
+	u32 l2errinjlo;	/* 0xe04 L2 cache error injection mask low */
+	u32 l2errinjctl;/* 0xe08 L2 cache error injection control */
+	u8  res_e0c[20];	/* 0xe0c - 0xe1f */
+	u32 l2captdatahi; /* 0xe20 L2 cache error capture data high */
+	u32 l2captdatalo; /* 0xe24 L2 cache error capture data low */
+	u32 l2captecc;	/* 0xe28 L2 cache error capture ECC syndrome */
+	u8  res_e2c[20];	/* 0xe2c - 0xe3f */
+	u32 l2errdet;	/* 0xe40 L2 cache error detect */
+	u32 l2errdis;	/* 0xe44 L2 cache error disable */
+	u32 l2errinten;	/* 0xe48 L2 cache error interrupt enable */
+	u32 l2errattr;	/* 0xe4c L2 cache error attribute */
+	u32 l2erreaddr;	/* 0xe50 L2 cache error extended address */
+	u32 l2erraddr;	/* 0xe54 L2 cache error address */
+	u32 l2errctl;	/* 0xe58 L2 cache error control */
+};
+#define CONFIG_SYS_FSL_CLUSTER_1_L2 \
+	(CONFIG_SYS_IMMR + CONFIG_SYS_FSL_CLUSTER_1_L2_OFFSET)
+#endif /* CONFIG_E6500 */
 #endif /*__IMMAP_85xx__*/