[U-Boot] [PATCH V2 0/3] OMAP5/DRA7: EMIF fixes for lowpower usecases

1) Currently, the DDR3 memory on the DRA7 ES1.0 EVM board is initialized using software leveling, because hardware leveling was not working at the time. Now that the right sequence for hardware leveling has been identified, use it. This is required for the EMIF clock domain to idle and come back during low-power use cases.
2) When the core power domain hits OSWR, the DDR3 memories do not come back on resume. When the EMIF register contents are lost, the controller restores them by copying the values from the shadow registers. If the shadow registers are not updated with the right values, this results in incorrect settings on resume. So update the shadow registers from the corresponding status registers during boot.
Sricharan R (3):
  ARM: DRA7: Add is_dra7xx cpu check definition
  ARM: DRA: EMIF: Change DDR3 settings to use hw leveling
  ARM: DRA7/OMAP5: EMIF: Add workaround for bug 0039

 arch/arm/cpu/armv7/omap-common/emif-common.c | 142 +++++++++++++----
 arch/arm/cpu/armv7/omap5/hw_data.c           |   9 +-
 arch/arm/cpu/armv7/omap5/hwinit.c            |  12 +-
 arch/arm/cpu/armv7/omap5/sdram.c             | 214 ++++++++++++++++++--------
 arch/arm/include/asm/arch-omap5/omap.h       |   1 +
 arch/arm/include/asm/emif.h                  |  14 +-
 arch/arm/include/asm/omap_common.h           |   8 +
 7 files changed, 301 insertions(+), 99 deletions(-)

A generic is_dra7xx cpu check is useful for grouping all DRA7xx revisions under a single test. It is used in the subsequent patches.
Signed-off-by: Sricharan R r.sricharan@ti.com
---
 arch/arm/include/asm/omap_common.h | 8 ++++++++
 1 file changed, 8 insertions(+)
diff --git a/arch/arm/include/asm/omap_common.h b/arch/arm/include/asm/omap_common.h
index 3a998cc..3655e5a 100644
--- a/arch/arm/include/asm/omap_common.h
+++ b/arch/arm/include/asm/omap_common.h
@@ -600,6 +600,14 @@ static inline u8 is_omap54xx(void)
 	extern u32 *const omap_si_rev;
 	return ((*omap_si_rev & 0xFF000000) == OMAP54xx);
 }
+
+#define DRA7XX	0x07000000
+
+static inline u8 is_dra7xx(void)
+{
+	extern u32 *const omap_si_rev;
+	return ((*omap_si_rev & 0xFF000000) == DRA7XX);
+}
 #endif

 /*
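To illustrate how the new check is meant to be consumed, here is a hypothetical example (not an actual call site from the series; patch 2 keys the equivalent decision off is_omap54xx() instead, and the function name below is invented):

/* Hypothetical dispatch: pick the SoC-specific DDR3 leveling routine */
static void ddr3_leveling_dispatch(u32 base, const struct emif_regs *regs)
{
	if (is_dra7xx())
		dra7_ddr3_leveling(base, regs);	/* hw leveling, added in patch 2 */
	else
		omap5_ddr3_leveling(base, regs);
}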

On Fri, Nov 08, 2013 at 05:40:36PM +0530, SRICHARAN R wrote:
A generic is_dra7xx cpu check is useful for grouping all DRA7xx revisions under a single test. It is used in the subsequent patches.
Signed-off-by: Sricharan R r.sricharan@ti.com
Applied to u-boot-ti/master, thanks!

Currently, the DDR3 memory on the DRA7 ES1.0 EVM board is initialized using software leveling, because hardware leveling was not working at the time. Now that the right sequence for hardware leveling has been identified, use it. This is required for the EMIF clock domain to idle and come back during low-power use cases.
Signed-off-by: Sricharan R r.sricharan@ti.com
---
 arch/arm/cpu/armv7/omap-common/emif-common.c | 100 +++++++++++++-----
 arch/arm/cpu/armv7/omap5/hw_data.c           |   9 +-
 arch/arm/cpu/armv7/omap5/hwinit.c            |  12 ++-
 arch/arm/cpu/armv7/omap5/sdram.c             | 146 +++++++++++++-----------
 arch/arm/include/asm/arch-omap5/omap.h       |   1 +
 arch/arm/include/asm/emif.h                  |   4 +-
 6 files changed, 174 insertions(+), 98 deletions(-)
diff --git a/arch/arm/cpu/armv7/omap-common/emif-common.c b/arch/arm/cpu/armv7/omap-common/emif-common.c
index b0e1caa..dc1ea1d 100644
--- a/arch/arm/cpu/armv7/omap-common/emif-common.c
+++ b/arch/arm/cpu/armv7/omap-common/emif-common.c
@@ -206,7 +206,7 @@ void emif_update_timings(u32 base, const struct emif_regs *regs)
 	}
 }

-static void ddr3_leveling(u32 base, const struct emif_regs *regs)
+static void omap5_ddr3_leveling(u32 base, const struct emif_regs *regs)
 {
 	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

@@ -217,47 +217,86 @@ static void ddr3_leveling(u32 base, const struct emif_regs *regs)

 	/*
 	 * Set invert_clkout (if activated)--DDR_PHYCTRL_1
-	 * Invert clock adds an additional half cycle delay on the command
-	 * interface. The additional half cycle, is usually meant to enable
-	 * leveling in the situation that DQS is later than CK on the board.It
-	 * also helps provide some additional margin for leveling.
+	 * Invert clock adds an additional half cycle delay on the
+	 * command interface. The additional half cycle, is usually
+	 * meant to enable leveling in the situation that DQS is later
+	 * than CK on the board.It also helps provide some additional
+	 * margin for leveling.
 	 */
-	writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1);
-	writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1_shdw);
+	writel(regs->emif_ddr_phy_ctlr_1,
+	       &emif->emif_ddr_phy_ctrl_1);
+
+	writel(regs->emif_ddr_phy_ctlr_1,
+	       &emif->emif_ddr_phy_ctrl_1_shdw);
 	__udelay(130);

 	writel(((LP_MODE_DISABLE << EMIF_REG_LP_MODE_SHIFT)
-	       & EMIF_REG_LP_MODE_MASK), &emif->emif_pwr_mgmt_ctrl);
+		& EMIF_REG_LP_MODE_MASK), &emif->emif_pwr_mgmt_ctrl);

 	/* Launch Full leveling */
 	writel(DDR3_FULL_LVL, &emif->emif_rd_wr_lvl_ctl);

 	/* Wait till full leveling is complete */
 	readl(&emif->emif_rd_wr_lvl_ctl);
-	__udelay(130);
+	__udelay(130);

 	/* Read data eye leveling no of samples */
 	config_data_eye_leveling_samples(base);

-	/* Launch 8 incremental WR_LVL- to compensate for PHY limitation */
-	writel(0x2 << EMIF_REG_WRLVLINC_INT_SHIFT, &emif->emif_rd_wr_lvl_ctl);
+	/*
+	 * Launch 8 incremental WR_LVL- to compensate for
+	 * PHY limitation.
+	 */
+	writel(0x2 << EMIF_REG_WRLVLINC_INT_SHIFT,
+	       &emif->emif_rd_wr_lvl_ctl);
+	__udelay(130);

 	/* Launch Incremental leveling */
 	writel(DDR3_INC_LVL, &emif->emif_rd_wr_lvl_ctl);
-	__udelay(130);
+	__udelay(130);
 }

-static void ddr3_sw_leveling(u32 base, const struct emif_regs *regs)
+static void dra7_ddr3_leveling(u32 base, const struct emif_regs *regs)
 {
 	struct emif_reg_struct *emif = (struct emif_reg_struct *)base;

-	writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1);
-	writel(regs->emif_ddr_phy_ctlr_1, &emif->emif_ddr_phy_ctrl_1_shdw);
+	u32 fifo_reg;
+
+	fifo_reg = readl(&emif->emif_ddr_fifo_misaligned_clear_1);
+	writel(fifo_reg | 0x00000100,
+	       &emif->emif_ddr_fifo_misaligned_clear_1);
+
+	fifo_reg = readl(&emif->emif_ddr_fifo_misaligned_clear_2);
+	writel(fifo_reg | 0x00000100,
+	       &emif->emif_ddr_fifo_misaligned_clear_2);
+
+	/* Launch Full leveling */
+	writel(DDR3_FULL_LVL, &emif->emif_rd_wr_lvl_ctl);
+
+	/* Wait till full leveling is complete */
+	readl(&emif->emif_rd_wr_lvl_ctl);
+	__udelay(130);
+
+	/* Read data eye leveling no of samples */
 	config_data_eye_leveling_samples(base);

-	writel(regs->emif_rd_wr_lvl_ctl, &emif->emif_rd_wr_lvl_ctl);
-	writel(regs->sdram_config, &emif->emif_sdram_config);
+	/*
+	 * Disable leveling. This is because if leveling is kept
+	 * enabled, then PHY triggers a false leveling during
+	 * EMIF-idle scenario which results in wrong delay
+	 * values getting updated. After this the EMIF becomes
+	 * unaccessible. So disable it after the first time
+	 */
+	writel(0x0, &emif->emif_rd_wr_lvl_rmp_ctl);
+}
+
+static void ddr3_leveling(u32 base, const struct emif_regs *regs)
+{
+	if (is_omap54xx())
+		omap5_ddr3_leveling(base, regs);
+	else
+		dra7_ddr3_leveling(base, regs);
 }

 static void ddr3_init(u32 base, const struct emif_regs *regs)
@@ -270,9 +309,6 @@ static void ddr3_init(u32 base, const struct emif_regs *regs)
 	 * defined, contents of mode Registers must be fully initialized.
 	 * H/W takes care of this initialization
 	 */
-	writel(regs->sdram_config2, &emif->emif_lpddr2_nvm_config);
-	writel(regs->sdram_config_init, &emif->emif_sdram_config);
-	writel(regs->emif_ddr_phy_ctlr_1_init, &emif->emif_ddr_phy_ctrl_1);

 	/* Update timing registers */
@@ -283,15 +319,24 @@ static void ddr3_init(u32 base, const struct emif_regs *regs)
 	writel(regs->ref_ctrl, &emif->emif_sdram_ref_ctrl);
 	writel(regs->read_idle_ctrl, &emif->emif_read_idlectrl);

-	do_ext_phy_settings(base, regs);
+	/*
+	 * The same sequence should work on OMAP5432 as well. But strange that
+	 * it is not working
+	 */
+	if (omap_revision() == DRA752_ES1_0) {
+		do_ext_phy_settings(base, regs);
+		writel(regs->sdram_config2, &emif->emif_lpddr2_nvm_config);
+		writel(regs->sdram_config_init, &emif->emif_sdram_config);
+	} else {
+		writel(regs->sdram_config2, &emif->emif_lpddr2_nvm_config);
+		writel(regs->sdram_config_init, &emif->emif_sdram_config);
+		do_ext_phy_settings(base, regs);
+	}

 	/* enable leveling */
 	writel(regs->emif_rd_wr_lvl_rmp_ctl, &emif->emif_rd_wr_lvl_rmp_ctl);

-	if (omap_revision() == DRA752_ES1_0)
-		ddr3_sw_leveling(base, regs);
-	else
-		ddr3_leveling(base, regs);
+	ddr3_leveling(base, regs);
 }

 #ifndef CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS
@@ -1079,10 +1124,7 @@ static void do_sdram_init(u32 base)
 	if (warm_reset() && (emif_sdram_type() == EMIF_SDRAM_TYPE_DDR3)) {
 		set_lpmode_selfrefresh(base);
 		emif_reset_phy(base);
-		if (omap_revision() == DRA752_ES1_0)
-			ddr3_sw_leveling(base, regs);
-		else
-			ddr3_leveling(base, regs);
+		ddr3_leveling(base, regs);
 	}

 	/* Write to the shadow registers */
diff --git a/arch/arm/cpu/armv7/omap5/hw_data.c b/arch/arm/cpu/armv7/omap5/hw_data.c
index a1b249e..82910e8 100644
--- a/arch/arm/cpu/armv7/omap5/hw_data.c
+++ b/arch/arm/cpu/armv7/omap5/hw_data.c
@@ -600,6 +600,7 @@ const struct ctrl_ioregs ioregs_omap5432_es1 = {
 	.ctrl_ddrio_1 = DDR_IO_1_VREF_CELLS_DDR3_VALUE,
 	.ctrl_ddrio_2 = DDR_IO_2_VREF_CELLS_DDR3_VALUE,
 	.ctrl_emif_sdram_config_ext = SDRAM_CONFIG_EXT_RD_LVL_11_SAMPLES,
+	.ctrl_emif_sdram_config_ext_final = SDRAM_CONFIG_EXT_RD_LVL_4_SAMPLES,
 };

 const struct ctrl_ioregs ioregs_omap5432_es2 = {
@@ -610,16 +611,18 @@ const struct ctrl_ioregs ioregs_omap5432_es2 = {
 	.ctrl_ddrio_1 = DDR_IO_1_VREF_CELLS_DDR3_VALUE_ES2,
 	.ctrl_ddrio_2 = DDR_IO_2_VREF_CELLS_DDR3_VALUE_ES2,
 	.ctrl_emif_sdram_config_ext = SDRAM_CONFIG_EXT_RD_LVL_11_SAMPLES,
+	.ctrl_emif_sdram_config_ext_final = SDRAM_CONFIG_EXT_RD_LVL_4_SAMPLES,
 };

 const struct ctrl_ioregs ioregs_dra7xx_es1 = {
 	.ctrl_ddrch = 0x40404040,
 	.ctrl_lpddr2ch = 0x40404040,
 	.ctrl_ddr3ch = 0x80808080,
-	.ctrl_ddrio_0 = 0xbae8c631,
-	.ctrl_ddrio_1 = 0xb46318d8,
+	.ctrl_ddrio_0 = 0xA2084210,
+	.ctrl_ddrio_1 = 0x84210840,
 	.ctrl_ddrio_2 = 0x84210000,
-	.ctrl_emif_sdram_config_ext = 0xb2c00000,
+	.ctrl_emif_sdram_config_ext = 0x0001C1A7,
+	.ctrl_emif_sdram_config_ext_final = 0x000101A7,
 	.ctrl_ddr_ctrl_ext_0 = 0xA2000000,
 };

diff --git a/arch/arm/cpu/armv7/omap5/hwinit.c b/arch/arm/cpu/armv7/omap5/hwinit.c
index 1065891..82c8638 100644
--- a/arch/arm/cpu/armv7/omap5/hwinit.c
+++ b/arch/arm/cpu/armv7/omap5/hwinit.c
@@ -297,13 +297,17 @@ void srcomp_enable(void)

 void config_data_eye_leveling_samples(u32 emif_base)
 {
+	const struct ctrl_ioregs *ioregs;
+
+	get_ioregs(&ioregs);
+
 	/*EMIF_SDRAM_CONFIG_EXT-Read data eye leveling no of samples =4*/
 	if (emif_base == EMIF1_BASE)
-		writel(SDRAM_CONFIG_EXT_RD_LVL_4_SAMPLES,
-		       (*ctrl)->control_emif1_sdram_config_ext);
+		writel(ioregs->ctrl_emif_sdram_config_ext_final,
+		       (*ctrl)->control_emif1_sdram_config_ext);
 	else if (emif_base == EMIF2_BASE)
-		writel(SDRAM_CONFIG_EXT_RD_LVL_4_SAMPLES,
-		       (*ctrl)->control_emif2_sdram_config_ext);
+		writel(ioregs->ctrl_emif_sdram_config_ext_final,
+		       (*ctrl)->control_emif2_sdram_config_ext);
 }

 void init_omap_revision(void)
diff --git a/arch/arm/cpu/armv7/omap5/sdram.c b/arch/arm/cpu/armv7/omap5/sdram.c
index e65c116..2aae3ef 100644
--- a/arch/arm/cpu/armv7/omap5/sdram.c
+++ b/arch/arm/cpu/armv7/omap5/sdram.c
@@ -148,13 +148,13 @@ const struct emif_regs emif_1_regs_ddr3_532_mhz_1cs_dra_es1 = {
 	.read_idle_ctrl = 0x00050000,
 	.zq_config = 0x0007190B,
 	.temp_alert_config = 0x00000000,
-	.emif_ddr_phy_ctlr_1_init = 0x0E20400A,
-	.emif_ddr_phy_ctlr_1 = 0x0E24400A,
-	.emif_ddr_ext_phy_ctrl_1 = 0x04040100,
-	.emif_ddr_ext_phy_ctrl_2 = 0x009E009E,
-	.emif_ddr_ext_phy_ctrl_3 = 0x009E009E,
-	.emif_ddr_ext_phy_ctrl_4 = 0x009E009E,
-	.emif_ddr_ext_phy_ctrl_5 = 0x009E009E,
+	.emif_ddr_phy_ctlr_1_init = 0x0024400A,
+	.emif_ddr_phy_ctlr_1 = 0x0024400A,
+	.emif_ddr_ext_phy_ctrl_1 = 0x10040100,
+	.emif_ddr_ext_phy_ctrl_2 = 0x00B000B0,
+	.emif_ddr_ext_phy_ctrl_3 = 0x00B000B0,
+	.emif_ddr_ext_phy_ctrl_4 = 0x00B000B0,
+	.emif_ddr_ext_phy_ctrl_5 = 0x00B000B0,
 	.emif_rd_wr_lvl_rmp_win = 0x00000000,
 	.emif_rd_wr_lvl_rmp_ctl = 0x80000000,
 	.emif_rd_wr_lvl_ctl = 0x00000000,
@@ -172,13 +172,13 @@ const struct emif_regs emif_2_regs_ddr3_532_mhz_1cs_dra_es1 = {
 	.read_idle_ctrl = 0x00050000,
 	.zq_config = 0x0007190B,
 	.temp_alert_config = 0x00000000,
-	.emif_ddr_phy_ctlr_1_init = 0x0020400A,
-	.emif_ddr_phy_ctlr_1 = 0x0E24400A,
-	.emif_ddr_ext_phy_ctrl_1 = 0x04040100,
-	.emif_ddr_ext_phy_ctrl_2 = 0x009D009D,
-	.emif_ddr_ext_phy_ctrl_3 = 0x009D009D,
-	.emif_ddr_ext_phy_ctrl_4 = 0x009D009D,
-	.emif_ddr_ext_phy_ctrl_5 = 0x009D009D,
+	.emif_ddr_phy_ctlr_1_init = 0x0024400A,
+	.emif_ddr_phy_ctlr_1 = 0x0024400A,
+	.emif_ddr_ext_phy_ctrl_1 = 0x10040100,
+	.emif_ddr_ext_phy_ctrl_2 = 0x00B000B0,
+	.emif_ddr_ext_phy_ctrl_3 = 0x00B000B0,
+	.emif_ddr_ext_phy_ctrl_4 = 0x00B000B0,
+	.emif_ddr_ext_phy_ctrl_5 = 0x00B000B0,
 	.emif_rd_wr_lvl_rmp_win = 0x00000000,
 	.emif_rd_wr_lvl_rmp_ctl = 0x80000000,
 	.emif_rd_wr_lvl_ctl = 0x00000000,
@@ -306,7 +306,7 @@ void emif_get_device_details(u32 emif_nr,

 #endif /* CONFIG_SYS_EMIF_PRECALCULATED_TIMING_REGS */

-const u32 ext_phy_ctrl_const_base[EMIF_EXT_PHY_CTRL_CONST_REG] = {
+const u32 ext_phy_ctrl_const_base[] = {
 	0x01004010,
 	0x00001004,
 	0x04010040,
@@ -329,7 +329,7 @@ const u32 ext_phy_ctrl_const_base[EMIF_EXT_PHY_CTRL_CONST_REG] = {
 	0x0
 };

-const u32 ddr3_ext_phy_ctrl_const_base_es1[EMIF_EXT_PHY_CTRL_CONST_REG] = {
+const u32 ddr3_ext_phy_ctrl_const_base_es1[] = {
 	0x01004010,
 	0x00001004,
 	0x04010040,
@@ -352,7 +352,7 @@ const u32 ddr3_ext_phy_ctrl_const_base_es1[EMIF_EXT_PHY_CTRL_CONST_REG] = {
 	0x0
 };

-const u32 ddr3_ext_phy_ctrl_const_base_es2[EMIF_EXT_PHY_CTRL_CONST_REG] = {
+const u32 ddr3_ext_phy_ctrl_const_base_es2[] = {
 	0x50D4350D,
 	0x00000D43,
 	0x04010040,
@@ -376,51 +376,61 @@ const u32 ddr3_ext_phy_ctrl_const_base_es2[EMIF_EXT_PHY_CTRL_CONST_REG] = {
 };

 const u32
-dra_ddr3_ext_phy_ctrl_const_base_es1_emif1[EMIF_EXT_PHY_CTRL_CONST_REG] = {
-	0x009E009E,
-	0x002E002E,
-	0x002E002E,
-	0x002E002E,
-	0x002E002E,
-	0x002E002E,
-	0x004D004D,
-	0x004D004D,
-	0x004D004D,
-	0x004D004D,
-	0x004D004D,
-	0x004D004D,
-	0x004D004D,
-	0x004D004D,
-	0x004D004D,
-	0x004D004D,
-	0x0,
-	0x600020,
+dra_ddr3_ext_phy_ctrl_const_base_es1_emif1[] = {
+	0x00B000B0,
+	0x00400040,
+	0x00400040,
+	0x00400040,
+	0x00400040,
+	0x00400040,
+	0x00800080,
+	0x00800080,
+	0x00800080,
+	0x00800080,
+	0x00800080,
+	0x00600060,
+	0x00600060,
+	0x00600060,
+	0x00600060,
+	0x00600060,
+	0x00800080,
+	0x00800080,
 	0x40010080,
-	0x8102040
+	0x08102040,
+	0x0,
+	0x0,
+	0x0,
+	0x0,
+	0x0
 };

 const u32
-dra_ddr3_ext_phy_ctrl_const_base_es1_emif2[EMIF_EXT_PHY_CTRL_CONST_REG] = {
-	0x009D009D,
-	0x002D002D,
-	0x002D002D,
-	0x002D002D,
-	0x002D002D,
-	0x002D002D,
-	0x00570057,
-	0x00570057,
-	0x00570057,
-	0x00570057,
-	0x00570057,
-	0x00570057,
-	0x00570057,
-	0x00570057,
-	0x00570057,
-	0x00570057,
+dra_ddr3_ext_phy_ctrl_const_base_es1_emif2[] = {
+	0x00BB00BB,
+	0x00440044,
+	0x00440044,
+	0x00440044,
+	0x00440044,
+	0x00440044,
+	0x007F007F,
+	0x007F007F,
+	0x007F007F,
+	0x007F007F,
+	0x007F007F,
+	0x00600060,
+	0x00600060,
+	0x00600060,
+	0x00600060,
+	0x00600060,
 	0x0,
-	0x600020,
+	0x00600020,
 	0x40010080,
-	0x8102040
+	0x08102040,
+	0x0,
+	0x0,
+	0x0,
+	0x0,
+	0x0
 };

 const struct lpddr2_mr_regs mr_regs = {
@@ -431,27 +441,38 @@ const struct lpddr2_mr_regs mr_regs = {
 	.mr16 = MR16_REF_FULL_ARRAY
 };

-static void emif_get_ext_phy_ctrl_const_regs(u32 emif_nr, const u32 **regs)
+static void emif_get_ext_phy_ctrl_const_regs(u32 emif_nr,
+					     const u32 **regs,
+					     u32 *size)
 {
 	switch (omap_revision()) {
 	case OMAP5430_ES1_0:
 	case OMAP5430_ES2_0:
 		*regs = ext_phy_ctrl_const_base;
+		*size = ARRAY_SIZE(ext_phy_ctrl_const_base);
 		break;
 	case OMAP5432_ES1_0:
 		*regs = ddr3_ext_phy_ctrl_const_base_es1;
+		*size = ARRAY_SIZE(ddr3_ext_phy_ctrl_const_base_es1);
 		break;
 	case OMAP5432_ES2_0:
 		*regs = ddr3_ext_phy_ctrl_const_base_es2;
+		*size = ARRAY_SIZE(ddr3_ext_phy_ctrl_const_base_es2);
 		break;
 	case DRA752_ES1_0:
-		if (emif_nr == 1)
+		if (emif_nr == 1) {
 			*regs = dra_ddr3_ext_phy_ctrl_const_base_es1_emif1;
-		else
+			*size =
+			ARRAY_SIZE(dra_ddr3_ext_phy_ctrl_const_base_es1_emif1);
+		} else {
 			*regs = dra_ddr3_ext_phy_ctrl_const_base_es1_emif2;
+			*size =
+			ARRAY_SIZE(dra_ddr3_ext_phy_ctrl_const_base_es1_emif2);
+		}
 		break;
 	default:
 		*regs = ddr3_ext_phy_ctrl_const_base_es2;
+		*size = ARRAY_SIZE(ddr3_ext_phy_ctrl_const_base_es2);

 	}
 }
@@ -468,6 +489,7 @@ void do_ext_phy_settings(u32 base, const struct emif_regs *regs)
 	u32 emif_nr;
 	const u32 *ext_phy_ctrl_const_regs;
 	u32 i = 0;
+	u32 size;

 	emif_nr = (base == EMIF1_BASE) ? 1 : 2;

@@ -487,8 +509,10 @@ void do_ext_phy_settings(u32 base, const struct emif_regs *regs)
 	 * external phy 6-24 registers do not change with
 	 * ddr frequency
 	 */
-	emif_get_ext_phy_ctrl_const_regs(emif_nr, &ext_phy_ctrl_const_regs);
-	for (i = 0; i < EMIF_EXT_PHY_CTRL_CONST_REG; i++) {
+	emif_get_ext_phy_ctrl_const_regs(emif_nr,
+					 &ext_phy_ctrl_const_regs, &size);
+
+	for (i = 0; i < size; i++) {
 		writel(ext_phy_ctrl_const_regs[i], emif_ext_phy_ctrl_base++);
 		/* Update shadow registers */
diff --git a/arch/arm/include/asm/arch-omap5/omap.h b/arch/arm/include/asm/arch-omap5/omap.h
index 414d37a..92aebe5 100644
--- a/arch/arm/include/asm/arch-omap5/omap.h
+++ b/arch/arm/include/asm/arch-omap5/omap.h
@@ -239,6 +239,7 @@ struct ctrl_ioregs {
 	u32 ctrl_ddrio_1;
 	u32 ctrl_ddrio_2;
 	u32 ctrl_emif_sdram_config_ext;
+	u32 ctrl_emif_sdram_config_ext_final;
 	u32 ctrl_ddr_ctrl_ext_0;
 };

diff --git a/arch/arm/include/asm/emif.h b/arch/arm/include/asm/emif.h
index 1b94a99..c12beea 100644
--- a/arch/arm/include/asm/emif.h
+++ b/arch/arm/include/asm/emif.h
@@ -581,7 +581,6 @@
 				(0xFF << EMIF_SYS_ADDR_SHIFT))

 #define EMIF_EXT_PHY_CTRL_TIMING_REG	0x5
-#define EMIF_EXT_PHY_CTRL_CONST_REG	0x14

 /* Reg mapping structure */
 struct emif_reg_struct {
@@ -690,6 +689,9 @@ struct emif_reg_struct {
 	u32 emif_ddr_ext_phy_ctrl_23_shdw;
 	u32 emif_ddr_ext_phy_ctrl_24;
 	u32 emif_ddr_ext_phy_ctrl_24_shdw;
+	u32 padding[22];
+	u32 emif_ddr_fifo_misaligned_clear_1;
+	u32 emif_ddr_fifo_misaligned_clear_2;
 };

 struct dmm_lisa_map_regs {
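Condensed into plain C, the DRA7 hardware-leveling flow added above looks roughly as follows. This is a simplified restatement of dra7_ddr3_leveling() for readability, not an additional change, and the sketch's function name is invented:

/* Simplified sketch of the DRA7 hw-leveling sequence introduced above */
static void dra7_hw_leveling_sketch(struct emif_reg_struct *emif, u32 base)
{
	/* Clear any misaligned PHY FIFOs before triggering leveling */
	writel(readl(&emif->emif_ddr_fifo_misaligned_clear_1) | 0x00000100,
	       &emif->emif_ddr_fifo_misaligned_clear_1);
	writel(readl(&emif->emif_ddr_fifo_misaligned_clear_2) | 0x00000100,
	       &emif->emif_ddr_fifo_misaligned_clear_2);

	/* Launch full read/write leveling and give it time to complete */
	writel(DDR3_FULL_LVL, &emif->emif_rd_wr_lvl_ctl);
	readl(&emif->emif_rd_wr_lvl_ctl);
	__udelay(130);

	/* Drop the read data eye sample count to the final value */
	config_data_eye_leveling_samples(base);

	/*
	 * Disable the leveling ramp afterwards: if it stays enabled, the
	 * PHY can relevel spuriously when the EMIF idles and corrupt the
	 * delay values, leaving the EMIF inaccessible.
	 */
	writel(0x0, &emif->emif_rd_wr_lvl_rmp_ctl);
}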

On Fri, Nov 08, 2013 at 05:40:37PM +0530, SRICHARAN R wrote:
Currently, the DDR3 memory on the DRA7 ES1.0 EVM board is initialized using software leveling, because hardware leveling was not working at the time. Now that the right sequence for hardware leveling has been identified, use it. This is required for the EMIF clock domain to idle and come back during low-power use cases.
Signed-off-by: Sricharan R r.sricharan@ti.com
Applied to u-boot-ti/master, thanks!

When the core power domain hits OSWR, the DDR3 memories do not come back on resume. When the EMIF register contents are lost, the controller restores them by copying the values from the shadow registers. If the shadow registers are not updated with the right values, this results in incorrect settings on resume. So update the shadow registers from the corresponding status registers during boot.
Signed-off-by: Sricharan R r.sricharan@ti.com
---
 arch/arm/cpu/armv7/omap-common/emif-common.c | 42 ++++++++++++++++
 arch/arm/cpu/armv7/omap4/sdram_elpida.c      |  5 ++
 arch/arm/cpu/armv7/omap5/sdram.c             | 68 ++++++++++++++++++++++++++
 arch/arm/include/asm/emif.h                  | 10 +++-
 4 files changed, 124 insertions(+), 1 deletion(-)
diff --git a/arch/arm/cpu/armv7/omap-common/emif-common.c b/arch/arm/cpu/armv7/omap-common/emif-common.c
index dc1ea1d..5a3f285 100644
--- a/arch/arm/cpu/armv7/omap-common/emif-common.c
+++ b/arch/arm/cpu/armv7/omap-common/emif-common.c
@@ -1286,6 +1286,42 @@ void dmm_init(u32 base)

 }

+static void do_bug0039_workaround(u32 base)
+{
+	u32 val, i, clkctrl;
+	struct emif_reg_struct *emif_base = (struct emif_reg_struct *)base;
+	const struct read_write_regs *bug_00339_regs;
+	u32 iterations;
+	u32 *phy_status_base = &emif_base->emif_ddr_phy_status[0];
+	u32 *phy_ctrl_base = &emif_base->emif_ddr_ext_phy_ctrl_1;
+
+	if (is_dra7xx())
+		phy_status_base++;
+
+	bug_00339_regs = get_bug_regs(&iterations);
+
+	/* Put EMIF in to idle */
+	clkctrl = __raw_readl((*prcm)->cm_memif_clkstctrl);
+	__raw_writel(0x0, (*prcm)->cm_memif_clkstctrl);
+
+	/* Copy the phy status registers in to phy ctrl shadow registers */
+	for (i = 0; i < iterations; i++) {
+		val = __raw_readl(phy_status_base +
+				  bug_00339_regs[i].read_reg - 1);
+
+		__raw_writel(val, phy_ctrl_base +
+			     ((bug_00339_regs[i].write_reg - 1) << 1));
+
+		__raw_writel(val, phy_ctrl_base +
+			     (bug_00339_regs[i].write_reg << 1) - 1);
+	}
+
+	/* Disable leveling */
+	writel(0x0, &emif_base->emif_rd_wr_lvl_rmp_ctl);
+
+	__raw_writel(clkctrl, (*prcm)->cm_memif_clkstctrl);
+}
+
 /*
  * SDRAM initialization:
  * SDRAM initialization has two parts:
@@ -1361,5 +1397,11 @@ void sdram_init(void)
 		debug("get_ram_size() successful");
 	}

+	if (sdram_type == EMIF_SDRAM_TYPE_DDR3 &&
+	    (!in_sdram && !warm_reset())) {
+		do_bug0039_workaround(EMIF1_BASE);
+		do_bug0039_workaround(EMIF2_BASE);
+	}
+
 	debug("<<sdram_init()\n");
 }
diff --git a/arch/arm/cpu/armv7/omap4/sdram_elpida.c b/arch/arm/cpu/armv7/omap4/sdram_elpida.c
index e4c8316..811e776 100644
--- a/arch/arm/cpu/armv7/omap4/sdram_elpida.c
+++ b/arch/arm/cpu/armv7/omap4/sdram_elpida.c
@@ -321,3 +321,8 @@ void get_lpddr2_mr_regs(const struct lpddr2_mr_regs **regs)
 {
 	*regs = &mr_regs;
 }
+
+__weak const struct read_write_regs *get_bug_regs(u32 *iterations)
+{
+	return 0;
+}
diff --git a/arch/arm/cpu/armv7/omap5/sdram.c b/arch/arm/cpu/armv7/omap5/sdram.c
index 2aae3ef..2e18706 100644
--- a/arch/arm/cpu/armv7/omap5/sdram.c
+++ b/arch/arm/cpu/armv7/omap5/sdram.c
@@ -569,6 +569,74 @@ static const struct lpddr2_device_timings dev_4G_S4_timings = {
 	.min_tck = &min_tck,
 };

+/*
+ * List of status registers to be controlled back to control registers
+ * after initial leveling
+ * readreg, writereg
+ */
+const struct read_write_regs omap5_bug_00339_regs[] = {
+	{ 8, 5 },
+	{ 9, 6 },
+	{ 10, 7 },
+	{ 14, 8 },
+	{ 15, 9 },
+	{ 16, 10 },
+	{ 11, 2 },
+	{ 12, 3 },
+	{ 13, 4 },
+	{ 17, 11 },
+	{ 18, 12 },
+	{ 19, 13 },
+};
+
+const struct read_write_regs dra_bug_00339_regs[] = {
+	{ 7, 7 },
+	{ 8, 8 },
+	{ 9, 9 },
+	{ 10, 10 },
+	{ 11, 11 },
+	{ 12, 2 },
+	{ 13, 3 },
+	{ 14, 4 },
+	{ 15, 5 },
+	{ 16, 6 },
+	{ 17, 12 },
+	{ 18, 13 },
+	{ 19, 14 },
+	{ 20, 15 },
+	{ 21, 16 },
+	{ 22, 17 },
+	{ 23, 18 },
+	{ 24, 19 },
+	{ 25, 20 },
+	{ 26, 21}
+};
+
+const struct read_write_regs *get_bug_regs(u32 *iterations)
+{
+	const struct read_write_regs *bug_00339_regs_ptr = NULL;
+
+	switch (omap_revision()) {
+	case OMAP5430_ES1_0:
+	case OMAP5430_ES2_0:
+	case OMAP5432_ES1_0:
+	case OMAP5432_ES2_0:
+		bug_00339_regs_ptr = omap5_bug_00339_regs;
+		*iterations = sizeof(omap5_bug_00339_regs)/
+			      sizeof(omap5_bug_00339_regs[0]);
+		break;
+	case DRA752_ES1_0:
+		bug_00339_regs_ptr = dra_bug_00339_regs;
+		*iterations = sizeof(dra_bug_00339_regs)/
+			      sizeof(dra_bug_00339_regs[0]);
+		break;
+	default:
+		printf("\n Error: UnKnown SOC");
+	}
+
+	return bug_00339_regs_ptr;
+}
+
 void emif_get_device_timings_sdp(u32 emif_nr,
 		const struct lpddr2_device_timings **cs0_device_timings,
 		const struct lpddr2_device_timings **cs1_device_timings)
diff --git a/arch/arm/include/asm/emif.h b/arch/arm/include/asm/emif.h
index c12beea..d9d521a 100644
--- a/arch/arm/include/asm/emif.h
+++ b/arch/arm/include/asm/emif.h
@@ -640,7 +640,9 @@ struct emif_reg_struct {
 	u32 emif_ddr_phy_ctrl_2;
 	u32 padding7[12];
 	u32 emif_rd_wr_exec_thresh;
-	u32 padding8[55];
+	u32 padding8[7];
+	u32 emif_ddr_phy_status[21];
+	u32 padding9[27];
 	u32 emif_ddr_ext_phy_ctrl_1;
 	u32 emif_ddr_ext_phy_ctrl_1_shdw;
 	u32 emif_ddr_ext_phy_ctrl_2;
@@ -1141,6 +1143,11 @@ struct lpddr2_mr_regs {
 	s8 mr16;
 };

+struct read_write_regs {
+	u32 read_reg;
+	u32 write_reg;
+};
+
 /* assert macros */
 #if defined(DEBUG)
 #define emif_assert(c) ({ if (!(c)) for (;;); })
@@ -1169,4 +1176,5 @@ extern u32 *const T_den;

 void config_data_eye_leveling_samples(u32 emif_base);
 u32 emif_sdram_type(void);
+const struct read_write_regs *get_bug_regs(u32 *iterations);
 #endif
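In essence, the workaround walks a per-SoC read/write register map and copies each leveling status value into the matching EMIF_DDR_EXT_PHY_CTRL register and its shadow. A simplified sketch of do_bug0039_workaround() follows; the clock-domain idling the real code performs around the copy is omitted, and the function name here is invented:

/* Simplified sketch of the bug 0039 shadow-register update */
static void copy_leveling_status_to_shadow(struct emif_reg_struct *emif)
{
	u32 i, val, iterations;
	const struct read_write_regs *map = get_bug_regs(&iterations);
	u32 *status = &emif->emif_ddr_phy_status[0];
	u32 *ctrl = &emif->emif_ddr_ext_phy_ctrl_1;

	if (is_dra7xx())
		status++;	/* DRA7xx status registers start one word later */

	for (i = 0; i < iterations; i++) {
		val = __raw_readl(status + map[i].read_reg - 1);

		/* ext_phy_ctrl_N and ext_phy_ctrl_N_shdw are interleaved pairs */
		__raw_writel(val, ctrl + ((map[i].write_reg - 1) << 1));
		__raw_writel(val, ctrl + (map[i].write_reg << 1) - 1);
	}

	/* Leveling must stay disabled once the shadow copies are in place */
	writel(0x0, &emif->emif_rd_wr_lvl_rmp_ctl);
}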

On Fri, Nov 08, 2013 at 05:40:38PM +0530, SRICHARAN R wrote:
When the core power domain hits OSWR, the DDR3 memories do not come back on resume. When the EMIF register contents are lost, the controller restores them by copying the values from the shadow registers. If the shadow registers are not updated with the right values, this results in incorrect settings on resume. So update the shadow registers from the corresponding status registers during boot.
Signed-off-by: Sricharan R r.sricharan@ti.com
Applied to u-boot-ti/master, thanks!