/*
 * compute_cas_latency_ddr3() - choose the DDR3 CAS latency for a controller
 *
 * Intersects the SPD-reported CAS-latency bitmasks (caslat_X) of all DIMMs,
 * takes the worst-case (largest) tAAmin and tCKmin across the slots, and
 * derives the smallest common CAS latency that works at the current memory
 * clock.  The result is stored in outpdimm->lowest_common_SPD_caslat.
 *
 * @dimm_params:     array of per-DIMM parameters (must have at least one entry)
 * @outpdimm:        output structure receiving the common CAS latency
 * @number_of_dimms: number of entries in @dimm_params
 *
 * Return: 0 on success; 1 if the memory clock period is shorter than the
 * DIMMs' tCKmin, or if the resulting CAS latency would exceed tAAmax
 * (20 ns for all DDR3 speed grades).
 */
unsigned int compute_cas_latency_ddr3(const dimm_params_t *dimm_params,
			common_timing_params_t *outpdimm,
			unsigned int number_of_dimms)
{
	unsigned int i;
	unsigned int tAAmin_ps = 0;
	unsigned int tCKmin_X_ps = 0;
	unsigned int common_caslat;
	unsigned int caslat_actual;
	unsigned int retry = 16;
	unsigned int tmp;
	const unsigned int mclk_ps = get_memory_clk_period_ps();

	/* compute the common CAS latency supported between slots */
	tmp = dimm_params[0].caslat_X;
	for (i = 1; i < number_of_dimms; i++)
		tmp &= dimm_params[i].caslat_X;
	common_caslat = tmp;

	/* compute the max tAAmin tCKmin between slots */
	for (i = 0; i < number_of_dimms; i++) {
		tAAmin_ps = max(tAAmin_ps, dimm_params[i].tAA_ps);
		tCKmin_X_ps = max(tCKmin_X_ps, dimm_params[i].tCKmin_X_ps);
	}

	/* validate if the memory clk is in the range of dimms */
	if (mclk_ps < tCKmin_X_ps) {
		/*
		 * Fixed: missing space between the concatenated string
		 * literals and %d used for unsigned arguments.
		 */
		printf("The DIMM max tCKmin is %u ps,"
			" doesn't support the MCLK cycle %u ps\n",
			tCKmin_X_ps, mclk_ps);
		return 1;
	}

	/* determine the actual CAS latency: ceil(tAAmin / tCK) */
	caslat_actual = (tAAmin_ps + mclk_ps - 1) / mclk_ps;

	/*
	 * Bump the latency upward until every DIMM supports it; cap at
	 * 16 attempts so a bogus SPD cannot loop (and shift) unboundedly.
	 */
	while (!(common_caslat & (1 << caslat_actual)) && retry > 0) {
		caslat_actual++;
		retry--;
	}

	/*
	 * Once the calculation of caslat_actual is completed
	 * we must verify that this CAS latency value does not
	 * exceed tAAmax, which is 20 ns for all DDR3 speed grades.
	 */
	if (caslat_actual * mclk_ps > 20000) {
		printf("The chosen cas latency %u is too large\n",
			caslat_actual);
		return 1;
	}
	outpdimm->lowest_common_SPD_caslat = caslat_actual;

	return 0;
}
/*
 * populate_memctl_options() - fill in the option set for one DDR controller
 *
 * Establishes default chip-select ODT settings (from the dynamic_odt tables
 * on DDR2/DDR3/DDR4), controller/bank interleaving, ECC, burst length,
 * data-bus width, write leveling and RCW words for registered DIMMs.
 * Interleaving requests are parsed from the "fsl_ddr" hwconfig environment
 * variable and validated against the installed DIMM population.
 * fsl_ddr_board_options() applies board-specific overrides last.
 *
 * @all_dimms_registered: non-zero if every installed DIMM is registered
 * @popts:    controller options structure to populate
 * @pdimm:    per-slot DIMM parameters for this controller
 * @ctrl_num: index of this memory controller
 *
 * Return: always 0
 */
unsigned int populate_memctl_options(int all_dimms_registered,
			memctl_options_t *popts,
			dimm_params_t *pdimm,
			unsigned int ctrl_num)
{
	unsigned int i;
	char buffer[HWCONFIG_BUFFER_SIZE];
	char *buf = NULL;
#if defined(CONFIG_SYS_FSL_DDR3) || \
	defined(CONFIG_SYS_FSL_DDR2) || \
	defined(CONFIG_SYS_FSL_DDR4)
	const struct dynamic_odt *pdodt = odt_unknown;
#endif
	ulong ddr_freq;

	/*
	 * Extract hwconfig from environment since we have not properly setup
	 * the environment but need it for ddr config params
	 */
	if (getenv_f("hwconfig", buffer, sizeof(buffer)) > 0)
		buf = buffer;

#if defined(CONFIG_SYS_FSL_DDR3) || \
	defined(CONFIG_SYS_FSL_DDR2) || \
	defined(CONFIG_SYS_FSL_DDR4)
	/* Chip select options: pick the ODT table by rank population. */
#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
	switch (pdimm[0].n_ranks) {
	case 1:
		pdodt = single_S;
		break;
	case 2:
		pdodt = single_D;
		break;
	case 4:
		pdodt = single_Q;
		break;
	}
#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
	switch (pdimm[0].n_ranks) {
#ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
	case 4:
		pdodt = single_Q;
		if (pdimm[1].n_ranks)
			printf("Error: Quad- and Dual-rank DIMMs cannot be used together\n");
		break;
#endif
	case 2:
		switch (pdimm[1].n_ranks) {
		case 2:
			pdodt = dual_DD;
			break;
		case 1:
			pdodt = dual_DS;
			break;
		case 0:
			pdodt = dual_D0;
			break;
		}
		break;
	case 1:
		switch (pdimm[1].n_ranks) {
		case 2:
			pdodt = dual_SD;
			break;
		case 1:
			pdodt = dual_SS;
			break;
		case 0:
			pdodt = dual_S0;
			break;
		}
		break;
	case 0:
		switch (pdimm[1].n_ranks) {
		case 2:
			pdodt = dual_0D;
			break;
		case 1:
			pdodt = dual_0S;
			break;
		}
		break;
	}
#endif /* CONFIG_DIMM_SLOTS_PER_CTLR */
#endif /* CONFIG_SYS_FSL_DDR2, 3, 4 */

	/* Pick chip-select local options. */
	for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) {
#if defined(CONFIG_SYS_FSL_DDR3) || \
	defined(CONFIG_SYS_FSL_DDR2) || \
	defined(CONFIG_SYS_FSL_DDR4)
		popts->cs_local_opts[i].odt_rd_cfg = pdodt[i].odt_rd_cfg;
		popts->cs_local_opts[i].odt_wr_cfg = pdodt[i].odt_wr_cfg;
		popts->cs_local_opts[i].odt_rtt_norm = pdodt[i].odt_rtt_norm;
		popts->cs_local_opts[i].odt_rtt_wr = pdodt[i].odt_rtt_wr;
#else
		popts->cs_local_opts[i].odt_rd_cfg = FSL_DDR_ODT_NEVER;
		popts->cs_local_opts[i].odt_wr_cfg = FSL_DDR_ODT_CS;
#endif
		popts->cs_local_opts[i].auto_precharge = 0;
	}

	/* Pick interleaving mode. */

	/*
	 * 0 = no interleaving
	 * 1 = interleaving between 2 controllers
	 */
	popts->memctl_interleaving = 0;

	/*
	 * 0 = cacheline
	 * 1 = page
	 * 2 = (logical) bank
	 * 3 = superbank (only if CS interleaving is enabled)
	 */
	popts->memctl_interleaving_mode = 0;

	/*
	 * 0: cacheline: bit 30 of the 36-bit physical addr selects the memctl
	 * 1: page: bit to the left of the column bits selects the memctl
	 * 2: bank: bit to the left of the bank bits selects the memctl
	 * 3: superbank: bit to the left of the chip select selects the memctl
	 *
	 * NOTE: ba_intlv (rank interleaving) is independent of memory
	 * controller interleaving; it is only within a memory controller.
	 * Must use superbank interleaving if rank interleaving is used and
	 * memory controller interleaving is enabled.
	 */

	/*
	 * 0 = no
	 * 0x40 = CS0,CS1
	 * 0x20 = CS2,CS3
	 * 0x60 = CS0,CS1 + CS2,CS3
	 * 0x04 = CS0,CS1,CS2,CS3
	 */
	popts->ba_intlv_ctl = 0;

	/* Memory Organization Parameters */
	popts->registered_dimm_en = all_dimms_registered;

	/* Operational Mode Paramters */

	/* Pick ECC modes */
	popts->ecc_mode = 0;	/* 0 = disabled, 1 = enabled */
#ifdef CONFIG_DDR_ECC
	if (hwconfig_sub_f("fsl_ddr", "ecc", buf)) {
		if (hwconfig_subarg_cmp_f("fsl_ddr", "ecc", "on", buf))
			popts->ecc_mode = 1;
	} else
		popts->ecc_mode = 1;	/* default on when hwconfig is silent */
#endif
	popts->ecc_init_using_memctl = 1; /* 0 = use DMA, 1 = use memctl */

	/*
	 * Choose DQS config
	 * 0 for DDR1
	 * 1 for DDR2
	 */
#if defined(CONFIG_SYS_FSL_DDR1)
	popts->dqs_config = 0;
#elif defined(CONFIG_SYS_FSL_DDR2) || defined(CONFIG_SYS_FSL_DDR3)
	popts->dqs_config = 1;
#endif

	/* Choose self-refresh during sleep. */
	popts->self_refresh_in_sleep = 1;

	/* Choose dynamic power management mode. */
	popts->dynamic_power = 0;

	/*
	 * check first dimm for primary sdram width
	 * presuming all dimms are similar
	 * 0 = 64-bit, 1 = 32-bit, 2 = 16-bit
	 */
#if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
	if (pdimm[0].n_ranks != 0) {
		if ((pdimm[0].data_width >= 64) && \
			(pdimm[0].data_width <= 72))
			popts->data_bus_width = 0;
		/*
		 * BUGFIX: was "|| ", which is always true and made the
		 * panic() below unreachable; a 32..40-bit range check
		 * requires "&&".
		 */
		else if ((pdimm[0].data_width >= 32) && \
			(pdimm[0].data_width <= 40))
			popts->data_bus_width = 1;
		else {
			panic("Error: data width %u is invalid!\n",
				pdimm[0].data_width);
		}
	}
#else
	if (pdimm[0].n_ranks != 0) {
		if (pdimm[0].primary_sdram_width == 64)
			popts->data_bus_width = 0;
		else if (pdimm[0].primary_sdram_width == 32)
			popts->data_bus_width = 1;
		else if (pdimm[0].primary_sdram_width == 16)
			popts->data_bus_width = 2;
		else {
			panic("Error: primary sdram width %u is invalid!\n",
				pdimm[0].primary_sdram_width);
		}
	}
#endif

	popts->x4_en = (pdimm[0].device_width == 4) ? 1 : 0;

	/* Choose burst length. */
#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
#if defined(CONFIG_E500MC)
	popts->otf_burst_chop_en = 0;	/* on-the-fly burst chop disable */
	popts->burst_length = DDR_BL8;	/* Fixed 8-beat burst len */
#else
	if ((popts->data_bus_width == 1) || (popts->data_bus_width == 2)) {
		/* 32-bit or 16-bit bus */
		popts->otf_burst_chop_en = 0;
		popts->burst_length = DDR_BL8;
	} else {
		popts->otf_burst_chop_en = 1;	/* on-the-fly burst chop */
		popts->burst_length = DDR_OTF;	/* on-the-fly BC4 and BL8 */
	}
#endif
#else
	popts->burst_length = DDR_BL4;	/* has to be 4 for DDR2 */
#endif

	/* Choose ddr controller address mirror mode */
#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
	/* mirror mode comes from the first populated slot */
	for (i = 0; i < CONFIG_DIMM_SLOTS_PER_CTLR; i++) {
		if (pdimm[i].n_ranks) {
			popts->mirrored_dimm = pdimm[i].mirrored_dimm;
			break;
		}
	}
#endif

	/* Global Timing Parameters. */
	debug("mclk_ps = %u ps\n", get_memory_clk_period_ps(ctrl_num));

	/* Pick a caslat override. */
	popts->cas_latency_override = 0;
	popts->cas_latency_override_value = 3;
	if (popts->cas_latency_override) {
		debug("using caslat override value = %u\n",
		      popts->cas_latency_override_value);
	}

	/* Decide whether to use the computed derated latency */
	popts->use_derated_caslat = 0;

	/* Choose an additive latency. */
	popts->additive_latency_override = 0;
	popts->additive_latency_override_value = 3;
	if (popts->additive_latency_override) {
		debug("using additive latency override value = %u\n",
		      popts->additive_latency_override_value);
	}

	/*
	 * 2T_EN setting
	 *
	 * Factors to consider for 2T_EN:
	 *	- number of DIMMs installed
	 *	- number of components, number of active ranks
	 *	- how much time you want to spend playing around
	 */
	popts->twot_en = 0;
	popts->threet_en = 0;

	/* for RDIMM, address parity enable */
	popts->ap_en = 1;

	/*
	 * BSTTOPRE precharge interval
	 *
	 * Set this to 0 for global auto precharge
	 * The value of 0x100 has been used for DDR1, DDR2, DDR3.
	 * It is not wrong. Any value should be OK. The performance depends on
	 * applications. There is no one good value for all.
	 */
	popts->bstopre = 0x100;

	/*
	 * Window for four activates -- tFAW
	 *
	 * FIXME: UM: applies only to DDR2/DDR3 with eight logical banks only
	 * FIXME: varies depending upon number of column addresses or data
	 * FIXME: width, was considering looking at pdimm->primary_sdram_width
	 */
#if defined(CONFIG_SYS_FSL_DDR1)
	popts->tfaw_window_four_activates_ps = mclk_to_picos(ctrl_num, 1);
#elif defined(CONFIG_SYS_FSL_DDR2)
	/*
	 * x4/x8; some datasheets have 35000
	 * x16 wide columns only? Use 50000?
	 */
	popts->tfaw_window_four_activates_ps = 37500;
#else
	popts->tfaw_window_four_activates_ps = pdimm[0].tfaw_ps;
#endif
	popts->zq_en = 0;
	popts->wrlvl_en = 0;
#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
	/*
	 * due to ddr3 dimm is fly-by topology
	 * we suggest to enable write leveling to
	 * meet the tQDSS under different loading.
	 */
	popts->wrlvl_en = 1;
	popts->zq_en = 1;
	popts->wrlvl_override = 0;
#endif

	/*
	 * Check interleaving configuration from environment.
	 * Please refer to doc/README.fsl-ddr for the detail.
	 *
	 * If memory controller interleaving is enabled, then the data
	 * bus widths must be programmed identically for all memory controllers.
	 *
	 * Attempt to set all controllers to the same chip select
	 * interleaving mode. It will do a best effort to get the
	 * requested ranks interleaved together such that the result
	 * should be a subset of the requested configuration.
	 *
	 * if CONFIG_SYS_FSL_DDR_INTLV_256B is defined, mandatory interleaving
	 * with 256 Byte is enabled.
	 */
#if (CONFIG_NUM_DDR_CONTROLLERS > 1)
	if (!hwconfig_sub_f("fsl_ddr", "ctlr_intlv", buf))
#ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
		;	/* 256B interleaving is mandatory; ignore hwconfig */
#else
		goto done;
#endif
	if (pdimm[0].n_ranks == 0) {
		printf("There is no rank on CS0 for controller %d.\n", ctrl_num);
		popts->memctl_interleaving = 0;
		goto done;
	}
	popts->memctl_interleaving = 1;
#ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
	popts->memctl_interleaving_mode = FSL_DDR_256B_INTERLEAVING;
	popts->memctl_interleaving = 1;
	debug("256 Byte interleaving\n");
#else
	/*
	 * test null first. if CONFIG_HWCONFIG is not defined
	 * hwconfig_arg_cmp returns non-zero
	 */
	if (hwconfig_subarg_cmp_f("fsl_ddr", "ctlr_intlv", "null", buf)) {
		popts->memctl_interleaving = 0;
		debug("memory controller interleaving disabled.\n");
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "cacheline", buf)) {
		/* on a 3-controller setup the third controller stays out */
		popts->memctl_interleaving_mode =
			((CONFIG_NUM_DDR_CONTROLLERS == 3) && ctrl_num == 2) ?
			0 : FSL_DDR_CACHE_LINE_INTERLEAVING;
		popts->memctl_interleaving =
			((CONFIG_NUM_DDR_CONTROLLERS == 3) && ctrl_num == 2) ?
			0 : 1;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "page", buf)) {
		popts->memctl_interleaving_mode =
			((CONFIG_NUM_DDR_CONTROLLERS == 3) && ctrl_num == 2) ?
			0 : FSL_DDR_PAGE_INTERLEAVING;
		popts->memctl_interleaving =
			((CONFIG_NUM_DDR_CONTROLLERS == 3) && ctrl_num == 2) ?
			0 : 1;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "bank", buf)) {
		popts->memctl_interleaving_mode =
			((CONFIG_NUM_DDR_CONTROLLERS == 3) && ctrl_num == 2) ?
			0 : FSL_DDR_BANK_INTERLEAVING;
		popts->memctl_interleaving =
			((CONFIG_NUM_DDR_CONTROLLERS == 3) && ctrl_num == 2) ?
			0 : 1;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "superbank", buf)) {
		popts->memctl_interleaving_mode =
			((CONFIG_NUM_DDR_CONTROLLERS == 3) && ctrl_num == 2) ?
			0 : FSL_DDR_SUPERBANK_INTERLEAVING;
		popts->memctl_interleaving =
			((CONFIG_NUM_DDR_CONTROLLERS == 3) && ctrl_num == 2) ?
			0 : 1;
#if (CONFIG_NUM_DDR_CONTROLLERS == 3)
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "3way_1KB", buf)) {
		popts->memctl_interleaving_mode =
			FSL_DDR_3WAY_1KB_INTERLEAVING;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "3way_4KB", buf)) {
		popts->memctl_interleaving_mode =
			FSL_DDR_3WAY_4KB_INTERLEAVING;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "3way_8KB", buf)) {
		popts->memctl_interleaving_mode =
			FSL_DDR_3WAY_8KB_INTERLEAVING;
#elif (CONFIG_NUM_DDR_CONTROLLERS == 4)
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "4way_1KB", buf)) {
		popts->memctl_interleaving_mode =
			FSL_DDR_4WAY_1KB_INTERLEAVING;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "4way_4KB", buf)) {
		popts->memctl_interleaving_mode =
			FSL_DDR_4WAY_4KB_INTERLEAVING;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					 "ctlr_intlv",
					 "4way_8KB", buf)) {
		popts->memctl_interleaving_mode =
			FSL_DDR_4WAY_8KB_INTERLEAVING;
#endif
	} else {
		popts->memctl_interleaving = 0;
		printf("hwconfig has unrecognized parameter for ctlr_intlv.\n");
	}
#endif	/* CONFIG_SYS_FSL_DDR_INTLV_256B */
done:
#endif /* CONFIG_NUM_DDR_CONTROLLERS > 1 */
	if ((hwconfig_sub_f("fsl_ddr", "bank_intlv", buf)) &&
	    (CONFIG_CHIP_SELECTS_PER_CTRL > 1)) {
		/*
		 * test null first. if CONFIG_HWCONFIG is not defined,
		 * hwconfig_subarg_cmp_f returns non-zero
		 */
		if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
					  "null", buf))
			debug("bank interleaving disabled.\n");
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
					       "cs0_cs1", buf))
			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1;
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
					       "cs2_cs3", buf))
			popts->ba_intlv_ctl = FSL_DDR_CS2_CS3;
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
					       "cs0_cs1_and_cs2_cs3", buf))
			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_AND_CS2_CS3;
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
					       "cs0_cs1_cs2_cs3", buf))
			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_CS2_CS3;
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
					       "auto", buf))
			popts->ba_intlv_ctl = auto_bank_intlv(pdimm);
		else
			printf("hwconfig has unrecognized parameter for bank_intlv.\n");

		/* Validate the requested rank interleaving against DIMMs. */
		switch (popts->ba_intlv_ctl & FSL_DDR_CS0_CS1_CS2_CS3) {
		case FSL_DDR_CS0_CS1_CS2_CS3:
#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
			if (pdimm[0].n_ranks < 4) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(chip-select) for "
					"CS0+CS1+CS2+CS3 on controller %d, "
					"interleaving disabled!\n", ctrl_num);
			}
#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
#ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
			if (pdimm[0].n_ranks == 4)
				break;
#endif
			if ((pdimm[0].n_ranks < 2) && (pdimm[1].n_ranks < 2)) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(chip-select) for "
					"CS0+CS1+CS2+CS3 on controller %d, "
					"interleaving disabled!\n", ctrl_num);
			}
			if (pdimm[0].capacity != pdimm[1].capacity) {
				popts->ba_intlv_ctl = 0;
				printf("Not identical DIMM size for "
					"CS0+CS1+CS2+CS3 on controller %d, "
					"interleaving disabled!\n", ctrl_num);
			}
#endif
			break;
		case FSL_DDR_CS0_CS1:
			if (pdimm[0].n_ranks < 2) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(chip-select) for "
					"CS0+CS1 on controller %d, "
					"interleaving disabled!\n", ctrl_num);
			}
			break;
		case FSL_DDR_CS2_CS3:
#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
			if (pdimm[0].n_ranks < 4) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(chip-select) for CS2+CS3 "
					"on controller %d, interleaving disabled!\n", ctrl_num);
			}
#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
			if (pdimm[1].n_ranks < 2) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(chip-select) for CS2+CS3 "
					"on controller %d, interleaving disabled!\n", ctrl_num);
			}
#endif
			break;
		case FSL_DDR_CS0_CS1_AND_CS2_CS3:
#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
			if (pdimm[0].n_ranks < 4) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(CS) for CS0+CS1 and "
					"CS2+CS3 on controller %d, "
					"interleaving disabled!\n", ctrl_num);
			}
#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
			if ((pdimm[0].n_ranks < 2) || (pdimm[1].n_ranks < 2)) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(CS) for CS0+CS1 and "
					"CS2+CS3 on controller %d, "
					"interleaving disabled!\n", ctrl_num);
			}
#endif
			break;
		default:
			popts->ba_intlv_ctl = 0;
			break;
		}
	}

	if (hwconfig_sub_f("fsl_ddr", "addr_hash", buf)) {
		if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash", "null", buf))
			popts->addr_hash = 0;
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash",
					       "true", buf))
			popts->addr_hash = 1;
	}

	if (pdimm[0].n_ranks == 4)
		popts->quad_rank_present = 1;

	/* RCW overrides for registered DIMMs, keyed by data rate (MT/s). */
	ddr_freq = get_ddr_freq(ctrl_num) / 1000000;
	if (popts->registered_dimm_en) {
		popts->rcw_override = 1;
		popts->rcw_1 = 0x000a5a00;
		if (ddr_freq <= 800)
			popts->rcw_2 = 0x00000000;
		else if (ddr_freq <= 1066)
			popts->rcw_2 = 0x00100000;
		else if (ddr_freq <= 1333)
			popts->rcw_2 = 0x00200000;
		else
			popts->rcw_2 = 0x00300000;
	}

	fsl_ddr_board_options(popts, pdimm, ctrl_num);

	return 0;
}
/*
 * compute_lowest_common_dimm_parameters()
 *
 * Determine the worst-case DIMM timing parameters from the set of DIMMs
 * whose parameters have been computed into the array pointed to
 * by dimm_params, and store them in @outpdimm.  Also derives the common
 * burst-length bitmask, registered/unbuffered classification, the common
 * SPD-defined CAS latency, the highest common derated CAS latency
 * (non-DDR3), and the additive latency.
 *
 * Returns 0 on success (including "no DIMMs present"); 1 if the memory
 * clock or the computed latencies are incompatible with the DIMMs.
 */
unsigned int compute_lowest_common_dimm_parameters(const dimm_params_t *dimm_params,
		common_timing_params_t *outpdimm,
		unsigned int number_of_dimms)
{
	unsigned int i, j;
	unsigned int tCKmin_X_ps = 0;
	unsigned int tCKmax_ps = 0xFFFFFFFF;
	unsigned int tCKmax_max_ps = 0;
	unsigned int tRCD_ps = 0;
	unsigned int tRP_ps = 0;
	unsigned int tRAS_ps = 0;
	unsigned int tWR_ps = 0;
	unsigned int tWTR_ps = 0;
	unsigned int tRFC_ps = 0;
	unsigned int tRRD_ps = 0;
	unsigned int tRC_ps = 0;
	unsigned int refresh_rate_ps = 0;
	unsigned int tIS_ps = 0;
	unsigned int tIH_ps = 0;
	unsigned int tDS_ps = 0;
	unsigned int tDH_ps = 0;
	unsigned int tRTP_ps = 0;
	unsigned int tDQSQ_max_ps = 0;
	unsigned int tQHS_ps = 0;
	unsigned int temp1, temp2;
	unsigned int additive_latency = 0;
#if !defined(CONFIG_FSL_DDR3)
	const unsigned int mclk_ps = get_memory_clk_period_ps();
	unsigned int lowest_good_caslat;
	unsigned int not_ok;

	debug("using mclk_ps = %u\n", mclk_ps);
#endif

	/* temp1 counts the DIMMs that are absent or unusable */
	temp1 = 0;
	for (i = 0; i < number_of_dimms; i++) {
		/*
		 * If there are no ranks on this DIMM,
		 * it probably doesn't exist, so skip it.
		 */
		if (dimm_params[i].n_ranks == 0) {
			temp1++;
			continue;
		}
		if (dimm_params[i].n_ranks == 4 && i != 0) {
			printf("Found Quad-rank DIMM in wrong bank, ignored."
				" Software may not run as expected.\n");
			temp1++;
			continue;
		}
		if (dimm_params[i].n_ranks == 4 && \
			CONFIG_CHIP_SELECTS_PER_CTRL/CONFIG_DIMM_SLOTS_PER_CTLR < 4) {
			/* Fixed: message was missing its trailing newline */
			printf("Found Quad-rank DIMM, not able to support.\n");
			temp1++;
			continue;
		}

		/*
		 * Find minimum tCKmax_ps to find fastest slow speed,
		 * i.e., this is the slowest the whole system can go.
		 */
		tCKmax_ps = min(tCKmax_ps, dimm_params[i].tCKmax_ps);

		/* Either find maximum value to determine slowest
		 * speed, delay, time, period, etc */
		tCKmin_X_ps = max(tCKmin_X_ps, dimm_params[i].tCKmin_X_ps);
		tCKmax_max_ps = max(tCKmax_max_ps, dimm_params[i].tCKmax_ps);
		tRCD_ps = max(tRCD_ps, dimm_params[i].tRCD_ps);
		tRP_ps = max(tRP_ps, dimm_params[i].tRP_ps);
		tRAS_ps = max(tRAS_ps, dimm_params[i].tRAS_ps);
		tWR_ps = max(tWR_ps, dimm_params[i].tWR_ps);
		tWTR_ps = max(tWTR_ps, dimm_params[i].tWTR_ps);
		tRFC_ps = max(tRFC_ps, dimm_params[i].tRFC_ps);
		tRRD_ps = max(tRRD_ps, dimm_params[i].tRRD_ps);
		tRC_ps = max(tRC_ps, dimm_params[i].tRC_ps);
		tIS_ps = max(tIS_ps, dimm_params[i].tIS_ps);
		tIH_ps = max(tIH_ps, dimm_params[i].tIH_ps);
		tDS_ps = max(tDS_ps, dimm_params[i].tDS_ps);
		tDH_ps = max(tDH_ps, dimm_params[i].tDH_ps);
		tRTP_ps = max(tRTP_ps, dimm_params[i].tRTP_ps);
		tQHS_ps = max(tQHS_ps, dimm_params[i].tQHS_ps);
		refresh_rate_ps = max(refresh_rate_ps,
				      dimm_params[i].refresh_rate_ps);

		/*
		 * Find maximum tDQSQ_max_ps to find slowest.
		 *
		 * FIXME: is finding the slowest value the correct
		 * strategy for this parameter?
		 */
		tDQSQ_max_ps = max(tDQSQ_max_ps, dimm_params[i].tDQSQ_max_ps);
	}

	outpdimm->ndimms_present = number_of_dimms - temp1;

	if (temp1 == number_of_dimms) {
		debug("no dimms this memory controller\n");
		return 0;
	}

	outpdimm->tCKmin_X_ps = tCKmin_X_ps;
	outpdimm->tCKmax_ps = tCKmax_ps;
	outpdimm->tCKmax_max_ps = tCKmax_max_ps;
	outpdimm->tRCD_ps = tRCD_ps;
	outpdimm->tRP_ps = tRP_ps;
	outpdimm->tRAS_ps = tRAS_ps;
	outpdimm->tWR_ps = tWR_ps;
	outpdimm->tWTR_ps = tWTR_ps;
	outpdimm->tRFC_ps = tRFC_ps;
	outpdimm->tRRD_ps = tRRD_ps;
	outpdimm->tRC_ps = tRC_ps;
	outpdimm->refresh_rate_ps = refresh_rate_ps;
	outpdimm->tIS_ps = tIS_ps;
	outpdimm->tIH_ps = tIH_ps;
	outpdimm->tDS_ps = tDS_ps;
	outpdimm->tDH_ps = tDH_ps;
	outpdimm->tRTP_ps = tRTP_ps;
	outpdimm->tDQSQ_max_ps = tDQSQ_max_ps;
	outpdimm->tQHS_ps = tQHS_ps;

	/* Determine common burst length for all DIMMs. */
	temp1 = 0xff;
	for (i = 0; i < number_of_dimms; i++) {
		if (dimm_params[i].n_ranks) {
			temp1 &= dimm_params[i].burst_lengths_bitmask;
		}
	}
	outpdimm->all_DIMMs_burst_lengths_bitmask = temp1;

	/* Determine if all DIMMs registered buffered. */
	temp1 = temp2 = 0;
	for (i = 0; i < number_of_dimms; i++) {
		if (dimm_params[i].n_ranks) {
			if (dimm_params[i].registered_dimm) {
				temp1 = 1;
				printf("Detected RDIMM %s\n",
					dimm_params[i].mpart);
			} else {
				temp2 = 1;
				printf("Detected UDIMM %s\n",
					dimm_params[i].mpart);
			}
		}
	}

	outpdimm->all_DIMMs_registered = 0;
	outpdimm->all_DIMMs_unbuffered = 0;
	if (temp1 && !temp2) {
		outpdimm->all_DIMMs_registered = 1;
	} else if (!temp1 && temp2) {
		outpdimm->all_DIMMs_unbuffered = 1;
	} else {
		printf("ERROR: Mix of registered buffered and unbuffered "
			"DIMMs detected!\n");
	}

	/* All RDIMMs must carry identical register control words. */
	temp1 = 0;
	if (outpdimm->all_DIMMs_registered)
		for (j = 0; j < 16; j++) {
			outpdimm->rcw[j] = dimm_params[0].rcw[j];
			for (i = 1; i < number_of_dimms; i++)
				if (dimm_params[i].rcw[j] != dimm_params[0].rcw[j]) {
					temp1 = 1;
					break;
				}
		}

	if (temp1 != 0)
		printf("ERROR: Mix different RDIMM detected!\n");

#if defined(CONFIG_FSL_DDR3)
	if (compute_cas_latency_ddr3(dimm_params, outpdimm, number_of_dimms))
		return 1;
#else
	/*
	 * Compute a CAS latency suitable for all DIMMs
	 *
	 * Strategy for SPD-defined latencies: compute only
	 * CAS latency defined by all DIMMs.
	 */

	/*
	 * Step 1: find CAS latency common to all DIMMs using bitwise
	 * operation.
	 */
	temp1 = 0xFF;
	for (i = 0; i < number_of_dimms; i++) {
		if (dimm_params[i].n_ranks) {
			temp2 = 0;
			temp2 |= 1 << dimm_params[i].caslat_X;
			temp2 |= 1 << dimm_params[i].caslat_X_minus_1;
			temp2 |= 1 << dimm_params[i].caslat_X_minus_2;
			/*
			 * FIXME: If there was no entry for X-2 (X-1) in
			 * the SPD, then caslat_X_minus_2
			 * (caslat_X_minus_1) contains either 255 or
			 * 0xFFFFFFFF because that's what the glorious
			 * __ilog2 function returns for an input of 0.
			 * On 32-bit PowerPC, left shift counts with bit
			 * 26 set (that the value of 255 or 0xFFFFFFFF
			 * will have), cause the destination register to
			 * be 0. That is why this works.
			 */
			temp1 &= temp2;
		}
	}

	/*
	 * Step 2: check each common CAS latency against tCK of each
	 * DIMM's SPD.
	 */
	lowest_good_caslat = 0;
	temp2 = 0;
	while (temp1) {
		not_ok = 0;
		temp2 = __ilog2(temp1);
		debug("checking common caslat = %u\n", temp2);

		/* Check if this CAS latency will work on all DIMMs at tCK. */
		for (i = 0; i < number_of_dimms; i++) {
			if (!dimm_params[i].n_ranks) {
				continue;
			}
			if (dimm_params[i].caslat_X == temp2) {
				if (mclk_ps >= dimm_params[i].tCKmin_X_ps) {
					debug("CL = %u ok on DIMM %u at tCK=%u"
					    " ps with its tCKmin_X_ps of %u\n",
					       temp2, i, mclk_ps,
					       dimm_params[i].tCKmin_X_ps);
					continue;
				} else {
					not_ok++;
				}
			}

			if (dimm_params[i].caslat_X_minus_1 == temp2) {
				unsigned int tCKmin_X_minus_1_ps
					= dimm_params[i].tCKmin_X_minus_1_ps;
				if (mclk_ps >= tCKmin_X_minus_1_ps) {
					debug("CL = %u ok on DIMM %u at "
						"tCK=%u ps with its "
						"tCKmin_X_minus_1_ps of %u\n",
					       temp2, i, mclk_ps,
					       tCKmin_X_minus_1_ps);
					continue;
				} else {
					not_ok++;
				}
			}

			if (dimm_params[i].caslat_X_minus_2 == temp2) {
				unsigned int tCKmin_X_minus_2_ps
					= dimm_params[i].tCKmin_X_minus_2_ps;
				if (mclk_ps >= tCKmin_X_minus_2_ps) {
					debug("CL = %u ok on DIMM %u at "
						"tCK=%u ps with its "
						"tCKmin_X_minus_2_ps of %u\n",
					       temp2, i, mclk_ps,
					       tCKmin_X_minus_2_ps);
					continue;
				} else {
					not_ok++;
				}
			}
		}

		if (!not_ok) {
			lowest_good_caslat = temp2;
		}

		temp1 &= ~(1 << temp2);
	}

	debug("lowest common SPD-defined CAS latency = %u\n",
	      lowest_good_caslat);
	outpdimm->lowest_common_SPD_caslat = lowest_good_caslat;

	/*
	 * Compute a common 'de-rated' CAS latency.
	 *
	 * The strategy here is to find the *highest* derated cas latency
	 * with the assumption that all of the DIMMs will support a derated
	 * CAS latency higher than or equal to their lowest derated value.
	 */
	temp1 = 0;
	for (i = 0; i < number_of_dimms; i++) {
		temp1 = max(temp1, dimm_params[i].caslat_lowest_derated);
	}
	outpdimm->highest_common_derated_caslat = temp1;
	debug("highest common derated CAS latency = %u\n", temp1);
#endif /* #if defined(CONFIG_FSL_DDR3) */

	/* Determine if all DIMMs ECC capable. */
	temp1 = 1;
	for (i = 0; i < number_of_dimms; i++) {
		if (dimm_params[i].n_ranks &&
			!(dimm_params[i].edc_config & EDC_ECC)) {
			temp1 = 0;
			break;
		}
	}
	if (temp1) {
		debug("all DIMMs ECC capable\n");
	} else {
		debug("Warning: not all DIMMs ECC capable, cant enable ECC\n");
	}
	outpdimm->all_DIMMs_ECC_capable = temp1;

#ifndef CONFIG_FSL_DDR3
	/* FIXME: move to somewhere else to validate. */
	if (mclk_ps > tCKmax_max_ps) {
		printf("Warning: some of the installed DIMMs "
			"can not operate this slowly.\n");
		return 1;
	}
#endif
	/*
	 * Compute additive latency.
	 *
	 * For DDR1, additive latency should be 0.
	 *
	 * For DDR2, with ODT enabled, use "a value" less than ACTTORW,
	 * which comes from Trcd, and also note that:
	 * add_lat + caslat must be >= 4
	 *
	 * For DDR3, we use the AL=0
	 *
	 * When to use additive latency for DDR2:
	 *
	 * I. Because you are using CL=3 and need to do ODT on writes and
	 * want functionality.
	 * 1. Are you going to use ODT? (Does your board not have
	 * additional termination circuitry for DQ, DQS, DQS_,
	 * DM, RDQS, RDQS_ for x4/x8 configs?)
	 * 2. If so, is your lowest supported CL going to be 3?
	 * 3. If so, then you must set AL=1 because
	 *
	 * WL >= 3 for ODT on writes
	 * RL = AL + CL
	 * WL = RL - 1
	 * ->
	 * WL = AL + CL - 1
	 * AL + CL - 1 >= 3
	 * AL + CL >= 4
	 * QED
	 *
	 * RL >= 3 for ODT on reads
	 * RL = AL + CL
	 *
	 * Since CL aren't usually less than 2, AL=0 is a minimum,
	 * so the WL-derived AL should be the -- FIXME?
	 *
	 * II. Because you are using auto-precharge globally and want to
	 * use additive latency (posted CAS) to get more bandwidth.
	 * 1. Are you going to use auto-precharge mode globally?
	 *
	 * Use additive latency and compute AL to be 1 cycle less than
	 * tRCD, i.e. the READ or WRITE command is in the cycle
	 * immediately following the ACTIVATE command..
	 *
	 * III. Because you feel like it or want to do some sort of
	 * degraded-performance experiment.
	 * 1. Do you just want to use additive latency because you feel
	 * like it?
	 *
	 * Validation: AL is less than tRCD, and within the other
	 * read-to-precharge constraints.
	 */
	additive_latency = 0;

#if defined(CONFIG_FSL_DDR2)
	if (lowest_good_caslat < 4) {
		additive_latency = picos_to_mclk(tRCD_ps) - lowest_good_caslat;
		if (mclk_to_picos(additive_latency) > tRCD_ps) {
			additive_latency = picos_to_mclk(tRCD_ps);
			debug("setting additive_latency to %u because it was "
				" greater than tRCD_ps\n", additive_latency);
		}
	}
#elif defined(CONFIG_FSL_DDR3)
	/*
	 * The system will not use the global auto-precharge mode.
	 * However, it uses the page mode, so we set AL=0
	 */
	additive_latency = 0;
#endif

	/*
	 * Validate additive latency
	 * FIXME: move to somewhere else to validate
	 *
	 * AL <= tRCD(min)
	 */
	if (mclk_to_picos(additive_latency) > tRCD_ps) {
		printf("Error: invalid additive latency exceeds tRCD(min).\n");
		return 1;
	}

	/*
	 * RL = CL + AL; RL >= 3 for ODT_RD_CFG to be enabled
	 * WL = RL - 1; WL >= 3 for ODT_WL_CFG to be enabled
	 * ADD_LAT (the register) must be set to a value less
	 * than ACTTORW if WL = 1, then AL must be set to 1
	 * RD_TO_PRE (the register) must be set to a minimum
	 * tRTP + AL if AL is nonzero
	 */

	/*
	 * Additive latency will be applied only if the memctl option to
	 * use it.
	 */
	outpdimm->additive_latency = additive_latency;

	return 0;
}
/*
 * populate_memctl_options() - choose the default memory controller options.
 *
 * Fills @popts with baseline settings (chip-select local options,
 * interleaving, ECC, burst length, timing windows), then lets the
 * environment ("memctl_intlv_ctl", "ba_intlv_ctl") and finally the
 * board-specific hook fsl_ddr_board_options() override them.
 *
 * @all_DIMMs_registered: nonzero if every installed DIMM is registered
 * @popts:    controller options structure to populate (output)
 * @pdimm:    per-slot DIMM parameters (indexed by slot)
 * @ctrl_num: memory controller number, passed through to the board hook
 *
 * Returns 0 (always; currently no failure path).
 */
unsigned int populate_memctl_options(int all_DIMMs_registered,
			memctl_options_t *popts,
			dimm_params_t *pdimm,
			unsigned int ctrl_num)
{
	unsigned int i;
	const char *p;

	/* Chip select options. */

	/* Pick chip-select local options. */
	for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) {
		/*
		 * If not DDR2, odt_rd_cfg and odt_wr_cfg need to be 0.
		 * NOTE(review): odt_wr_cfg is set to 1 unconditionally here,
		 * i.e. ODT asserted on writes — only for single CS?
		 * Confirm against the DDR technology actually built.
		 */
		popts->cs_local_opts[i].odt_rd_cfg = 0;
		popts->cs_local_opts[i].odt_wr_cfg = 1;
		popts->cs_local_opts[i].auto_precharge = 0;
	}

	/* Pick interleaving mode. */

	/*
	 * 0 = no interleaving
	 * 1 = interleaving between 2 controllers
	 */
	popts->memctl_interleaving = 0;

	/*
	 * 0 = cacheline
	 * 1 = page
	 * 2 = (logical) bank
	 * 3 = superbank (only if CS interleaving is enabled)
	 */
	popts->memctl_interleaving_mode = 0;

	/*
	 * 0: cacheline: bit 30 of the 36-bit physical addr selects the memctl
	 * 1: page: bit to the left of the column bits selects the memctl
	 * 2: bank: bit to the left of the bank bits selects the memctl
	 * 3: superbank: bit to the left of the chip select selects the memctl
	 *
	 * NOTE: ba_intlv (rank interleaving) is independent of memory
	 * controller interleaving; it is only within a memory controller.
	 * Must use superbank interleaving if rank interleaving is used and
	 * memory controller interleaving is enabled.
	 */

	/*
	 * Rank (bank/chip-select) interleaving control:
	 * 0 = no
	 * 0x40 = CS0,CS1
	 * 0x20 = CS2,CS3
	 * 0x60 = CS0,CS1 + CS2,CS3
	 * 0x04 = CS0,CS1,CS2,CS3
	 */
	popts->ba_intlv_ctl = 0;

	/* Memory Organization Parameters */
	popts->registered_dimm_en = all_DIMMs_registered;

	/* Operational Mode Parameters */

	/* Pick ECC modes */
#ifdef CONFIG_DDR_ECC
	popts->ECC_mode = 1;		  /* 0 = disabled, 1 = enabled */
#else
	popts->ECC_mode = 0;		  /* 0 = disabled, 1 = enabled */
#endif
	popts->ECC_init_using_memctl = 1; /* 0 = use DMA, 1 = use memctl */

	/*
	 * Choose DQS config
	 * 0 for DDR1
	 * 1 for DDR2
	 */
#if defined(CONFIG_FSL_DDR1)
	popts->DQS_config = 0;
#elif defined(CONFIG_FSL_DDR2) || defined(CONFIG_FSL_DDR3)
	popts->DQS_config = 1;
#endif

	/* Choose self-refresh during sleep. */
	popts->self_refresh_in_sleep = 1;

	/* Choose dynamic power management mode. */
	popts->dynamic_power = 0;

	/* 0 = 64-bit, 1 = 32-bit, 2 = 16-bit */
	popts->data_bus_width = 0;

	/* Choose burst length. */
#if defined(CONFIG_FSL_DDR3)
#if defined(CONFIG_E500MC)
	popts->OTF_burst_chop_en = 0;	/* on-the-fly burst chop disable */
	popts->burst_length = DDR_BL8;	/* Fixed 8-beat burst len */
#else
	popts->OTF_burst_chop_en = 1;	/* on-the-fly burst chop */
	popts->burst_length = DDR_OTF;	/* on-the-fly BC4 and BL8 */
#endif
#else
	popts->burst_length = DDR_BL4;	/* has to be 4 for DDR2 */
#endif

	/* Choose ddr controller address mirror mode */
#if defined(CONFIG_FSL_DDR3)
	popts->mirrored_dimm = pdimm[0].mirrored_dimm;
#endif

	/* Global Timing Parameters. */
	debug("mclk_ps = %u ps\n", get_memory_clk_period_ps());

	/* Pick a caslat override. */
	popts->cas_latency_override = 0;
	popts->cas_latency_override_value = 3;
	if (popts->cas_latency_override) {
		debug("using caslat override value = %u\n",
		      popts->cas_latency_override_value);
	}

	/* Decide whether to use the computed derated latency */
	popts->use_derated_caslat = 0;

	/* Choose an additive latency. */
	popts->additive_latency_override = 0;
	popts->additive_latency_override_value = 3;
	if (popts->additive_latency_override) {
		debug("using additive latency override value = %u\n",
		      popts->additive_latency_override_value);
	}

	/*
	 * 2T_EN setting
	 *
	 * Factors to consider for 2T_EN:
	 *	- number of DIMMs installed
	 *	- number of components, number of active ranks
	 *	- how much time you want to spend playing around
	 */
	popts->twoT_en = 0;
	popts->threeT_en = 0;

	/*
	 * BSTTOPRE precharge interval
	 *
	 * Set this to 0 for global auto precharge
	 *
	 * FIXME: Should this be configured in picoseconds?
	 * Why it should be in ps: better understanding of this
	 * relative to actual DRAM timing parameters such as tRAS.
	 * e.g. tRAS(min) = 40 ns
	 */
	popts->bstopre = 0x100;

	/* Minimum CKE pulse width -- tCKE(MIN) */
	popts->tCKE_clock_pulse_width_ps
		= mclk_to_picos(FSL_DDR_MIN_TCKE_PULSE_WIDTH_DDR);

	/*
	 * Window for four activates -- tFAW
	 *
	 * FIXME: UM: applies only to DDR2/DDR3 with eight logical banks only
	 * FIXME: varies depending upon number of column addresses or data
	 * FIXME: width, was considering looking at pdimm->primary_sdram_width
	 */
#if defined(CONFIG_FSL_DDR1)
	popts->tFAW_window_four_activates_ps = mclk_to_picos(1);
#elif defined(CONFIG_FSL_DDR2)
	/*
	 * x4/x8; some datasheets have 35000
	 * x16 wide columns only? Use 50000?
	 */
	popts->tFAW_window_four_activates_ps = 37500;
#elif defined(CONFIG_FSL_DDR3)
	popts->tFAW_window_four_activates_ps = pdimm[0].tFAW_ps;
#endif
	popts->zq_en = 0;
	popts->wrlvl_en = 0;
#if defined(CONFIG_FSL_DDR3)
	/*
	 * Because a DDR3 DIMM uses a fly-by topology, enable write
	 * leveling by default to meet tQDSS under different loading.
	 */
	popts->wrlvl_en = 1;
	popts->wrlvl_override = 0;
#endif

	/*
	 * Check interleaving configuration from environment.
	 * Please refer to doc/README.fsl-ddr for the detail.
	 *
	 * If memory controller interleaving is enabled, then the data
	 * bus widths must be programmed identically for the 2 memory
	 * controllers.
	 *
	 * XXX: Attempt to set both controllers to the same chip select
	 * interleaving mode. It will do a best effort to get the
	 * requested ranks interleaved together such that the result
	 * should be a subset of the requested configuration.
	 */
#if (CONFIG_NUM_DDR_CONTROLLERS > 1)
	if ((p = getenv("memctl_intlv_ctl")) != NULL) {
		if (pdimm[0].n_ranks == 0) {
			/*
			 * Only ranks on CS0 (and ranks chip-select
			 * interleaved with CS0) can be controller
			 * interleaved, so an empty CS0 forces it off.
			 */
			printf("There is no rank on CS0. Because only rank on "
				"CS0 and ranks chip-select interleaved with CS0"
				" are controller interleaved, force non memory "
				"controller interleaving\n");
			popts->memctl_interleaving = 0;
		} else {
			popts->memctl_interleaving = 1;
			/* Named modes, else a raw numeric mode value. */
			if (strcmp(p, "cacheline") == 0)
				popts->memctl_interleaving_mode =
					FSL_DDR_CACHE_LINE_INTERLEAVING;
			else if (strcmp(p, "page") == 0)
				popts->memctl_interleaving_mode =
					FSL_DDR_PAGE_INTERLEAVING;
			else if (strcmp(p, "bank") == 0)
				popts->memctl_interleaving_mode =
					FSL_DDR_BANK_INTERLEAVING;
			else if (strcmp(p, "superbank") == 0)
				popts->memctl_interleaving_mode =
					FSL_DDR_SUPERBANK_INTERLEAVING;
			else
				popts->memctl_interleaving_mode =
					simple_strtoul(p, NULL, 0);
		}
	}
#endif
	if( ((p = getenv("ba_intlv_ctl")) != NULL) &&
		(CONFIG_CHIP_SELECTS_PER_CTRL > 1)) {
		/* Named rank-interleaving modes, else a raw numeric value. */
		if (strcmp(p, "cs0_cs1") == 0)
			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1;
		else if (strcmp(p, "cs2_cs3") == 0)
			popts->ba_intlv_ctl = FSL_DDR_CS2_CS3;
		else if (strcmp(p, "cs0_cs1_and_cs2_cs3") == 0)
			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_AND_CS2_CS3;
		else if (strcmp(p, "cs0_cs1_cs2_cs3") == 0)
			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_CS2_CS3;
		else
			popts->ba_intlv_ctl = simple_strtoul(p, NULL, 0);

		/*
		 * Sanity-check the requested mode against the ranks
		 * actually present; fall back to non-interleaving when
		 * a required slot is not dual-rank.
		 */
		switch (popts->ba_intlv_ctl & FSL_DDR_CS0_CS1_CS2_CS3) {
		case FSL_DDR_CS0_CS1_CS2_CS3:
		case FSL_DDR_CS0_CS1:
			if (pdimm[0].n_ranks != 2) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(chip-select) for "
					"CS0+CS1, force non-interleaving!\n");
			}
			break;
		case FSL_DDR_CS2_CS3:
			if (pdimm[1].n_ranks != 2) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(CS) for CS2+CS3, "
					"force non-interleaving!\n");
			}
			break;
		case FSL_DDR_CS0_CS1_AND_CS2_CS3:
			if ((pdimm[0].n_ranks != 2) || (pdimm[1].n_ranks != 2)) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(CS) for CS0+CS1 or "
					"CS2+CS3, force non-interleaving!\n");
			}
			break;
		default:
			popts->ba_intlv_ctl = 0;
			break;
		}
	}

	/* Give the board code the final say. */
	fsl_ddr_board_options(popts, pdimm, ctrl_num);

	return 0;
}
/* DDR SDRAM Mode configuration set (DDR_SDRAM_MODE) */ static void set_ddr_sdram_mode(fsl_ddr_cfg_regs_t *ddr, const memctl_options_t *popts, const common_timing_params_t *common_dimm, unsigned int cas_latency, unsigned int additive_latency) { unsigned short esdmode; /* Extended SDRAM mode */ unsigned short sdmode; /* SDRAM mode */ /* * FIXME: This ought to be pre-calculated in a * technology-specific routine, * e.g. compute_DDR2_mode_register(), and then the * sdmode and esdmode passed in as part of common_dimm. */ /* Extended Mode Register */ unsigned int mrs = 0; /* Mode Register Set */ unsigned int outputs = 0; /* 0=Enabled, 1=Disabled */ unsigned int rdqs_en = 0; /* RDQS Enable: 0=no, 1=yes */ unsigned int dqs_en = 0; /* DQS# Enable: 0=enable, 1=disable */ unsigned int ocd = 0; /* 0x0=OCD not supported, 0x7=OCD default state */ unsigned int rtt; unsigned int al; /* Posted CAS# additive latency (AL) */ unsigned int ods = 0; /* Output Drive Strength: 0 = Full strength (18ohm) 1 = Reduced strength (4ohm) */ unsigned int dll_en = 0; /* DLL Enable 0=Enable (Normal), 1=Disable (Test/Debug) */ /* Mode Register (MR) */ unsigned int mr; /* Mode Register Definition */ unsigned int pd; /* Power-Down Mode */ unsigned int wr; /* Write Recovery */ unsigned int dll_res; /* DLL Reset */ unsigned int mode; /* Normal=0 or Test=1 */ unsigned int caslat = 0;/* CAS# latency */ /* BT: Burst Type (0=Sequential, 1=Interleaved) */ unsigned int bt; unsigned int bl; /* BL: Burst Length */ #if defined(CONFIG_FSL_DDR2) const unsigned int mclk_ps = get_memory_clk_period_ps(); #endif rtt = fsl_ddr_get_rtt(); al = additive_latency; esdmode = (0 | ((mrs & 0x3) << 14) | ((outputs & 0x1) << 12) | ((rdqs_en & 0x1) << 11) | ((dqs_en & 0x1) << 10) | ((ocd & 0x7) << 7) | ((rtt & 0x2) << 5) /* rtt field is split */ | ((al & 0x7) << 3) | ((rtt & 0x1) << 2) /* rtt field is split */ | ((ods & 0x1) << 1) | ((dll_en & 0x1) << 0) ); mr = 0; /* FIXME: CHECKME */ /* * 0 = Fast Exit (Normal) * 1 = Slow Exit 
(Low Power) */ pd = 0; #if defined(CONFIG_FSL_DDR1) wr = 0; /* Historical */ #elif defined(CONFIG_FSL_DDR2) wr = (common_dimm->tWR_ps + mclk_ps - 1) / mclk_ps - 1; #else #error "Write tWR_auto for DDR3" #endif dll_res = 0; mode = 0; #if defined(CONFIG_FSL_DDR1) if (1 <= cas_latency && cas_latency <= 4) { unsigned char mode_caslat_table[4] = { 0x5, /* 1.5 clocks */ 0x2, /* 2.0 clocks */ 0x6, /* 2.5 clocks */ 0x3 /* 3.0 clocks */ }; caslat = mode_caslat_table[cas_latency - 1]; } else { printf("Warning: unknown cas_latency %d\n", cas_latency); } #elif defined(CONFIG_FSL_DDR2) caslat = cas_latency; #else #error "Fix the mode CAS Latency for DDR3" #endif bt = 0; switch (popts->burst_length) { case 4: bl = 2; break; case 8: bl = 3; break; default: printf("Error: invalid burst length of %u specified. " " Defaulting to 4 beats.\n", popts->burst_length); bl = 2; break; } sdmode = (0 | ((mr & 0x3) << 14) | ((pd & 0x1) << 12) | ((wr & 0x7) << 9) | ((dll_res & 0x1) << 8) | ((mode & 0x1) << 7) | ((caslat & 0x7) << 4) | ((bt & 0x1) << 3) | ((bl & 0x7) << 0) ); ddr->ddr_sdram_mode = (0 | ((esdmode & 0xFFFF) << 16) | ((sdmode & 0xFFFF) << 0) ); }