author     Sascha Hauer <s.hauer@pengutronix.de>  2023-02-23 16:07:24 +0100
committer  Sascha Hauer <s.hauer@pengutronix.de>  2023-02-23 16:07:24 +0100
commit     c4237bc715c68e871b5aef84fa0d76e770b20dbf (patch)
tree       ea44dc6abe4768557c9438453a974539569cdec0 /common
parent     7d49087043214b98fb89134f393433685abe454b (diff)
parent     28d261712db0195335e4b774b1ed88216d1229d9 (diff)
Merge branch 'for-next/imx'
Diffstat (limited to 'common')
-rw-r--r--  common/Makefile             4
-rw-r--r--  common/ddr1_dimm_params.c   323
-rw-r--r--  common/ddr2_dimm_params.c   326
-rw-r--r--  common/ddr3_dimm_params.c   328
-rw-r--r--  common/ddr4_dimm_params.c   355
-rw-r--r--  common/ddr_spd.c            17
6 files changed, 1347 insertions(+), 6 deletions(-)
diff --git a/common/Makefile b/common/Makefile
index bd5b50bc3b..8dc475f324 100644
--- a/common/Makefile
+++ b/common/Makefile
@@ -33,6 +33,10 @@ obj-$(CONFIG_CONSOLE_FULL) += console.o
obj-$(CONFIG_CONSOLE_SIMPLE) += console_simple.o
obj-y += console_countdown.o
obj-pbl-$(CONFIG_DDR_SPD) += ddr_spd.o
+obj-pbl-$(CONFIG_DDR_SPD) += ddr1_dimm_params.o
+obj-pbl-$(CONFIG_DDR_SPD) += ddr2_dimm_params.o
+obj-pbl-$(CONFIG_DDR_SPD) += ddr3_dimm_params.o
+obj-pbl-$(CONFIG_DDR_SPD) += ddr4_dimm_params.o
obj-$(CONFIG_ENV_HANDLING) += environment.o envfs-core.o
obj-$(CONFIG_DEFAULT_ENVIRONMENT) += envfs-core.o
obj-$(CONFIG_ENVIRONMENT_VARIABLES) += env.o
diff --git a/common/ddr1_dimm_params.c b/common/ddr1_dimm_params.c
new file mode 100644
index 0000000000..4a7db30e2e
--- /dev/null
+++ b/common/ddr1_dimm_params.c
@@ -0,0 +1,323 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2008 Freescale Semiconductor, Inc.
+ */
+#include <common.h>
+#include <linux/log2.h>
+#include <ddr_dimms.h>
+
+/*
+ * Calculate the Density of each Physical Rank.
+ * Returned size is in bytes.
+ *
+ * Study this table from Byte 31 of the JEDEC SPD Spec.
+ *
+ * DDR I DDR II
+ * Bit Size Size
+ * --- ----- ------
+ * 7 high 512MB 512MB
+ * 6 256MB 256MB
+ * 5 128MB 128MB
+ * 4 64MB 16GB
+ * 3 32MB 8GB
+ * 2 16MB 4GB
+ * 1 2GB 2GB
+ * 0 low 1GB 1GB
+ *
+ * Reorder Table to be linear by stripping the bottom
+ * 2 or 5 bits off and shifting them up to the top.
+ */
+
+static unsigned long long
+compute_ranksize(unsigned int mem_type, unsigned char row_dens)
+{
+ unsigned long long bsize;
+
+ /* Bottom 2 bits up to the top. */
+ bsize = ((row_dens >> 2) | ((row_dens & 3) << 6));
+ bsize <<= 24ULL;
+ debug("DDR: DDR I rank density = 0x%16llx\n", bsize);
+
+ return bsize;
+}
+
+/*
+ * Convert a two-nibble BCD value into a cycle time.
+ * While the spec calls for nano-seconds, picos are returned.
+ *
+ * This implements the tables for bytes 9, 23 and 25 for both
+ * DDR I and II. No allowance for distinguishing the invalid
+ * fields absent for DDR I yet present in DDR II is made.
+ * (That is, cycle times of .25, .33, .66 and .75 ns are
+ * allowed for both DDR II and I.)
+ */
+static unsigned int
+convert_bcd_tenths_to_cycle_time_ps(unsigned int spd_val)
+{
+ /* Table look up the lower nibble, allow DDR I & II. */
+ unsigned int tenths_ps[16] = {
+ 0,
+ 100,
+ 200,
+ 300,
+ 400,
+ 500,
+ 600,
+ 700,
+ 800,
+ 900,
+ 250, /* This and the next 3 entries valid ... */
+ 330, /* ... only for tCK calculations. */
+ 660,
+ 750,
+ 0, /* undefined */
+ 0 /* undefined */
+ };
+
+ unsigned int whole_ns = (spd_val & 0xF0) >> 4;
+ unsigned int tenth_ns = spd_val & 0x0F;
+ unsigned int ps = whole_ns * 1000 + tenths_ps[tenth_ns];
+
+ return ps;
+}
+
+static unsigned int
+convert_bcd_hundredths_to_cycle_time_ps(unsigned int spd_val)
+{
+ unsigned int tenth_ns = (spd_val & 0xF0) >> 4;
+ unsigned int hundredth_ns = spd_val & 0x0F;
+ unsigned int ps = tenth_ns * 100 + hundredth_ns * 10;
+
+ return ps;
+}
+
+static unsigned int byte40_table_ps[8] = {
+ 0,
+ 250,
+ 330,
+ 500,
+ 660,
+ 750,
+ 0, /* supposed to be RFC, but not sure what that means */
+ 0 /* Undefined */
+};
+
+static unsigned int
+compute_trfc_ps_from_spd(unsigned char trctrfc_ext, unsigned char trfc)
+{
+ return ((trctrfc_ext & 0x1) * 256 + trfc) * 1000
+ + byte40_table_ps[(trctrfc_ext >> 1) & 0x7];
+}
+
+static unsigned int
+compute_trc_ps_from_spd(unsigned char trctrfc_ext, unsigned char trc)
+{
+ return trc * 1000 + byte40_table_ps[(trctrfc_ext >> 4) & 0x7];
+}
+
+/*
+ * tCKmax from DDR I SPD Byte 43
+ *
+ * Bits 7:2 == whole ns
+ * Bits 1:0 == quarter ns
+ * 00 == 0.00 ns
+ * 01 == 0.25 ns
+ * 10 == 0.50 ns
+ * 11 == 0.75 ns
+ *
+ * Returns picoseconds.
+ */
+static unsigned int
+compute_tckmax_from_spd_ps(unsigned int byte43)
+{
+ return (byte43 >> 2) * 1000 + (byte43 & 0x3) * 250;
+}
+
+/*
+ * Determine Refresh Rate. Ignore self refresh bit on DDR I.
+ * Table from SPD Spec, Byte 12, converted to picoseconds and
+ * filled in with "default" normal values.
+ */
+static unsigned int
+determine_refresh_rate_ps(const unsigned int spd_refresh)
+{
+ unsigned int refresh_time_ps[8] = {
+ 15625000, /* 0 Normal 1.00x */
+ 3900000, /* 1 Reduced .25x */
+ 7800000, /* 2 Extended .50x */
+ 31300000, /* 3 Extended 2.00x */
+ 62500000, /* 4 Extended 4.00x */
+ 125000000, /* 5 Extended 8.00x */
+ 15625000, /* 6 Normal 1.00x filler */
+ 15625000, /* 7 Normal 1.00x filler */
+ };
+
+ return refresh_time_ps[spd_refresh & 0x7];
+}
+
+/*
+ * The purpose of this function is to compute a suitable
+ * CAS latency given the DRAM clock period. The SPD only
+ * defines at most 3 CAS latencies. Typically the slower in
+ * frequency the DIMM runs at, the shorter its CAS latency can be.
+ * If the DIMM is operating at a sufficiently low frequency,
+ * it may be able to run at a CAS latency shorter than the
+ * shortest SPD-defined CAS latency.
+ *
+ * If a CAS latency is not found, 0 is returned.
+ *
+ * Do this by finding in the standard speed bin table the longest
+ * tCKmin that doesn't exceed the value of mclk_ps (tCK).
+ *
+ * An assumption made is that the SDRAM device allows the
+ * CL to be programmed for a value that is lower than those
+ * advertised by the SPD. This is not always the case,
+ * as those modes not defined in the SPD are optional.
+ *
+ * CAS latency de-rating based upon values from JEDEC Standard No. 79-E,
+ * Table 11.
+ *
+ * ordinal 2, ddr1_speed_bins[1] contains tCK for CL=2
+ */
+ /* CL2.0 CL2.5 CL3.0 */
+unsigned short ddr1_speed_bins[] = {0, 7500, 6000, 5000 };
+
+static unsigned int
+compute_derated_DDR1_CAS_latency(unsigned int mclk_ps)
+{
+ const unsigned int num_speed_bins = ARRAY_SIZE(ddr1_speed_bins);
+ unsigned int lowest_tCKmin_found = 0;
+ unsigned int lowest_tCKmin_CL = 0;
+ unsigned int i;
+
+ debug("mclk_ps = %u\n", mclk_ps);
+
+ for (i = 0; i < num_speed_bins; i++) {
+ unsigned int x = ddr1_speed_bins[i];
+ debug("i=%u, x = %u, lowest_tCKmin_found = %u\n",
+ i, x, lowest_tCKmin_found);
+ if (x && lowest_tCKmin_found <= x && x <= mclk_ps) {
+ lowest_tCKmin_found = x;
+ lowest_tCKmin_CL = i + 1;
+ }
+ }
+
+ debug("lowest_tCKmin_CL = %u\n", lowest_tCKmin_CL);
+
+ return lowest_tCKmin_CL;
+}
+
+/*
+ * ddr1_compute_dimm_parameters for DDR1 SPD
+ *
+ * Compute DIMM parameters based upon the SPD information in spd.
+ * Writes the results to the struct dimm_params structure pointed by pdimm.
+ *
+ * FIXME: use #define for the retvals
+ */
+unsigned int ddr1_compute_dimm_parameters(unsigned int mclk_ps,
+ const struct ddr1_spd_eeprom *spd,
+ struct dimm_params *pdimm)
+{
+ int ret;
+
+ if (spd->mem_type != SPD_MEMTYPE_DDR) {
+ printf("DIMM: SPD data is not DDR1\n");
+ return 3;
+ }
+
+ ret = ddr1_spd_check(spd);
+ if (ret) {
+ printf("DIMM: failed checksum\n");
+ return 2;
+ }
+
+ /*
+ * The part name in ASCII in the SPD EEPROM is not null terminated.
+ * Guarantee null termination here by presetting all bytes to 0
+ * and copying the part name in ASCII from the SPD onto it
+ */
+ memset(pdimm->mpart, 0, sizeof(pdimm->mpart));
+ memcpy(pdimm->mpart, spd->mpart, sizeof(pdimm->mpart) - 1);
+
+ /* DIMM organization parameters */
+ pdimm->n_ranks = spd->nrows;
+ pdimm->rank_density = compute_ranksize(spd->mem_type, spd->bank_dens);
+ pdimm->capacity = pdimm->n_ranks * pdimm->rank_density;
+ pdimm->data_width = spd->dataw_lsb;
+ pdimm->primary_sdram_width = spd->primw;
+ pdimm->ec_sdram_width = spd->ecw;
+
+ /*
+ * FIXME: Need to determine registered_dimm status.
+ * 1 == register buffered
+ * 0 == unbuffered
+ */
+ pdimm->registered_dimm = 0; /* unbuffered */
+
+ /* SDRAM device parameters */
+ pdimm->n_row_addr = spd->nrow_addr;
+ pdimm->n_col_addr = spd->ncol_addr;
+ pdimm->n_banks_per_sdram_device = spd->nbanks;
+ pdimm->edc_config = spd->config;
+ pdimm->burst_lengths_bitmask = spd->burstl;
+
+ /*
+ * Calculate the Maximum Data Rate based on the Minimum Cycle time.
+ * The SPD clk_cycle field (tCKmin) is measured in tenths of
+ * nanoseconds and represented as BCD.
+ */
+ pdimm->tckmin_x_ps
+ = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle);
+ pdimm->tckmin_x_minus_1_ps
+ = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle2);
+ pdimm->tckmin_x_minus_2_ps
+ = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle3);
+
+ pdimm->tckmax_ps = compute_tckmax_from_spd_ps(spd->tckmax);
+
+ /*
+ * Compute CAS latencies defined by SPD
+ * The SPD caslat_x should have at least 1 and at most 3 bits set.
+ *
+ * If cas_lat after masking is 0, ilog2() returns 255, and that
+ * value ends up in the variable. This behavior is relied upon once.
+ */
+ pdimm->caslat_x = ilog2(spd->cas_lat);
+ pdimm->caslat_x_minus_1 = ilog2(spd->cas_lat
+ & ~(1 << pdimm->caslat_x));
+ pdimm->caslat_x_minus_2 = ilog2(spd->cas_lat
+ & ~(1 << pdimm->caslat_x)
+ & ~(1 << pdimm->caslat_x_minus_1));
+
+ /* Compute CAS latencies below that defined by SPD */
+ pdimm->caslat_lowest_derated = compute_derated_DDR1_CAS_latency(
+ mclk_ps);
+
+ /* Compute timing parameters */
+ pdimm->trcd_ps = spd->trcd * 250;
+ pdimm->trp_ps = spd->trp * 250;
+ pdimm->tras_ps = spd->tras * 1000;
+
+ pdimm->twr_ps = mclk_ps * 3;
+ pdimm->twtr_ps = mclk_ps * 1;
+ pdimm->trfc_ps = compute_trfc_ps_from_spd(0, spd->trfc);
+
+ pdimm->trrd_ps = spd->trrd * 250;
+ pdimm->trc_ps = compute_trc_ps_from_spd(0, spd->trc);
+
+ pdimm->refresh_rate_ps = determine_refresh_rate_ps(spd->refresh);
+
+ pdimm->tis_ps = convert_bcd_hundredths_to_cycle_time_ps(spd->ca_setup);
+ pdimm->tih_ps = convert_bcd_hundredths_to_cycle_time_ps(spd->ca_hold);
+ pdimm->tds_ps
+ = convert_bcd_hundredths_to_cycle_time_ps(spd->data_setup);
+ pdimm->tdh_ps
+ = convert_bcd_hundredths_to_cycle_time_ps(spd->data_hold);
+
+ pdimm->trtp_ps = mclk_ps * 2; /* By the book. */
+ pdimm->tdqsq_max_ps = spd->tdqsq * 10;
+ pdimm->tqhs_ps = spd->tqhs * 10;
+
+ return 0;
+}
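
For illustration, a minimal standalone sketch of the byte-31 reordering that compute_ranksize() performs for DDR1 above; the sample SPD values are made up:

    #include <stdio.h>

    /* Mirror of the DDR1 reordering: move the bottom 2 bits of SPD byte 31
     * to the top, then scale by 16 MB (1 << 24). */
    static unsigned long long ddr1_ranksize(unsigned char row_dens)
    {
            unsigned long long bsize = (row_dens >> 2) | ((row_dens & 3) << 6);

            return bsize << 24;
    }

    int main(void)
    {
            /* 0x04 = bit 2 -> 16 MB, 0x40 = bit 6 -> 256 MB, 0x02 = bit 1 -> 2 GB */
            const unsigned char samples[] = { 0x04, 0x40, 0x02 };
            unsigned int i;

            for (i = 0; i < 3; i++)
                    printf("SPD byte 31 = 0x%02x -> %llu MB\n", samples[i],
                           ddr1_ranksize(samples[i]) >> 20);
            return 0;
    }
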
diff --git a/common/ddr2_dimm_params.c b/common/ddr2_dimm_params.c
new file mode 100644
index 0000000000..b9c0922385
--- /dev/null
+++ b/common/ddr2_dimm_params.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2008 Freescale Semiconductor, Inc.
+ */
+
+#include <common.h>
+#include <linux/log2.h>
+#include <ddr_dimms.h>
+
+/*
+ * Calculate the Density of each Physical Rank.
+ * Returned size is in bytes.
+ *
+ * Study this table from Byte 31 of the JEDEC SPD Spec.
+ *
+ * DDR I DDR II
+ * Bit Size Size
+ * --- ----- ------
+ * 7 high 512MB 512MB
+ * 6 256MB 256MB
+ * 5 128MB 128MB
+ * 4 64MB 16GB
+ * 3 32MB 8GB
+ * 2 16MB 4GB
+ * 1 2GB 2GB
+ * 0 low 1GB 1GB
+ *
+ * Reorder Table to be linear by stripping the bottom
+ * 2 or 5 bits off and shifting them up to the top.
+ *
+ */
+static unsigned long long
+compute_ranksize(unsigned int mem_type, unsigned char row_dens)
+{
+ unsigned long long bsize;
+
+ /* Bottom 5 bits up to the top. */
+ bsize = ((row_dens >> 5) | ((row_dens & 31) << 3));
+ bsize <<= 27ULL;
+ debug("DDR: DDR II rank density = 0x%16llx\n", bsize);
+
+ return bsize;
+}
+
+/*
+ * Convert a two-nibble BCD value into a cycle time.
+ * While the spec calls for nano-seconds, picos are returned.
+ *
+ * This implements the tables for bytes 9, 23 and 25 for both
+ * DDR I and II. No allowance for distinguishing the invalid
+ * fields absent for DDR I yet present in DDR II is made.
+ * (That is, cycle times of .25, .33, .66 and .75 ns are
+ * allowed for both DDR II and I.)
+ */
+static unsigned int
+convert_bcd_tenths_to_cycle_time_ps(unsigned int spd_val)
+{
+ /* Table look up the lower nibble, allow DDR I & II. */
+ unsigned int tenths_ps[16] = {
+ 0,
+ 100,
+ 200,
+ 300,
+ 400,
+ 500,
+ 600,
+ 700,
+ 800,
+ 900,
+ 250, /* This and the next 3 entries valid ... */
+ 330, /* ... only for tCK calculations. */
+ 660,
+ 750,
+ 0, /* undefined */
+ 0 /* undefined */
+ };
+
+ unsigned int whole_ns = (spd_val & 0xF0) >> 4;
+ unsigned int tenth_ns = spd_val & 0x0F;
+ unsigned int ps = whole_ns * 1000 + tenths_ps[tenth_ns];
+
+ return ps;
+}
+
+static unsigned int
+convert_bcd_hundredths_to_cycle_time_ps(unsigned int spd_val)
+{
+ unsigned int tenth_ns = (spd_val & 0xF0) >> 4;
+ unsigned int hundredth_ns = spd_val & 0x0F;
+ unsigned int ps = tenth_ns * 100 + hundredth_ns * 10;
+
+ return ps;
+}
+
+static unsigned int byte40_table_ps[8] = {
+ 0,
+ 250,
+ 330,
+ 500,
+ 660,
+ 750,
+ 0, /* supposed to be RFC, but not sure what that means */
+ 0 /* Undefined */
+};
+
+static unsigned int
+compute_trfc_ps_from_spd(unsigned char trctrfc_ext, unsigned char trfc)
+{
+ return (((trctrfc_ext & 0x1) * 256) + trfc) * 1000
+ + byte40_table_ps[(trctrfc_ext >> 1) & 0x7];
+}
+
+static unsigned int
+compute_trc_ps_from_spd(unsigned char trctrfc_ext, unsigned char trc)
+{
+ return trc * 1000 + byte40_table_ps[(trctrfc_ext >> 4) & 0x7];
+}
+
+/*
+ * Determine Refresh Rate. Ignore self refresh bit on DDR I.
+ * Table from SPD Spec, Byte 12, converted to picoseconds and
+ * filled in with "default" normal values.
+ */
+static unsigned int
+determine_refresh_rate_ps(const unsigned int spd_refresh)
+{
+ unsigned int refresh_time_ps[8] = {
+ 15625000, /* 0 Normal 1.00x */
+ 3900000, /* 1 Reduced .25x */
+ 7800000, /* 2 Extended .50x */
+ 31300000, /* 3 Extended 2.00x */
+ 62500000, /* 4 Extended 4.00x */
+ 125000000, /* 5 Extended 8.00x */
+ 15625000, /* 6 Normal 1.00x filler */
+ 15625000, /* 7 Normal 1.00x filler */
+ };
+
+ return refresh_time_ps[spd_refresh & 0x7];
+}
+
+/*
+ * The purpose of this function is to compute a suitable
+ * CAS latency given the DRAM clock period. The SPD only
+ * defines at most 3 CAS latencies. Typically the slower in
+ * frequency the DIMM runs at, the shorter its CAS latency can.
+ * be. If the DIMM is operating at a sufficiently low frequency,
+ * it may be able to run at a CAS latency shorter than the
+ * shortest SPD-defined CAS latency.
+ *
+ * If a CAS latency is not found, 0 is returned.
+ *
+ * Do this by finding in the standard speed bin table the longest
+ * tCKmin that doesn't exceed the value of mclk_ps (tCK).
+ *
+ * An assumption made is that the SDRAM device allows the
+ * CL to be programmed for a value that is lower than those
+ * advertised by the SPD. This is not always the case,
+ * as those modes not defined in the SPD are optional.
+ *
+ * CAS latency de-rating based upon values from JEDEC Standard No. 79-2C,
+ * Table 40, "DDR2 SDRAM standard speed bins and tCK, tRCD, tRP, tRAS,
+ * and tRC for corresponding bin"
+ *
+ * ordinal 2, ddr2_speed_bins[1] contains tCK for CL=3
+ * Not certain if any good value exists for CL=2
+ */
+ /* CL2 CL3 CL4 CL5 CL6 CL7*/
+static unsigned short ddr2_speed_bins[] = { 0, 5000, 3750, 3000, 2500, 1875 };
+
+static unsigned int
+compute_derated_DDR2_CAS_latency(unsigned int mclk_ps)
+{
+ const unsigned int num_speed_bins = ARRAY_SIZE(ddr2_speed_bins);
+ unsigned int lowest_tCKmin_found = 0;
+ unsigned int lowest_tCKmin_CL = 0;
+ unsigned int i;
+
+ debug("mclk_ps = %u\n", mclk_ps);
+
+ for (i = 0; i < num_speed_bins; i++) {
+ unsigned int x = ddr2_speed_bins[i];
+ debug("i=%u, x = %u, lowest_tCKmin_found = %u\n",
+ i, x, lowest_tCKmin_found);
+ if (x && x <= mclk_ps && x >= lowest_tCKmin_found) {
+ lowest_tCKmin_found = x;
+ lowest_tCKmin_CL = i + 2;
+ }
+ }
+
+ debug("lowest_tCKmin_CL = %u\n", lowest_tCKmin_CL);
+
+ return lowest_tCKmin_CL;
+}
+
+/*
+ * ddr2_compute_dimm_parameters for DDR2 SPD
+ *
+ * Compute DIMM parameters based upon the SPD information in spd.
+ * Writes the results to the struct dimm_params structure pointed by pdimm.
+ *
+ * FIXME: use #define for the retvals
+ */
+unsigned int ddr2_compute_dimm_parameters(unsigned int mclk_ps,
+ const struct ddr2_spd_eeprom *spd,
+ struct dimm_params *pdimm)
+{
+ int ret;
+
+ if (spd->mem_type != SPD_MEMTYPE_DDR2 &&
+ spd->mem_type != SPD_MEMTYPE_DDR2_FBDIMM &&
+ spd->mem_type != SPD_MEMTYPE_DDR2_FBDIMM_PROBE) {
+ printf("DIMM: SPD data is not DDR2\n");
+ return 3;
+ }
+
+ ret = ddr2_spd_check(spd);
+ if (ret) {
+ printf("DIMM: failed checksum\n");
+ return 2;
+ }
+
+ /*
+ * The part name in ASCII in the SPD EEPROM is not null terminated.
+ * Guarantee null termination here by presetting all bytes to 0
+ * and copying the part name in ASCII from the SPD onto it
+ */
+ memset(pdimm->mpart, 0, sizeof(pdimm->mpart));
+ memcpy(pdimm->mpart, spd->mpart, sizeof(pdimm->mpart) - 1);
+
+ /* DIMM organization parameters */
+ pdimm->n_ranks = (spd->mod_ranks & 0x7) + 1;
+ pdimm->rank_density = compute_ranksize(spd->mem_type, spd->rank_dens);
+ pdimm->capacity = pdimm->n_ranks * pdimm->rank_density;
+ pdimm->data_width = spd->dataw;
+ pdimm->primary_sdram_width = spd->primw;
+ pdimm->ec_sdram_width = spd->ecw;
+
+ /* These are all the types defined by the JEDEC DDR2 SPD 1.3 spec */
+ switch (spd->dimm_type) {
+ case DDR2_SPD_DIMMTYPE_RDIMM:
+ case DDR2_SPD_DIMMTYPE_72B_SO_RDIMM:
+ case DDR2_SPD_DIMMTYPE_MINI_RDIMM:
+ /* Registered/buffered DIMMs */
+ pdimm->registered_dimm = 1;
+ break;
+
+ case DDR2_SPD_DIMMTYPE_UDIMM:
+ case DDR2_SPD_DIMMTYPE_SO_DIMM:
+ case DDR2_SPD_DIMMTYPE_MICRO_DIMM:
+ case DDR2_SPD_DIMMTYPE_MINI_UDIMM:
+ /* Unbuffered DIMMs */
+ pdimm->registered_dimm = 0;
+ break;
+
+ case DDR2_SPD_DIMMTYPE_72B_SO_CDIMM:
+ default:
+ printf("unknown dimm_type 0x%02X\n", spd->dimm_type);
+ return 1;
+ }
+
+ /* SDRAM device parameters */
+ pdimm->n_row_addr = spd->nrow_addr;
+ pdimm->n_col_addr = spd->ncol_addr;
+ pdimm->n_banks_per_sdram_device = spd->nbanks;
+ pdimm->edc_config = spd->config;
+ pdimm->burst_lengths_bitmask = spd->burstl;
+
+ /*
+ * Calculate the Maximum Data Rate based on the Minimum Cycle time.
+ * The SPD clk_cycle field (tCKmin) is measured in tenths of
+ * nanoseconds and represented as BCD.
+ */
+ pdimm->tckmin_x_ps
+ = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle);
+ pdimm->tckmin_x_minus_1_ps
+ = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle2);
+ pdimm->tckmin_x_minus_2_ps
+ = convert_bcd_tenths_to_cycle_time_ps(spd->clk_cycle3);
+
+ pdimm->tckmax_ps = convert_bcd_tenths_to_cycle_time_ps(spd->tckmax);
+
+ /*
+ * Compute CAS latencies defined by SPD
+ * The SPD caslat_x should have at least 1 and at most 3 bits set.
+ *
+ * If cas_lat after masking is 0, ilog2() returns 255, and that
+ * value ends up in the variable. This behavior is relied upon once.
+ */
+ pdimm->caslat_x = ilog2(spd->cas_lat);
+ pdimm->caslat_x_minus_1 = ilog2(spd->cas_lat
+ & ~(1 << pdimm->caslat_x));
+ pdimm->caslat_x_minus_2 = ilog2(spd->cas_lat
+ & ~(1 << pdimm->caslat_x)
+ & ~(1 << pdimm->caslat_x_minus_1));
+
+ /* Compute CAS latencies below that defined by SPD */
+ pdimm->caslat_lowest_derated = compute_derated_DDR2_CAS_latency(
+ mclk_ps);
+
+ /* Compute timing parameters */
+ pdimm->trcd_ps = spd->trcd * 250;
+ pdimm->trp_ps = spd->trp * 250;
+ pdimm->tras_ps = spd->tras * 1000;
+
+ pdimm->twr_ps = spd->twr * 250;
+ pdimm->twtr_ps = spd->twtr * 250;
+ pdimm->trfc_ps = compute_trfc_ps_from_spd(spd->trctrfc_ext, spd->trfc);
+
+ pdimm->trrd_ps = spd->trrd * 250;
+ pdimm->trc_ps = compute_trc_ps_from_spd(spd->trctrfc_ext, spd->trc);
+
+ pdimm->refresh_rate_ps = determine_refresh_rate_ps(spd->refresh);
+
+ pdimm->tis_ps = convert_bcd_hundredths_to_cycle_time_ps(spd->ca_setup);
+ pdimm->tih_ps = convert_bcd_hundredths_to_cycle_time_ps(spd->ca_hold);
+ pdimm->tds_ps
+ = convert_bcd_hundredths_to_cycle_time_ps(spd->data_setup);
+ pdimm->tdh_ps
+ = convert_bcd_hundredths_to_cycle_time_ps(spd->data_hold);
+
+ pdimm->trtp_ps = spd->trtp * 250;
+ pdimm->tdqsq_max_ps = spd->tdqsq * 10;
+ pdimm->tqhs_ps = spd->tqhs * 10;
+
+ return 0;
+}
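
As a worked example of the BCD tCK conversion shared by the DDR1 and DDR2 helpers above, a standalone sketch (the 0x3d/0x25 encodings are illustrative):

    #include <stdio.h>

    /* Same table-driven conversion as convert_bcd_tenths_to_cycle_time_ps():
     * upper nibble = whole ns, lower nibble = tenths of ns, with codes
     * 0xa..0xd standing for 0.25/0.33/0.66/0.75 ns. */
    static unsigned int bcd_tenths_to_ps(unsigned int spd_val)
    {
            static const unsigned int tenths_ps[16] = {
                    0, 100, 200, 300, 400, 500, 600, 700, 800, 900,
                    250, 330, 660, 750, 0, 0
            };

            return ((spd_val & 0xf0) >> 4) * 1000 + tenths_ps[spd_val & 0x0f];
    }

    int main(void)
    {
            printf("0x3d -> %u ps\n", bcd_tenths_to_ps(0x3d)); /* 3.75 ns, DDR2-533 */
            printf("0x25 -> %u ps\n", bcd_tenths_to_ps(0x25)); /* 2.5 ns, DDR2-800 */
            return 0;
    }
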
diff --git a/common/ddr3_dimm_params.c b/common/ddr3_dimm_params.c
new file mode 100644
index 0000000000..1b7512a275
--- /dev/null
+++ b/common/ddr3_dimm_params.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2008-2012 Freescale Semiconductor, Inc.
+ * Dave Liu <daveliu@freescale.com>
+ *
+ * Calculate the organization and timing parameters
+ * from DDR3 SPD. Please refer to the spec:
+ * JEDEC standard No. 21-C 4_01_02_11R18.pdf
+ */
+
+#include <common.h>
+#include <ddr_dimms.h>
+
+/*
+ * Calculate the Density of each Physical Rank.
+ * Returned size is in bytes.
+ *
+ * each rank size =
+ * sdram capacity(bit) / 8 * primary bus width / sdram width
+ *
+ * where: sdram capacity = spd byte4[3:0]
+ * primary bus width = spd byte8[2:0]
+ * sdram width = spd byte7[2:0]
+ *
+ * SPD byte4 - sdram density and banks
+ * bit[3:0] size(bit) size(byte)
+ * 0000 256Mb 32MB
+ * 0001 512Mb 64MB
+ * 0010 1Gb 128MB
+ * 0011 2Gb 256MB
+ * 0100 4Gb 512MB
+ * 0101 8Gb 1GB
+ * 0110 16Gb 2GB
+ *
+ * SPD byte8 - module memory bus width
+ * bit[2:0] primary bus width
+ * 000 8bits
+ * 001 16bits
+ * 010 32bits
+ * 011 64bits
+ *
+ * SPD byte7 - module organization
+ * bit[2:0] sdram device width
+ * 000 4bits
+ * 001 8bits
+ * 010 16bits
+ * 011 32bits
+ *
+ */
+static unsigned long long
+compute_ranksize(const struct ddr3_spd_eeprom *spd)
+{
+ unsigned long long bsize;
+
+ int nbit_sdram_cap_bsize = 0;
+ int nbit_primary_bus_width = 0;
+ int nbit_sdram_width = 0;
+
+ if ((spd->density_banks & 0xf) < 7)
+ nbit_sdram_cap_bsize = (spd->density_banks & 0xf) + 28;
+ if ((spd->bus_width & 0x7) < 4)
+ nbit_primary_bus_width = (spd->bus_width & 0x7) + 3;
+ if ((spd->organization & 0x7) < 4)
+ nbit_sdram_width = (spd->organization & 0x7) + 2;
+
+ bsize = 1ULL << (nbit_sdram_cap_bsize - 3
+ + nbit_primary_bus_width - nbit_sdram_width);
+
+ debug("DDR: DDR III rank density = 0x%16llx\n", bsize);
+
+ return bsize;
+}
+
+/*
+ * ddr3_compute_dimm_parameters for DDR3 SPD
+ *
+ * Compute DIMM parameters based upon the SPD information in spd.
+ * Writes the results to the struct dimm_params structure pointed by pdimm.
+ *
+ */
+unsigned int ddr3_compute_dimm_parameters(const struct ddr3_spd_eeprom *spd,
+ struct dimm_params *pdimm)
+{
+ int ret;
+ unsigned int mtb_ps;
+ int ftb_10th_ps;
+ int i;
+
+ if (spd->mem_type != SPD_MEMTYPE_DDR3) {
+ printf("DIMM: SPD data is not DDR3\n");
+ return 3;
+ }
+
+ ret = ddr3_spd_check(spd);
+ if (ret) {
+ printf("DIMM: failed checksum\n");
+ return 2;
+ }
+
+ /*
+ * The part name in ASCII in the SPD EEPROM is not null terminated.
+ * Guarantee null termination here by presetting all bytes to 0
+ * and copying the part name in ASCII from the SPD onto it
+ */
+ memset(pdimm->mpart, 0, sizeof(pdimm->mpart));
+ if ((spd->info_size_crc & 0xF) > 1)
+ memcpy(pdimm->mpart, spd->mpart, sizeof(pdimm->mpart) - 1);
+
+ /* DIMM organization parameters */
+ pdimm->n_ranks = ((spd->organization >> 3) & 0x7) + 1;
+ pdimm->rank_density = compute_ranksize(spd);
+ pdimm->capacity = pdimm->n_ranks * pdimm->rank_density;
+ pdimm->primary_sdram_width = 1 << (3 + (spd->bus_width & 0x7));
+ if ((spd->bus_width >> 3) & 0x3)
+ pdimm->ec_sdram_width = 8;
+ else
+ pdimm->ec_sdram_width = 0;
+ pdimm->data_width = pdimm->primary_sdram_width
+ + pdimm->ec_sdram_width;
+ pdimm->device_width = 1 << ((spd->organization & 0x7) + 2);
+
+ /* These are the types defined by the JEDEC DDR3 SPD spec */
+ pdimm->mirrored_dimm = 0;
+ pdimm->registered_dimm = 0;
+ switch (spd->module_type & DDR3_SPD_MODULETYPE_MASK) {
+ case DDR3_SPD_MODULETYPE_RDIMM:
+ case DDR3_SPD_MODULETYPE_MINI_RDIMM:
+ case DDR3_SPD_MODULETYPE_72B_SO_RDIMM:
+ /* Registered/buffered DIMMs */
+ pdimm->registered_dimm = 1;
+ for (i = 0; i < 16; i += 2) {
+ u8 rcw = spd->mod_section.registered.rcw[i/2];
+ pdimm->rcw[i] = (rcw >> 0) & 0x0F;
+ pdimm->rcw[i+1] = (rcw >> 4) & 0x0F;
+ }
+ break;
+
+ case DDR3_SPD_MODULETYPE_UDIMM:
+ case DDR3_SPD_MODULETYPE_SO_DIMM:
+ case DDR3_SPD_MODULETYPE_MICRO_DIMM:
+ case DDR3_SPD_MODULETYPE_MINI_UDIMM:
+ case DDR3_SPD_MODULETYPE_MINI_CDIMM:
+ case DDR3_SPD_MODULETYPE_72B_SO_UDIMM:
+ case DDR3_SPD_MODULETYPE_72B_SO_CDIMM:
+ case DDR3_SPD_MODULETYPE_LRDIMM:
+ case DDR3_SPD_MODULETYPE_16B_SO_DIMM:
+ case DDR3_SPD_MODULETYPE_32B_SO_DIMM:
+ /* Unbuffered DIMMs */
+ if (spd->mod_section.unbuffered.addr_mapping & 0x1)
+ pdimm->mirrored_dimm = 1;
+ break;
+
+ default:
+ printf("unknown module_type 0x%02X\n", spd->module_type);
+ return 1;
+ }
+
+ /* SDRAM device parameters */
+ pdimm->n_row_addr = ((spd->addressing >> 3) & 0x7) + 12;
+ pdimm->n_col_addr = (spd->addressing & 0x7) + 9;
+ pdimm->n_banks_per_sdram_device = 8 << ((spd->density_banks >> 4) & 0x7);
+
+ /*
+ * The SPD spec does not have an ECC bit;
+ * we consider the DIMM to be ECC capable
+ * when the extension bus exists.
+ */
+ if (pdimm->ec_sdram_width)
+ pdimm->edc_config = 0x02;
+ else
+ pdimm->edc_config = 0x00;
+
+ /*
+ * The SPD spec does not have a burst length byte,
+ * but the DDR3 spec has native BL8 and BC4:
+ * BL8 - bit 3, BC4 - bit 2
+ */
+ pdimm->burst_lengths_bitmask = 0x0c;
+
+ /* MTB - medium timebase
+ * The unit in the SPD spec is ns,
+ * We convert it to ps.
+ * eg: MTB = 0.125ns (125ps)
+ */
+ mtb_ps = (spd->mtb_dividend * 1000) / spd->mtb_divisor;
+ pdimm->mtb_ps = mtb_ps;
+
+ /*
+ * FTB - fine timebase
+ * use 1/10th of ps as our unit to avoid floating point
+ * eg, 10 for 1ps, 25 for 2.5ps, 50 for 5ps
+ */
+ ftb_10th_ps =
+ ((spd->ftb_div & 0xf0) >> 4) * 10 / (spd->ftb_div & 0x0f);
+ pdimm->ftb_10th_ps = ftb_10th_ps;
+ /*
+ * sdram minimum cycle time
+ * we assume the MTB is 0.125ns
+ * eg:
+ * tck_min=15 MTB (1.875ns) ->DDR3-1066
+ * =12 MTB (1.5ns) ->DDR3-1333
+ * =10 MTB (1.25ns) ->DDR3-1600
+ */
+ pdimm->tckmin_x_ps = spd->tck_min * mtb_ps +
+ (spd->fine_tck_min * ftb_10th_ps) / 10;
+
+ /*
+ * CAS latency supported
+ * bit4 - CL4
+ * bit5 - CL5
+ * bit18 - CL18
+ */
+ pdimm->caslat_x = ((spd->caslat_msb << 8) | spd->caslat_lsb) << 4;
+
+ /*
+ * min CAS latency time
+ * eg: taa_min =
+ * DDR3-800D 100 MTB (12.5ns)
+ * DDR3-1066F 105 MTB (13.125ns)
+ * DDR3-1333H 108 MTB (13.5ns)
+ * DDR3-1600H 90 MTB (11.25ns)
+ */
+ pdimm->taa_ps = spd->taa_min * mtb_ps +
+ (spd->fine_taa_min * ftb_10th_ps) / 10;
+
+ /*
+ * min write recovery time
+ * eg:
+ * twr_min = 120 MTB (15ns) -> all speed grades.
+ */
+ pdimm->twr_ps = spd->twr_min * mtb_ps;
+
+ /*
+ * min RAS to CAS delay time
+ * eg: trcd_min =
+ * DDR3-800 100 MTB (12.5ns)
+ * DDR3-1066F 105 MTB (13.125ns)
+ * DDR3-1333H 108 MTB (13.5ns)
+ * DDR3-1600H 90 MTB (11.25)
+ */
+ pdimm->trcd_ps = spd->trcd_min * mtb_ps +
+ (spd->fine_trcd_min * ftb_10th_ps) / 10;
+
+ /*
+ * min row active to row active delay time
+ * eg: trrd_min =
+ * DDR3-800(1KB page) 80 MTB (10ns)
+ * DDR3-1333(1KB page) 48 MTB (6ns)
+ */
+ pdimm->trrd_ps = spd->trrd_min * mtb_ps;
+
+ /*
+ * min row precharge delay time
+ * eg: trp_min =
+ * DDR3-800D 100 MTB (12.5ns)
+ * DDR3-1066F 105 MTB (13.125ns)
+ * DDR3-1333H 108 MTB (13.5ns)
+ * DDR3-1600H 90 MTB (11.25ns)
+ */
+ pdimm->trp_ps = spd->trp_min * mtb_ps +
+ (spd->fine_trp_min * ftb_10th_ps) / 10;
+
+ /* min active to precharge delay time
+ * eg: tRAS_min =
+ * DDR3-800D 300 MTB (37.5ns)
+ * DDR3-1066F 300 MTB (37.5ns)
+ * DDR3-1333H 288 MTB (36ns)
+ * DDR3-1600H 280 MTB (35ns)
+ */
+ pdimm->tras_ps = (((spd->tras_trc_ext & 0xf) << 8) | spd->tras_min_lsb)
+ * mtb_ps;
+ /*
+ * min active to active/refresh delay time
+ * eg: tRC_min =
+ * DDR3-800D 400 MTB (50ns)
+ * DDR3-1066F 405 MTB (50.625ns)
+ * DDR3-1333H 396 MTB (49.5ns)
+ * DDR3-1600H 370 MTB (46.25ns)
+ */
+ pdimm->trc_ps = (((spd->tras_trc_ext & 0xf0) << 4) | spd->trc_min_lsb)
+ * mtb_ps + (spd->fine_trc_min * ftb_10th_ps) / 10;
+ /*
+ * min refresh recovery delay time
+ * eg: tRFC_min =
+ * 512Mb 720 MTB (90ns)
+ * 1Gb 880 MTB (110ns)
+ * 2Gb 1280 MTB (160ns)
+ */
+ pdimm->trfc_ps = ((spd->trfc_min_msb << 8) | spd->trfc_min_lsb)
+ * mtb_ps;
+ /*
+ * min internal write to read command delay time
+ * eg: twtr_min = 40 MTB (7.5ns) - all speed bins.
+ * tWTR is at least 4 mclk independent of operating freq.
+ */
+ pdimm->twtr_ps = spd->twtr_min * mtb_ps;
+
+ /*
+ * min internal read to precharge command delay time
+ * eg: trtp_min = 40 MTB (7.5ns) - all speed bins.
+ * tRTP is at least 4 mclk independent of operating freq.
+ */
+ pdimm->trtp_ps = spd->trtp_min * mtb_ps;
+
+ /*
+ * Average periodic refresh interval
+ * tREFI = 7.8 us at normal temperature range
+ * = 3.9 us at ext temperature range
+ */
+ pdimm->refresh_rate_ps = 7800000;
+ if ((spd->therm_ref_opt & 0x1) && !(spd->therm_ref_opt & 0x2)) {
+ pdimm->refresh_rate_ps = 3900000;
+ pdimm->extended_op_srt = 1;
+ }
+
+ /*
+ * min four active window delay time
+ * eg: tfaw_min =
+ * DDR3-800(1KB page) 320 MTB (40ns)
+ * DDR3-1066(1KB page) 300 MTB (37.5ns)
+ * DDR3-1333(1KB page) 240 MTB (30ns)
+ * DDR3-1600(1KB page) 240 MTB (30ns)
+ */
+ pdimm->tfaw_ps = (((spd->tfaw_msb & 0xf) << 8) | spd->tfaw_min)
+ * mtb_ps;
+
+ return 0;
+}
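
The DDR3 rank size above reduces to a power of two; a small sketch with assumed SPD byte values (2 Gb x8 devices on a 64-bit bus) shows the math:

    #include <stdio.h>

    /* Same arithmetic as compute_ranksize(): rank bytes =
     * 2^(capacity_log2 - 3 + bus_width_log2 - device_width_log2) */
    static unsigned long long ddr3_ranksize(unsigned char density_banks,
                                            unsigned char bus_width,
                                            unsigned char organization)
    {
            int cap = (density_banks & 0xf) + 28;   /* SDRAM capacity, log2 bits */
            int bus = (bus_width & 0x7) + 3;        /* primary bus width, log2 */
            int dev = (organization & 0x7) + 2;     /* SDRAM device width, log2 */

            return 1ULL << (cap - 3 + bus - dev);
    }

    int main(void)
    {
            /* 2 Gb devices (0x3), 64-bit bus (0x3), x8 organization (0x1)
             * -> 8 devices per rank -> 2048 MB per rank */
            printf("rank = %llu MB\n", ddr3_ranksize(0x3, 0x3, 0x1) >> 20);
            return 0;
    }
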
diff --git a/common/ddr4_dimm_params.c b/common/ddr4_dimm_params.c
new file mode 100644
index 0000000000..045cbd457c
--- /dev/null
+++ b/common/ddr4_dimm_params.c
@@ -0,0 +1,355 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2014-2016 Freescale Semiconductor, Inc.
+ * Copyright 2017-2018 NXP Semiconductor
+ *
+ * Calculate the organization and timing parameters
+ * from DDR4 SPD. Please refer to the spec:
+ * JEDEC standard No. 21-C 4_01_02_12R23A.pdf
+ */
+
+#include <common.h>
+#include <ddr_dimms.h>
+
+/*
+ * Calculate the Density of each Physical Rank.
+ * Returned size is in bytes.
+ *
+ * Total DIMM size =
+ * sdram capacity(bit) / 8 * primary bus width / sdram width
+ * * Logical Ranks per DIMM
+ *
+ * where: sdram capacity = spd byte4[3:0]
+ * primary bus width = spd byte13[2:0]
+ * sdram width = spd byte12[2:0]
+ * Logical Ranks per DIMM = spd byte12[5:3] for SDP, DDP, QDP
+ * spd byte12[5:3] * spd byte6[6:4] for 3DS
+ *
+ * To simplify each rank size = total DIMM size / Number of Package Ranks
+ * where Number of Package Ranks = spd byte12[5:3]
+ *
+ * SPD byte4 - sdram density and banks
+ * bit[3:0] size(bit) size(byte)
+ * 0000 256Mb 32MB
+ * 0001 512Mb 64MB
+ * 0010 1Gb 128MB
+ * 0011 2Gb 256MB
+ * 0100 4Gb 512MB
+ * 0101 8Gb 1GB
+ * 0110 16Gb 2GB
+ * 0111 32Gb 4GB
+ *
+ * SPD byte13 - module memory bus width
+ * bit[2:0] primary bus width
+ * 000 8bits
+ * 001 16bits
+ * 010 32bits
+ * 011 64bits
+ *
+ * SPD byte12 - module organization
+ * bit[2:0] sdram device width
+ * 000 4bits
+ * 001 8bits
+ * 010 16bits
+ * 011 32bits
+ *
+ * SPD byte12 - module organization
+ * bit[5:3] number of package ranks per DIMM
+ * 000 1
+ * 001 2
+ * 010 3
+ * 011 4
+ *
+ * SPD byte6 - SDRAM package type
+ * bit[6:4] Die count
+ * 000 1
+ * 001 2
+ * 010 3
+ * 011 4
+ * 100 5
+ * 101 6
+ * 110 7
+ * 111 8
+ *
+ * SPD byte6 - SDRAM package type
+ * bit[1:0] Signal loading
+ * 00 Not specified
+ * 01 Multi load stack
+ * 10 Single load stack (3DS)
+ * 11 Reserved
+ */
+static unsigned long long
+compute_ranksize(const struct ddr4_spd_eeprom *spd)
+{
+ unsigned long long bsize;
+
+ int nbit_sdram_cap_bsize = 0;
+ int nbit_primary_bus_width = 0;
+ int nbit_sdram_width = 0;
+ int die_count = 0;
+ bool package_3ds;
+
+ if ((spd->density_banks & 0xf) <= 7)
+ nbit_sdram_cap_bsize = (spd->density_banks & 0xf) + 28;
+ if ((spd->bus_width & 0x7) < 4)
+ nbit_primary_bus_width = (spd->bus_width & 0x7) + 3;
+ if ((spd->organization & 0x7) < 4)
+ nbit_sdram_width = (spd->organization & 0x7) + 2;
+ package_3ds = (spd->package_type & 0x3) == 0x2;
+ if ((spd->package_type & 0x80) && !package_3ds) { /* other than 3DS */
+ printf("Warning: not supported SDRAM package type\n");
+ return 0;
+ }
+ if (package_3ds)
+ die_count = (spd->package_type >> 4) & 0x7;
+
+ bsize = 1ULL << (nbit_sdram_cap_bsize - 3 +
+ nbit_primary_bus_width - nbit_sdram_width +
+ die_count);
+
+ debug("DDR: DDR rank density = 0x%16llx\n", bsize);
+
+ return bsize;
+}
+
+#define spd_to_ps(mtb, ftb) \
+ (mtb * pdimm->mtb_ps + (ftb * pdimm->ftb_10th_ps) / 10)
+/*
+ * ddr4_compute_dimm_parameters for DDR4 SPD
+ *
+ * Compute DIMM parameters based upon the SPD information in spd.
+ * Writes the results to the struct dimm_params structure pointed by pdimm.
+ *
+ */
+unsigned int ddr4_compute_dimm_parameters(const struct ddr4_spd_eeprom *spd,
+ struct dimm_params *pdimm)
+{
+ int ret;
+ int i;
+ const u8 udimm_rc_e_dq[18] = {
+ 0x0c, 0x2c, 0x15, 0x35, 0x15, 0x35, 0x0b, 0x2c, 0x15,
+ 0x35, 0x0b, 0x35, 0x0b, 0x2c, 0x0b, 0x35, 0x15, 0x36
+ };
+ int spd_error = 0;
+ u8 *ptr;
+ u8 val;
+
+ if (spd->mem_type != SPD_MEMTYPE_DDR4) {
+ printf("DIMM: SPD data is not DDR4\n");
+ return 3;
+ }
+
+ ret = ddr4_spd_check(spd);
+ if (ret) {
+ printf("DIMM: failed checksum\n");
+ return 2;
+ }
+
+ /*
+ * The part name in ASCII in the SPD EEPROM is not null terminated.
+ * Guarantee null termination here by presetting all bytes to 0
+ * and copying the part name in ASCII from the SPD onto it
+ */
+ memset(pdimm->mpart, 0, sizeof(pdimm->mpart));
+ if ((spd->info_size_crc & 0xF) > 2)
+ memcpy(pdimm->mpart, spd->mpart, sizeof(pdimm->mpart) - 1);
+
+ /* DIMM organization parameters */
+ pdimm->n_ranks = ((spd->organization >> 3) & 0x7) + 1;
+ pdimm->rank_density = compute_ranksize(spd);
+ pdimm->capacity = pdimm->n_ranks * pdimm->rank_density;
+ pdimm->die_density = spd->density_banks & 0xf;
+ pdimm->primary_sdram_width = 1 << (3 + (spd->bus_width & 0x7));
+ if ((spd->bus_width >> 3) & 0x3)
+ pdimm->ec_sdram_width = 8;
+ else
+ pdimm->ec_sdram_width = 0;
+ pdimm->data_width = pdimm->primary_sdram_width
+ + pdimm->ec_sdram_width;
+ pdimm->device_width = 1 << ((spd->organization & 0x7) + 2);
+ pdimm->package_3ds = (spd->package_type & 0x3) == 0x2 ?
+ (spd->package_type >> 4) & 0x7 : 0;
+
+ /* These are the types defined by the JEDEC SPD spec */
+ pdimm->mirrored_dimm = 0;
+ pdimm->registered_dimm = 0;
+ switch (spd->module_type & DDR4_SPD_MODULETYPE_MASK) {
+ case DDR4_SPD_MODULETYPE_RDIMM:
+ /* Registered/buffered DIMMs */
+ pdimm->registered_dimm = 1;
+ if (spd->mod_section.registered.reg_map & 0x1)
+ pdimm->mirrored_dimm = 1;
+ val = spd->mod_section.registered.ca_stren;
+ pdimm->rcw[3] = val >> 4;
+ pdimm->rcw[4] = ((val & 0x3) << 2) | ((val & 0xc) >> 2);
+ val = spd->mod_section.registered.clk_stren;
+ pdimm->rcw[5] = ((val & 0x3) << 2) | ((val & 0xc) >> 2);
+ /* Not all in SPD. For convenience only. Boards may overwrite. */
+ pdimm->rcw[6] = 0xf;
+ /*
+ * A17 only used for 16Gb and above devices.
+ * C[2:0] only used for 3DS.
+ */
+ pdimm->rcw[8] = pdimm->die_density >= 0x6 ? 0x0 : 0x8 |
+ (pdimm->package_3ds > 0x3 ? 0x0 :
+ (pdimm->package_3ds > 0x1 ? 0x1 :
+ (pdimm->package_3ds > 0 ? 0x2 : 0x3)));
+ if (pdimm->package_3ds || pdimm->n_ranks != 4)
+ pdimm->rcw[13] = 0xc;
+ else
+ pdimm->rcw[13] = 0xd; /* Fix encoded by board */
+
+ break;
+
+ case DDR4_SPD_MODULETYPE_UDIMM:
+ case DDR4_SPD_MODULETYPE_SO_DIMM:
+ /* Unbuffered DIMMs */
+ if (spd->mod_section.unbuffered.addr_mapping & 0x1)
+ pdimm->mirrored_dimm = 1;
+ if ((spd->mod_section.unbuffered.mod_height & 0xe0) == 0 &&
+ (spd->mod_section.unbuffered.ref_raw_card == 0x04)) {
+ /* Fix SPD error found on DIMMs with raw card E0 */
+ for (i = 0; i < 18; i++) {
+ if (spd->mapping[i] == udimm_rc_e_dq[i])
+ continue;
+ spd_error = 1;
+ debug("SPD byte %d: 0x%x, should be 0x%x\n",
+ 60 + i, spd->mapping[i],
+ udimm_rc_e_dq[i]);
+ ptr = (u8 *)&spd->mapping[i];
+ *ptr = udimm_rc_e_dq[i];
+ }
+ if (spd_error)
+ printf("SPD DQ mapping error fixed\n");
+ }
+ break;
+
+ default:
+ printf("unknown module_type 0x%02X\n", spd->module_type);
+ return 1;
+ }
+
+ /* SDRAM device parameters */
+ pdimm->n_row_addr = ((spd->addressing >> 3) & 0x7) + 12;
+ pdimm->n_col_addr = (spd->addressing & 0x7) + 9;
+ pdimm->bank_addr_bits = (spd->density_banks >> 4) & 0x3;
+ pdimm->bank_group_bits = (spd->density_banks >> 6) & 0x3;
+
+ /*
+ * The SPD spec does not have an ECC bit;
+ * we consider the DIMM to be ECC capable
+ * when the extension bus exists.
+ */
+ if (pdimm->ec_sdram_width)
+ pdimm->edc_config = 0x02;
+ else
+ pdimm->edc_config = 0x00;
+
+ /*
+ * The SPD spec does not have a burst length byte,
+ * but the DDR4 spec has native BL8 and BC4:
+ * BL8 - bit 3, BC4 - bit 2
+ */
+ pdimm->burst_lengths_bitmask = 0x0c;
+
+ /* MTB - medium timebase
+ * The MTB in the SPD spec is 125ps,
+ *
+ * FTB - fine timebase
+ * use 1/10th of ps as our unit to avoid floating point
+ * eg, 10 for 1ps, 25 for 2.5ps, 50 for 5ps
+ */
+ if ((spd->timebases & 0xf) == 0x0) {
+ pdimm->mtb_ps = 125;
+ pdimm->ftb_10th_ps = 10;
+
+ } else {
+ printf("Unknown Timebases\n");
+ }
+
+ /* sdram minimum cycle time */
+ pdimm->tckmin_x_ps = spd_to_ps(spd->tck_min, spd->fine_tck_min);
+
+ /* sdram max cycle time */
+ pdimm->tckmax_ps = spd_to_ps(spd->tck_max, spd->fine_tck_max);
+
+ /*
+ * CAS latency supported
+ * bit0 - CL7
+ * bit4 - CL11
+ * bit8 - CL15
+ * bit12- CL19
+ * bit16- CL23
+ */
+ pdimm->caslat_x = (spd->caslat_b1 << 7) |
+ (spd->caslat_b2 << 15) |
+ (spd->caslat_b3 << 23);
+
+ BUG_ON(spd->caslat_b4 != 0);
+
+ /*
+ * min CAS latency time
+ */
+ pdimm->taa_ps = spd_to_ps(spd->taa_min, spd->fine_taa_min);
+
+ /*
+ * min RAS to CAS delay time
+ */
+ pdimm->trcd_ps = spd_to_ps(spd->trcd_min, spd->fine_trcd_min);
+
+ /*
+ * Min Row Precharge Delay Time
+ */
+ pdimm->trp_ps = spd_to_ps(spd->trp_min, spd->fine_trp_min);
+
+ /* min active to precharge delay time */
+ pdimm->tras_ps = (((spd->tras_trc_ext & 0xf) << 8) +
+ spd->tras_min_lsb) * pdimm->mtb_ps;
+
+ /* min active to active/refresh delay time */
+ pdimm->trc_ps = spd_to_ps((((spd->tras_trc_ext & 0xf0) << 4) +
+ spd->trc_min_lsb), spd->fine_trc_min);
+ /* Min Refresh Recovery Delay Time */
+ pdimm->trfc1_ps = ((spd->trfc1_min_msb << 8) | (spd->trfc1_min_lsb)) *
+ pdimm->mtb_ps;
+ pdimm->trfc2_ps = ((spd->trfc2_min_msb << 8) | (spd->trfc2_min_lsb)) *
+ pdimm->mtb_ps;
+ pdimm->trfc4_ps = ((spd->trfc4_min_msb << 8) | (spd->trfc4_min_lsb)) *
+ pdimm->mtb_ps;
+ /* min four active window delay time */
+ pdimm->tfaw_ps = (((spd->tfaw_msb & 0xf) << 8) | spd->tfaw_min) *
+ pdimm->mtb_ps;
+
+ /* min row active to row active delay time, different bank group */
+ pdimm->trrds_ps = spd_to_ps(spd->trrds_min, spd->fine_trrds_min);
+ /* min row active to row active delay time, same bank group */
+ pdimm->trrdl_ps = spd_to_ps(spd->trrdl_min, spd->fine_trrdl_min);
+ /* min CAS to CAS Delay Time (tCCD_Lmin), same bank group */
+ pdimm->tccdl_ps = spd_to_ps(spd->tccdl_min, spd->fine_tccdl_min);
+
+ if (pdimm->package_3ds) {
+ if (pdimm->die_density <= 0x4) {
+ pdimm->trfc_slr_ps = 260000;
+ } else if (pdimm->die_density <= 0x5) {
+ pdimm->trfc_slr_ps = 350000;
+ } else {
+ printf("WARN: Unsupported logical rank density 0x%x\n",
+ pdimm->die_density);
+ }
+ }
+
+ /*
+ * Average periodic refresh interval
+ * tREFI = 7.8 us at normal temperature range
+ */
+ pdimm->refresh_rate_ps = 7800000;
+
+ for (i = 0; i < 18; i++)
+ pdimm->dq_mapping[i] = spd->mapping[i];
+
+ pdimm->dq_mapping_ors = ((spd->mapping[0] >> 6) & 0x3) == 0 ? 1 : 0;
+
+ return 0;
+}
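
The spd_to_ps() macro above combines the 125 ps medium timebase with the signed 1 ps fine correction; a standalone sketch with illustrative SPD encodings:

    #include <stdio.h>

    /* MTB count is in 125 ps units, the FTB count is a signed byte in 1 ps
     * units, mirroring the spd_to_ps() arithmetic. */
    static int ddr4_spd_to_ps(int mtb_count, int ftb_count)
    {
            const int mtb_ps = 125;         /* byte 17 timebases == 0x0 */
            const int ftb_10th_ps = 10;     /* 1 ps in tenths of a ps */

            return mtb_count * mtb_ps + (ftb_count * ftb_10th_ps) / 10;
    }

    int main(void)
    {
            /* tck_min = 7 MTB, fine correction -42 -> 833 ps (DDR4-2400) */
            printf("tCKmin = %d ps\n", ddr4_spd_to_ps(7, -42));
            /* taa_min = 104 MTB, fine correction -110 -> 12890 ps */
            printf("tAAmin = %d ps\n", ddr4_spd_to_ps(104, -110));
            return 0;
    }
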
diff --git a/common/ddr_spd.c b/common/ddr_spd.c
index dd3b8511e6..1c1d5826e0 100644
--- a/common/ddr_spd.c
+++ b/common/ddr_spd.c
@@ -480,6 +480,7 @@ static int read_buf(struct pbl_i2c *i2c,
* @i2c: I2C controller handle
* @addr: I2C bus address for the EEPROM
* @buf: buffer to read the SPD data to
+ * @memtype: Memory type, such as SPD_MEMTYPE_DDR4
*
* This function takes a I2C message transfer function and reads the contents
* from a SPD EEPROM to the buffer provided at @buf. The buffer should at least
@@ -487,19 +488,23 @@ static int read_buf(struct pbl_i2c *i2c,
* otherwise.
*/
int spd_read_eeprom(struct pbl_i2c *i2c,
- uint8_t addr, void *buf)
+ uint8_t addr, void *buf,
+ int memtype)
{
- unsigned char *buf8 = buf;
int ret;
- ret = read_buf(i2c, addr, SPD_SPA0_ADDRESS, buf);
- if (ret < 0)
- return ret;
+ if (memtype == SPD_MEMTYPE_DDR4) {
+ ret = read_buf(i2c, addr, SPD_SPA0_ADDRESS, buf);
+ if (ret < 0)
+ return ret;
- if (buf8[2] == SPD_MEMTYPE_DDR4) {
ret = read_buf(i2c, addr, SPD_SPA1_ADDRESS, buf + 256);
if (ret < 0)
return ret;
+ } else {
+ ret = read_buf(i2c, addr, 0, buf);
+ if (ret < 0)
+ return ret;
}
return 0;
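
A hedged usage sketch of the updated interface (not part of this patch): how the struct pbl_i2c handle is obtained and the 0x51 EEPROM address are board specific and assumed here, as are the board_read_ddr4_dimm() helper name and the include set:

    #include <common.h>
    #include <ddr_spd.h>
    #include <ddr_dimms.h>

    static struct dimm_params dimm;
    static u8 spd_buf[512];     /* DDR4 SPD: two 256-byte pages */

    int board_read_ddr4_dimm(struct pbl_i2c *i2c)
    {
            int ret;

            /* memtype selects the page handling; DDR4 needs SPA0/SPA1 */
            ret = spd_read_eeprom(i2c, 0x51, spd_buf, SPD_MEMTYPE_DDR4);
            if (ret < 0)
                    return ret;

            /* returns 0 on success, small positive error codes otherwise */
            return ddr4_compute_dimm_parameters((void *)spd_buf, &dimm);
    }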