soc/intel/tigerlake: Reorganize memory initialization support

This change reorganizes memory initialization code for LPDDR4x on
TGL to allow sharing of code when adding support for other memory
types. In follow-up changes, support for DDR4 will be added.

1. It adds configuration for memory topology which is currently only
MEMORY_DOWN, however DDR4 requires more topologies to be
supported.
2. spd_info structure is organized to allow mixed topologies as well.
3. DQ/DQS maps are organized to reflect hardware configuration.

TEST=Verified that volteer still boots and memory initialization is
successful.

Signed-off-by: Furquan Shaikh <furquan@google.com>
Change-Id: Ib625f2ab30a6e1362a310d9abb3f2051f85c3013
Reviewed-on: https://review.coreboot.org/c/coreboot/+/39865
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: EricR Lai <ericr_lai@compal.corp-partner.google.com>
This commit is contained in:
Furquan Shaikh
2020-03-26 15:36:19 -07:00
parent 3c57819005
commit 5b1f335ef8
10 changed files with 473 additions and 244 deletions

View File

@@ -17,12 +17,13 @@
void mainboard_memory_init_params(FSPM_UPD *mupd)
{
FSP_M_CONFIG *mem_cfg = &mupd->FspmConfig;
const struct mb_lpddr4x_cfg *board_cfg = variant_memory_params();
const struct lpddr4x_cfg *board_cfg = variant_memory_params();
const struct spd_info spd_info = {
.read_type = READ_SPD_CBFS,
.spd_spec.spd_index = variant_memory_sku(),
.topology = MEMORY_DOWN,
.md_spd_loc = SPD_CBFS,
.cbfs_index = variant_memory_sku(),
};
bool half_populated = gpio_get(GPIO_MEM_CH_SEL);
meminit_lpddr4x_dimm0(mem_cfg, board_cfg, &spd_info, half_populated);
meminit_lpddr4x(mem_cfg, board_cfg, &spd_info, half_populated);
}

View File

@@ -23,7 +23,7 @@ const struct pad_config *variant_override_gpio_table(size_t *num);
const struct cros_gpio *variant_cros_gpios(size_t *num);
const struct mb_lpddr4x_cfg *variant_memory_params(void);
const struct lpddr4x_cfg *variant_memory_params(void);
int variant_memory_sku(void);
#endif /* __BASEBOARD_VARIANTS_H__ */

View File

@@ -9,38 +9,59 @@
#include <baseboard/variants.h>
#include <gpio.h>
static const struct mb_lpddr4x_cfg baseboard_memcfg = {
/* DQ byte map */
static const struct lpddr4x_cfg baseboard_memcfg = {
/* DQ CPU<>DRAM map */
.dq_map = {
{ 0, 1, 2, 3, 4, 5, 6, 7, /* Byte 0 */
12, 13, 14, 15, 11, 10, 9, 8 }, /* Byte 1 */
{ 7, 2, 6, 3, 5, 1, 4, 0, /* Byte 2 */
10, 8, 9, 11, 15, 12, 14, 13 }, /* Byte 3 */
{ 3, 2, 1, 0, 4, 5, 6, 7, /* Byte 4 */
12, 13, 14, 15, 11, 10, 9, 8 }, /* Byte 5 */
{ 7, 0, 1, 6, 5, 4, 2, 3, /* Byte 6 */
15, 14, 8, 9, 10, 12, 11, 13 }, /* Byte 7 */
{ 3, 2, 1, 0, 4, 5, 6, 7, /* Byte 0 */
12, 13, 14, 15, 11, 10, 9, 8 }, /* Byte 1 */
{ 3, 4, 2, 5, 0, 6, 1, 7, /* Byte 2 */
13, 12, 11, 10, 14, 15, 9, 8 }, /* Byte 3 */
{ 3, 2, 1, 0, 7, 4, 5, 6, /* Byte 4 */
15, 14, 13, 12, 8, 9, 10, 11 }, /* Byte 5 */
{ 3, 4, 2, 5, 1, 0, 7, 6, /* Byte 6 */
15, 14, 9, 8, 12, 10, 11, 13 } /* Byte 7 */
[0] = {
{ 0, 1, 2, 3, 4, 5, 6, 7, }, /* DDR0_DQ0[7:0] */
{ 12, 13, 14, 15, 11, 10, 9, 8, }, /* DDR0_DQ1[7:0] */
},
[1] = {
{ 7, 2, 6, 3, 5, 1, 4, 0, }, /* DDR1_DQ0[7:0] */
{ 10, 8, 9, 11, 15, 12, 14, 13, }, /* DDR1_DQ1[7:0] */
},
[2] = {
{ 3, 2, 1, 0, 4, 5, 6, 7, }, /* DDR2_DQ0[7:0] */
{ 12, 13, 14, 15, 11, 10, 9, 8, }, /* DDR2_DQ1[7:0] */
},
[3] = {
{ 7, 0, 1, 6, 5, 4, 2, 3, }, /* DDR3_DQ0[7:0] */
{ 15, 14, 8, 9, 10, 12, 11, 13, }, /* DDR3_DQ1[7:0] */
},
[4] = {
{ 3, 2, 1, 0, 4, 5, 6, 7, }, /* DDR4_DQ0[7:0] */
{ 12, 13, 14, 15, 11, 10, 9, 8, }, /* DDR4_DQ1[7:0] */
},
[5] = {
{ 3, 4, 2, 5, 0, 6, 1, 7, }, /* DDR5_DQ0[7:0] */
{ 13, 12, 11, 10, 14, 15, 9, 8, }, /* DDR5_DQ1[7:0] */
},
[6] = {
{ 3, 2, 1, 0, 7, 4, 5, 6, }, /* DDR6_DQ0[7:0] */
{ 15, 14, 13, 12, 8, 9, 10, 11, }, /* DDR6_DQ1[7:0] */
},
[7] = {
{ 3, 4, 2, 5, 1, 0, 7, 6, }, /* DDR7_DQ0[7:0] */
{ 15, 14, 9, 8, 12, 10, 11, 13, }, /* DDR7_DQ1[7:0] */
},
},
/* DQS CPU<>DRAM map */
.dqs_map = {
/* Ch 0 1 2 3 */
{ 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 },
{ 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 }
[0] = { 0, 1 }, /* DDR0_DQS[1:0] */
[1] = { 0, 1 }, /* DDR1_DQS[1:0] */
[2] = { 0, 1 }, /* DDR2_DQS[1:0] */
[3] = { 0, 1 }, /* DDR3_DQS[1:0] */
[4] = { 0, 1 }, /* DDR4_DQS[1:0] */
[5] = { 0, 1 }, /* DDR5_DQS[1:0] */
[6] = { 0, 1 }, /* DDR6_DQS[1:0] */
[7] = { 0, 1 }, /* DDR7_DQS[1:0] */
},
.ect = 0, /* Disable Early Command Training */
};
const struct mb_lpddr4x_cfg *__weak variant_memory_params(void)
const struct lpddr4x_cfg *__weak variant_memory_params(void)
{
return &baseboard_memcfg;
}

View File

@@ -3,38 +3,59 @@
#include <baseboard/variants.h>
static const struct mb_lpddr4x_cfg malefor_memcfg = {
static const struct lpddr4x_cfg malefor_memcfg = {
/* DQ byte map */
.dq_map = {
{ 3, 1, 0, 2, 4, 6, 7, 5, /* Byte 0 */
12, 13, 14, 15, 8, 9, 10, 11 }, /* Byte 1 */
{ 0, 7, 1, 6, 2, 4, 3, 5, /* Byte 2 */
8, 15, 14, 9, 13, 10, 12, 11 }, /* Byte 3 */
{ 3, 2, 0, 1, 4, 5, 6, 7, /* Byte 4 */
12, 13, 15, 14, 8, 9, 10, 11 }, /* Byte 5 */
{ 6, 0, 1, 7, 5, 4, 2, 3, /* Byte 6 */
15, 14, 8, 9, 10, 12, 11, 13 }, /* Byte 7 */
{ 5, 0, 1, 3, 4, 2, 7, 6, /* Byte 0 */
11, 14, 13, 12, 8, 9, 15, 10 }, /* Byte 1 */
{ 3, 4, 2, 5, 0, 6, 1, 7, /* Byte 2 */
13, 12, 11, 10, 14, 15, 9, 8 }, /* Byte 3 */
{ 3, 2, 1, 0, 5, 4, 7, 6, /* Byte 4 */
12, 13, 15, 14, 8, 11, 9, 10 }, /* Byte 5 */
{ 3, 4, 2, 5, 1, 0, 7, 6, /* Byte 6 */
15, 14, 9, 8, 12, 10, 11, 13 } /* Byte 7 */
[0] = {
{ 3, 1, 0, 2, 4, 6, 7, 5, }, /* DDR0_DQ0[7:0] */
{ 12, 13, 14, 15, 8, 9, 10, 11 }, /* DDR0_DQ1[7:0] */
},
[1] = {
{ 0, 7, 1, 6, 2, 4, 3, 5, }, /* DDR1_DQ0[7:0] */
{ 8, 15, 14, 9, 13, 10, 12, 11 }, /* DDR1_DQ1[7:0] */
},
[2] = {
{ 3, 2, 0, 1, 4, 5, 6, 7, }, /* DDR2_DQ0[7:0] */
{ 12, 13, 15, 14, 8, 9, 10, 11 }, /* DDR2_DQ1[7:0] */
},
[3] = {
{ 6, 0, 1, 7, 5, 4, 2, 3, }, /* DDR3_DQ0[7:0] */
{ 15, 14, 8, 9, 10, 12, 11, 13 }, /* DDR3_DQ1[7:0] */
},
[4] = {
{ 5, 0, 1, 3, 4, 2, 7, 6, }, /* DDR4_DQ0[7:0] */
{ 11, 14, 13, 12, 8, 9, 15, 10 }, /* DDR4_DQ1[7:0] */
},
[5] = {
{ 3, 4, 2, 5, 0, 6, 1, 7, }, /* DDR5_DQ0[7:0] */
{ 13, 12, 11, 10, 14, 15, 9, 8 }, /* DDR5_DQ1[7:0] */
},
[6] = {
{ 3, 2, 1, 0, 5, 4, 7, 6, }, /* DDR6_DQ0[7:0] */
{ 12, 13, 15, 14, 8, 11, 9, 10 }, /* DDR6_DQ1[7:0] */
},
[7] = {
{ 3, 4, 2, 5, 1, 0, 7, 6, }, /* DDR7_DQ0[7:0] */
{ 15, 14, 9, 8, 12, 10, 11, 13 }, /* DDR7_DQ1[7:0] */
},
},
/* DQS CPU<>DRAM map */
.dqs_map = {
/* Ch 0 1 2 3 */
{ 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 },
{ 0, 1 }, { 0, 1 }, { 0, 1 }, { 0, 1 }
[0] = { 0, 1 }, /* DDR0_DQS[1:0] */
[1] = { 0, 1 }, /* DDR1_DQS[1:0] */
[2] = { 0, 1 }, /* DDR2_DQS[1:0] */
[3] = { 0, 1 }, /* DDR3_DQS[1:0] */
[4] = { 0, 1 }, /* DDR4_DQS[1:0] */
[5] = { 0, 1 }, /* DDR5_DQS[1:0] */
[6] = { 0, 1 }, /* DDR6_DQS[1:0] */
[7] = { 0, 1 }, /* DDR7_DQS[1:0] */
},
.ect = 0, /* Disable Early Command Training */
};
const struct mb_lpddr4x_cfg *variant_memory_params(void)
const struct lpddr4x_cfg *variant_memory_params(void)
{
return &malefor_memcfg;
}

View File

@@ -56,13 +56,14 @@ void mainboard_memory_init_params(FSPM_UPD *mupd)
{
FSP_M_CONFIG *mem_cfg = &mupd->FspmConfig;
const struct mb_lpddr4x_cfg *mem_config = variant_memory_params();
const struct lpddr4x_cfg *mem_config = variant_memory_params();
const struct spd_info spd_info = {
.read_type = READ_SPD_CBFS,
.spd_spec.spd_index = mainboard_get_spd_index(),
.topology = MEMORY_DOWN,
.md_spd_loc = SPD_CBFS,
.cbfs_index = mainboard_get_spd_index(),
};
bool half_populated = false;
meminit_lpddr4x_dimm0(mem_cfg, mem_config, &spd_info, half_populated);
meminit_lpddr4x(mem_cfg, mem_config, &spd_info, half_populated);
}

View File

@@ -28,6 +28,6 @@ const struct pad_config *variant_early_gpio_table(size_t *num);
const struct cros_gpio *variant_cros_gpios(size_t *num);
size_t variant_memory_sku(void);
const struct mb_lpddr4x_cfg *variant_memory_params(void);
const struct lpddr4x_cfg *variant_memory_params(void);
#endif /*__BASEBOARD_VARIANTS_H__ */

View File

@@ -21,38 +21,59 @@ size_t __weak variant_memory_sku(void)
return 0;
}
static const struct mb_lpddr4x_cfg mem_config = {
static const struct lpddr4x_cfg mem_config = {
/* DQ byte map */
.dq_map = {
{ 0, 1, 6, 7, 3, 2, 5, 4, /* Byte 0 */
15, 8, 9, 14, 12, 11, 10, 13 }, /* Byte 1 */
{ 11, 12, 8, 15, 9, 14, 10, 13, /* Byte 2 */
3, 4, 7, 0, 6, 1, 5, 2 }, /* Byte 3 */
{ 4, 5, 3, 2, 7, 1, 0, 6, /* Byte 4 */
11, 10, 12, 13, 8, 9, 14, 15 }, /* Byte 5 */
{ 12, 11, 8, 13, 14, 15, 9, 10, /* Byte 6 */
4, 7, 3, 2, 1, 6, 0, 5 }, /* Byte 7 */
{ 11, 10, 9, 8, 12, 13, 15, 14, /* Byte 0 */
4, 5, 6, 7, 3, 2, 0, 1 }, /* Byte 1 */
{ 0, 7, 1, 6, 3, 5, 2, 4, /* Byte 2 */
9, 8, 10, 11, 14, 15, 13, 12 }, /* Byte 3 */
{ 4, 5, 6, 1, 3, 2, 7, 0, /* Byte 4 */
10, 13, 12, 11, 14, 9, 15, 8 }, /* Byte 5 */
{ 10, 12, 9, 15, 8, 11, 13, 14, /* Byte 6 */
3, 4, 1, 2, 6, 0, 5, 7 } /* Byte 7 */
[0] = {
{ 0, 1, 6, 7, 3, 2, 5, 4, }, /* DDR0_DQ0[7:0] */
{ 15, 8, 9, 14, 12, 11, 10, 13, }, /* DDR0_DQ1[7:0] */
},
[1] = {
{ 11, 12, 8, 15, 9, 14, 10, 13, }, /* DDR1_DQ0[7:0] */
{ 3, 4, 7, 0, 6, 1, 5, 2, }, /* DDR1_DQ1[7:0] */
},
[2] = {
{ 4, 5, 3, 2, 7, 1, 0, 6, }, /* DDR2_DQ0[7:0] */
{ 11, 10, 12, 13, 8, 9, 14, 15, }, /* DDR2_DQ1[7:0] */
},
[3] = {
{ 12, 11, 8, 13, 14, 15, 9, 10, }, /* DDR3_DQ0[7:0] */
{ 4, 7, 3, 2, 1, 6, 0, 5, }, /* DDR3_DQ1[7:0] */
},
[4] = {
{ 11, 10, 9, 8, 12, 13, 15, 14, }, /* DDR4_DQ0[7:0] */
{ 4, 5, 6, 7, 3, 2, 0, 1, }, /* DDR4_DQ1[7:0] */
},
[5] = {
{ 0, 7, 1, 6, 3, 5, 2, 4, }, /* DDR5_DQ0[7:0] */
{ 9, 8, 10, 11, 14, 15, 13, 12, }, /* DDR5_DQ1[7:0] */
},
[6] = {
{ 4, 5, 6, 1, 3, 2, 7, 0, }, /* DDR6_DQ0[7:0] */
{ 10, 13, 12, 11, 14, 9, 15, 8, }, /* DDR6_DQ1[7:0] */
},
[7] = {
{ 10, 12, 9, 15, 8, 11, 13, 14, }, /* DDR7_DQ0[7:0] */
{ 3, 4, 1, 2, 6, 0, 5, 7, }, /* DDR7_DQ1[7:0] */
},
},
/* DQS CPU<>DRAM map */
.dqs_map = {
/* Ch 0 1 2 3 */
{ 0, 1 }, { 1, 0 }, { 0, 1 }, { 1, 0 },
{ 1, 0 }, { 0, 1 }, { 0, 1 }, { 1, 0 }
[0] = { 0, 1 }, /* DDR0_DQS[1:0] */
[1] = { 1, 0 }, /* DDR1_DQS[1:0] */
[2] = { 0, 1 }, /* DDR2_DQS[1:0] */
[3] = { 1, 0 }, /* DDR3_DQS[1:0] */
[4] = { 1, 0 }, /* DDR4_DQS[1:0] */
[5] = { 0, 1 }, /* DDR5_DQS[1:0] */
[6] = { 0, 1 }, /* DDR6_DQS[1:0] */
[7] = { 1, 0 }, /* DDR7_DQS[1:0] */
},
.ect = 1, /* Early Command Training */
};
const struct mb_lpddr4x_cfg *__weak variant_memory_params(void)
const struct lpddr4x_cfg *__weak variant_memory_params(void)
{
return &mem_config;
}

View File

@@ -21,38 +21,59 @@ size_t __weak variant_memory_sku(void)
return 0;
}
static const struct mb_lpddr4x_cfg mem_config = {
static const struct lpddr4x_cfg mem_config = {
/* DQ byte map */
.dq_map = {
{ 8, 9, 12, 11, 13, 15, 10, 14, /* Byte 0 */
4, 6, 0, 2, 5, 7, 1, 3 }, /* Byte 1 */
{ 2, 3, 0, 6, 1, 7, 5, 4, /* Byte 2 */
15, 14, 13, 8, 12, 11, 9, 10 }, /* Byte 3 */
{ 1, 0, 3, 2, 5, 4, 7, 6, /* Byte 4 */
14, 15, 12, 13, 8, 10, 9, 11 }, /* Byte 5 */
{ 8, 10, 11, 9, 15, 12, 14, 13, /* Byte 6 */
4, 7, 6, 5, 2, 0, 1, 3 }, /* Byte 7 */
{ 8, 9, 10, 11, 13, 12, 15, 14, /* Byte 0 */
7, 6, 4, 5, 0, 2, 1, 3 }, /* Byte 1 */
{ 1, 3, 0, 2, 6, 4, 5, 7, /* Byte 2 */
14, 15, 10, 12, 8, 13, 11, 9 }, /* Byte 3 */
{ 1, 0, 2, 4, 5, 3, 7, 6, /* Byte 4 */
12, 14, 15, 13, 9, 10, 8, 11 }, /* Byte 5 */
{ 11, 9, 8, 13, 12, 14, 15, 10, /* Byte 6 */
4, 7, 5, 1, 2, 6, 3, 0 } /* Byte 7 */
[0] = {
{ 8, 9, 12, 11, 13, 15, 10, 14, }, /* DDR0_DQ0[7:0] */
{ 4, 6, 0, 2, 5, 7, 1, 3, }, /* DDR0_DQ1[7:0] */
},
[1] = {
{ 2, 3, 0, 6, 1, 7, 5, 4, }, /* DDR1_DQ0[7:0] */
{ 15, 14, 13, 8, 12, 11, 9, 10, }, /* DDR1_DQ1[7:0] */
},
[2] = {
{ 1, 0, 3, 2, 5, 4, 7, 6, }, /* DDR2_DQ0[7:0] */
{ 14, 15, 12, 13, 8, 10, 9, 11, }, /* DDR2_DQ1[7:0] */
},
[3] = {
{ 8, 10, 11, 9, 15, 12, 14, 13, }, /* DDR3_DQ0[7:0] */
{ 4, 7, 6, 5, 2, 0, 1, 3, }, /* DDR3_DQ1[7:0] */
},
[4] = {
{ 8, 9, 10, 11, 13, 12, 15, 14, }, /* DDR4_DQ0[7:0] */
{ 7, 6, 4, 5, 0, 2, 1, 3, }, /* DDR4_DQ1[7:0] */
},
[5] = {
{ 1, 3, 0, 2, 6, 4, 5, 7, }, /* DDR5_DQ0[7:0] */
{ 14, 15, 10, 12, 8, 13, 11, 9, }, /* DDR5_DQ1[7:0] */
},
[6] = {
{ 1, 0, 2, 4, 5, 3, 7, 6, }, /* DDR6_DQ0[7:0] */
{ 12, 14, 15, 13, 9, 10, 8, 11, }, /* DDR6_DQ1[7:0] */
},
[7] = {
{ 11, 9, 8, 13, 12, 14, 15, 10, }, /* DDR7_DQ0[7:0] */
{ 4, 7, 5, 1, 2, 6, 3, 0, }, /* DDR7_DQ1[7:0] */
},
},
/* DQS CPU<>DRAM map */
.dqs_map = {
/* Ch 0 1 2 3 */
{ 1, 0 }, { 0, 1 }, { 0, 1 }, { 1, 0 },
{ 1, 0 }, { 0, 1 }, { 0, 1 }, { 1, 0 }
[0] = { 1, 0 }, /* DDR0_DQS[1:0] */
[1] = { 0, 1 }, /* DDR1_DQS[1:0] */
[2] = { 0, 1 }, /* DDR2_DQS[1:0] */
[3] = { 1, 0 }, /* DDR3_DQS[1:0] */
[4] = { 1, 0 }, /* DDR4_DQS[1:0] */
[5] = { 0, 1 }, /* DDR5_DQS[1:0] */
[6] = { 0, 1 }, /* DDR6_DQS[1:0] */
[7] = { 1, 0 }, /* DDR7_DQS[1:0] */
},
.ect = 1, /* Early Command Training */
};
const struct mb_lpddr4x_cfg *__weak variant_memory_params(void)
const struct lpddr4x_cfg *__weak variant_memory_params(void)
{
return &mem_config;
}

View File

@@ -12,46 +12,65 @@
#include <stdint.h>
#include <fsp/soc_binding.h>
#define BYTES_PER_CHANNEL 2
#define BITS_PER_BYTE 8
#define DQS_PER_CHANNEL 2
#define NUM_CHANNELS 8
struct spd_by_pointer {
size_t spd_data_len;
uintptr_t spd_data_ptr;
#define LPDDR4X_CHANNELS 8
#define LPDDR4X_BYTES_PER_CHANNEL 2
enum mem_topology {
MEMORY_DOWN, /* Supports reading SPD from CBFS or in-memory pointer. */
};
enum mem_info_read_type {
NOT_EXISTING, /* No memory in this channel */
READ_SPD_CBFS, /* Find spd file in CBFS. */
READ_SPD_MEMPTR /* Find spd data from pointer. */
enum md_spd_loc {
/* Read SPD from pointer provided to memory location. */
SPD_MEMPTR,
/* Read SPD using index into spd.bin in CBFS. */
SPD_CBFS,
};
struct spd_info {
enum mem_info_read_type read_type;
union spd_data_by {
/* To identify spd file when read_type is READ_SPD_CBFS. */
int spd_index;
enum mem_topology topology;
/* To find spd data when read_type is READ_SPD_MEMPTR. */
struct spd_by_pointer spd_data_ptr_info;
} spd_spec;
/* SPD info for Memory down topology */
enum md_spd_loc md_spd_loc;
union {
/* Used for SPD_CBFS */
uint8_t cbfs_index;
struct {
/* Used for SPD_MEMPTR */
uintptr_t data_ptr;
size_t data_len;
};
};
};
/* Board-specific memory configuration information */
struct mb_lpddr4x_cfg {
/* DQ mapping */
uint8_t dq_map[NUM_CHANNELS][BYTES_PER_CHANNEL * BITS_PER_BYTE];
struct lpddr4x_cfg {
/*
* DQ CPU<>DRAM map:
* LPDDR4x memory interface has 2 DQs per channel. Each DQ consists of 8 bits(1
* byte). Thus, dq_map is represented as DDR[7-0]_DQ[1-0][7:0], where
* DDR[7-0] : LPDDR4x channel #
* DQ[1-0] : DQ # within the channel
* [7:0] : Bits within the DQ
*
* Index of the array represents DQ pin# on the CPU, whereas value in
* the array represents DQ pin# on the memory part.
*/
uint8_t dq_map[LPDDR4X_CHANNELS][LPDDR4X_BYTES_PER_CHANNEL][BITS_PER_BYTE];
/*
* DQS CPU<>DRAM map. Each array entry represents a
* mapping of a dq bit on the CPU to the bit it's connected to on
* the memory part. The array index represents the dqs bit number
* on the memory part, and the values in the array represent which
* pin on the CPU that DRAM pin connects to.
* DQS CPU<>DRAM map:
* LPDDR4x memory interface has 2 DQS pairs(P/N) per channel. Thus, dqs_map is
* represented as DDR[7-0]_DQS[1:0], where
* DDR[7-0] : LPDDR4x channel #
* DQS[1-0] : DQS # within the channel
*
* Index of the array represents DQS pin# on the CPU, whereas value in
* the array represents DQS pin# on the memory part.
*/
uint8_t dqs_map[NUM_CHANNELS][DQS_PER_CHANNEL];
uint8_t dqs_map[LPDDR4X_CHANNELS][LPDDR4X_BYTES_PER_CHANNEL];
/*
* Early Command Training Enable/Disable Control
@@ -60,10 +79,7 @@ struct mb_lpddr4x_cfg {
uint8_t ect;
};
/* Initialize default memory configurations for dimm0-only lpddr4x */
void meminit_lpddr4x_dimm0(FSP_M_CONFIG *mem_cfg,
const struct mb_lpddr4x_cfg *board_cfg,
const struct spd_info *spd,
bool half_populated);
void meminit_lpddr4x(FSP_M_CONFIG *mem_cfg, const struct lpddr4x_cfg *board_cfg,
const struct spd_info *spd, bool half_populated);
#endif /* _SOC_TIGERLAKE_MEMINIT_H_ */

View File

@@ -12,6 +12,10 @@
#include <spd_bin.h>
#include <string.h>
/* If memory is half-populated, then upper half of the channels need to be left empty. */
#define LPDDR4X_CHANNEL_UNPOPULATED(ch, half_populated) \
((half_populated) && ((ch) >= (LPDDR4X_CHANNELS / 2)))
enum dimm_enable_options {
ENABLE_BOTH_DIMMS = 0,
DISABLE_DIMM0 = 1,
@@ -19,145 +23,268 @@ enum dimm_enable_options {
DISABLE_BOTH_DIMMS = 3
};
#define MEM_INIT_CH_DQ_DQS_MAP(_mem_cfg, _b_cfg, _ch) \
do { \
memcpy(&_mem_cfg->DqMapCpu2DramCh ## _ch, \
&_b_cfg->dq_map[_ch], \
sizeof(_b_cfg->dq_map[_ch])); \
memcpy(&_mem_cfg->DqsMapCpu2DramCh ## _ch, \
&_b_cfg->dqs_map[_ch], \
sizeof(_b_cfg->dqs_map[_ch])); \
} while (0)
/*
 * Translate per-DIMM SPD presence (non-zero SPD address == populated) into
 * the FSP dimm-enable encoding (enum dimm_enable_options).
 * A channel populated only on DIMM1 is not a supported configuration.
 */
static uint8_t get_dimm_cfg(uintptr_t dimm0, uintptr_t dimm1)
{
	if (dimm0)
		return dimm1 ? ENABLE_BOTH_DIMMS : DISABLE_DIMM1;

	if (dimm1)
		die("Disabling of only dimm0 is not supported!\n");

	return DISABLE_BOTH_DIMMS;
}
static void spd_read_from_cbfs(const struct spd_info *spd,
uintptr_t *spd_data_ptr, size_t *spd_data_len)
/*
 * Program the per-channel SPD pointer UPDs for both DIMMs of `channel`.
 * An SPD address of 0 marks the corresponding DIMM as absent.
 *
 * NOTE(review): Reserved9 appears to carry the per-channel dimm-disable
 * control (values come from enum dimm_enable_options via get_dimm_cfg()) --
 * confirm against the FSP UPD headers for this platform.
 */
static void init_spd_upds(FSP_M_CONFIG *mem_cfg, int channel, uintptr_t spd_dimm0,
			uintptr_t spd_dimm1)
{
	mem_cfg->Reserved9[channel] = get_dimm_cfg(spd_dimm0, spd_dimm1);

	/* FSP exposes one flat MemorySpdPtrNN UPD per DIMM; channel `c` owns
	   UPDs 2*c (DIMM0) and 2*c+1 (DIMM1), hence the explicit switch. */
	switch (channel) {
	case 0:
		mem_cfg->MemorySpdPtr00 = spd_dimm0;
		mem_cfg->MemorySpdPtr01 = spd_dimm1;
		break;
	case 1:
		mem_cfg->MemorySpdPtr02 = spd_dimm0;
		mem_cfg->MemorySpdPtr03 = spd_dimm1;
		break;
	case 2:
		mem_cfg->MemorySpdPtr04 = spd_dimm0;
		mem_cfg->MemorySpdPtr05 = spd_dimm1;
		break;
	case 3:
		mem_cfg->MemorySpdPtr06 = spd_dimm0;
		mem_cfg->MemorySpdPtr07 = spd_dimm1;
		break;
	case 4:
		mem_cfg->MemorySpdPtr08 = spd_dimm0;
		mem_cfg->MemorySpdPtr09 = spd_dimm1;
		break;
	case 5:
		mem_cfg->MemorySpdPtr10 = spd_dimm0;
		mem_cfg->MemorySpdPtr11 = spd_dimm1;
		break;
	case 6:
		mem_cfg->MemorySpdPtr12 = spd_dimm0;
		mem_cfg->MemorySpdPtr13 = spd_dimm1;
		break;
	case 7:
		mem_cfg->MemorySpdPtr14 = spd_dimm0;
		mem_cfg->MemorySpdPtr15 = spd_dimm1;
		break;
	default:
		die("Invalid channel: %d\n", channel);
	}
}
/* Mark `channel` fully unpopulated: no SPD for either DIMM. */
static inline void init_spd_upds_empty(FSP_M_CONFIG *mem_cfg, int channel)
{
	init_spd_upds(mem_cfg, channel, 0, 0);
}
/* Populate only DIMM0 of `channel` (memory-down parts hang off DIMM0). */
static inline void init_spd_upds_dimm0(FSP_M_CONFIG *mem_cfg, int channel, uintptr_t spd_dimm0)
{
	init_spd_upds(mem_cfg, channel, spd_dimm0, 0);
}
/*
 * Fill the DQ CPU<>DRAM map UPD for one channel (`byte_pair`). Each UPD
 * holds the mapping for two DQ bytes; NULL byte pointers zero the entry
 * (unpopulated channel).
 */
static void init_dq_upds(FSP_M_CONFIG *mem_cfg, int byte_pair, const uint8_t *dq_byte0,
			const uint8_t *dq_byte1)
{
	uint8_t *upd;

	if (byte_pair == 0)
		upd = mem_cfg->DqMapCpu2DramCh0;
	else if (byte_pair == 1)
		upd = mem_cfg->DqMapCpu2DramCh1;
	else if (byte_pair == 2)
		upd = mem_cfg->DqMapCpu2DramCh2;
	else if (byte_pair == 3)
		upd = mem_cfg->DqMapCpu2DramCh3;
	else if (byte_pair == 4)
		upd = mem_cfg->DqMapCpu2DramCh4;
	else if (byte_pair == 5)
		upd = mem_cfg->DqMapCpu2DramCh5;
	else if (byte_pair == 6)
		upd = mem_cfg->DqMapCpu2DramCh6;
	else if (byte_pair == 7)
		upd = mem_cfg->DqMapCpu2DramCh7;
	else
		die("Invalid byte_pair: %d\n", byte_pair);

	if (dq_byte0 == NULL || dq_byte1 == NULL) {
		memset(upd, 0, BITS_PER_BYTE * 2);
	} else {
		memcpy(upd, dq_byte0, BITS_PER_BYTE);
		memcpy(upd + BITS_PER_BYTE, dq_byte1, BITS_PER_BYTE);
	}
}
/* Zero the DQ map UPD for an unpopulated channel. */
static inline void init_dq_upds_empty(FSP_M_CONFIG *mem_cfg, int byte_pair)
{
	init_dq_upds(mem_cfg, byte_pair, NULL, NULL);
}
/*
 * Fill the DQS CPU<>DRAM map UPD for one channel (`byte_pair`). Each UPD
 * holds one DQS pair; pass (0, 0) for an unpopulated channel.
 */
static void init_dqs_upds(FSP_M_CONFIG *mem_cfg, int byte_pair, uint8_t dqs_byte0,
			uint8_t dqs_byte1)
{
	uint8_t *upd;

	if (byte_pair == 0)
		upd = mem_cfg->DqsMapCpu2DramCh0;
	else if (byte_pair == 1)
		upd = mem_cfg->DqsMapCpu2DramCh1;
	else if (byte_pair == 2)
		upd = mem_cfg->DqsMapCpu2DramCh2;
	else if (byte_pair == 3)
		upd = mem_cfg->DqsMapCpu2DramCh3;
	else if (byte_pair == 4)
		upd = mem_cfg->DqsMapCpu2DramCh4;
	else if (byte_pair == 5)
		upd = mem_cfg->DqsMapCpu2DramCh5;
	else if (byte_pair == 6)
		upd = mem_cfg->DqsMapCpu2DramCh6;
	else if (byte_pair == 7)
		upd = mem_cfg->DqsMapCpu2DramCh7;
	else
		die("Invalid byte_pair: %d\n", byte_pair);

	upd[0] = dqs_byte0;
	upd[1] = dqs_byte1;
}
/* Zero the DQS map UPD for an unpopulated channel. */
static inline void init_dqs_upds_empty(FSP_M_CONFIG *mem_cfg, int byte_pair)
{
	init_dqs_upds(mem_cfg, byte_pair, 0, 0);
}
/*
 * Locate SPD entry `index` inside spd.bin in CBFS and return its mapped
 * address and length through `data`/`len`. Dies if spd.bin is missing or
 * the index is out of range.
 *
 * Fix: this span interleaved stale pre-refactor lines that still referenced
 * `spd->spd_spec.spd_index` (removed with the spd_info rework); the function
 * is reconstructed against the new (uint8_t index, ...) signature.
 */
static void read_spd_from_cbfs(uint8_t index, uintptr_t *data, size_t *len)
{
	struct region_device spd_rdev;

	printk(BIOS_DEBUG, "SPD INDEX = %u\n", index);
	if (get_spd_cbfs_rdev(&spd_rdev, index) < 0)
		die("spd.bin not found or incorrect index\n");

	*len = region_device_sz(&spd_rdev);

	/* Memory leak is ok since we have memory mapped boot media */
	assert(CONFIG(BOOT_DEVICE_MEMORY_MAPPED));
	*data = (uintptr_t)rdev_mmap_full(&spd_rdev);
}
static void get_spd_data(const struct spd_info *spd,
uintptr_t *spd_data_ptr, size_t *spd_data_len)
static void read_md_spd(const struct spd_info *info, uintptr_t *data, size_t *len)
{
if (spd->read_type == READ_SPD_MEMPTR) {
*spd_data_ptr = spd->spd_spec.spd_data_ptr_info.spd_data_ptr;
*spd_data_len = spd->spd_spec.spd_data_ptr_info.spd_data_len;
return;
if (info->md_spd_loc == SPD_MEMPTR) {
*data = info->data_ptr;
*len = info->data_len;
} else if (info->md_spd_loc == SPD_CBFS) {
read_spd_from_cbfs(info->cbfs_index, data, len);
} else {
die("Not a valid location(%d) for Memory-down SPD!\n", info->md_spd_loc);
}
if (spd->read_type == READ_SPD_CBFS) {
spd_read_from_cbfs(spd, spd_data_ptr, spd_data_len);
return;
}
die("no valid way to read SPD info");
print_spd_info((unsigned char *)data);
}
/*
 * Pre-refactor helper (removed by this change): copies the DQ/DQS maps for
 * the lower four channels, and for the upper four only when fully populated,
 * via the MEM_INIT_CH_DQ_DQS_MAP() memcpy macro.
 */
static void meminit_dq_dqs_map(FSP_M_CONFIG *mem_cfg,
			const struct mb_lpddr4x_cfg *board_cfg,
			bool half_populated)
{
	MEM_INIT_CH_DQ_DQS_MAP(mem_cfg, board_cfg, 0);
	MEM_INIT_CH_DQ_DQS_MAP(mem_cfg, board_cfg, 1);
	MEM_INIT_CH_DQ_DQS_MAP(mem_cfg, board_cfg, 2);
	MEM_INIT_CH_DQ_DQS_MAP(mem_cfg, board_cfg, 3);

	/* Upper-half channels are left at their defaults when half-populated. */
	if (half_populated)
		return;

	MEM_INIT_CH_DQ_DQS_MAP(mem_cfg, board_cfg, 4);
	MEM_INIT_CH_DQ_DQS_MAP(mem_cfg, board_cfg, 5);
	MEM_INIT_CH_DQ_DQS_MAP(mem_cfg, board_cfg, 6);
	MEM_INIT_CH_DQ_DQS_MAP(mem_cfg, board_cfg, 7);
}
/*
 * Pre-refactor helper (removed by this change): programs all eight channels
 * with the same DIMM0-only SPD pointer, disabling the upper four channels
 * entirely when the board is half-populated.
 */
static void meminit_channels_dimm0(FSP_M_CONFIG *mem_cfg,
			const struct mb_lpddr4x_cfg *board_cfg,
			uintptr_t spd_data_ptr,
			bool half_populated)
{
	uint8_t dimm_cfg = DISABLE_DIMM1; /* Use only DIMM0 */

	/* Channel 0 */
	mem_cfg->Reserved9[0] = dimm_cfg;
	mem_cfg->MemorySpdPtr00 = spd_data_ptr;
	mem_cfg->MemorySpdPtr01 = 0;

	/* Channel 1 */
	mem_cfg->Reserved9[1] = dimm_cfg;
	mem_cfg->MemorySpdPtr02 = spd_data_ptr;
	mem_cfg->MemorySpdPtr03 = 0;

	/* Channel 2 */
	mem_cfg->Reserved9[2] = dimm_cfg;
	mem_cfg->MemorySpdPtr04 = spd_data_ptr;
	mem_cfg->MemorySpdPtr05 = 0;

	/* Channel 3 */
	mem_cfg->Reserved9[3] = dimm_cfg;
	mem_cfg->MemorySpdPtr06 = spd_data_ptr;
	mem_cfg->MemorySpdPtr07 = 0;

	/* Half-populated boards leave channels 4-7 with no DIMMs at all. */
	if (half_populated) {
		printk(BIOS_INFO, "%s: DRAM half-populated\n", __func__);
		dimm_cfg = DISABLE_BOTH_DIMMS;
		spd_data_ptr = 0;
	}

	/* Channel 4 */
	mem_cfg->Reserved9[4] = dimm_cfg;
	mem_cfg->MemorySpdPtr08 = spd_data_ptr;
	mem_cfg->MemorySpdPtr09 = 0;

	/* Channel 5 */
	mem_cfg->Reserved9[5] = dimm_cfg;
	mem_cfg->MemorySpdPtr10 = spd_data_ptr;
	mem_cfg->MemorySpdPtr11 = 0;

	/* Channel 6 */
	mem_cfg->Reserved9[6] = dimm_cfg;
	mem_cfg->MemorySpdPtr12 = spd_data_ptr;
	mem_cfg->MemorySpdPtr13 = 0;

	/* Channel 7 */
	mem_cfg->Reserved9[7] = dimm_cfg;
	mem_cfg->MemorySpdPtr14 = spd_data_ptr;
	mem_cfg->MemorySpdPtr15 = 0;

	meminit_dq_dqs_map(mem_cfg, board_cfg, half_populated);
}
/*
 * Initialize FSP-M memory UPDs for a memory-down LPDDR4x board.
 *
 * @mem_cfg:        FSP-M configuration to program.
 * @board_cfg:      Board DQ/DQS maps and ECT setting.
 * @info:           Memory-down SPD location (CBFS index or memory pointer).
 * @half_populated: When true, the upper half of the channels is left empty.
 *
 * Fix: this span interleaved removed fragments of the old
 * meminit_lpddr4x_dimm0() (old signature, get_spd_data()/
 * meminit_channels_dimm0() calls) with the new implementation; the coherent
 * post-change function is reconstructed here. Dies if the topology is not
 * MEMORY_DOWN.
 */
void meminit_lpddr4x(FSP_M_CONFIG *mem_cfg, const struct lpddr4x_cfg *board_cfg,
		const struct spd_info *info, bool half_populated)
{
	size_t spd_len;
	uintptr_t spd_data;
	int i;

	if (info->topology != MEMORY_DOWN)
		die("LPDDR4x only support memory-down topology.\n");

	/* LPDDR4x does not allow interleaved memory */
	mem_cfg->DqPinsInterleaved = 0;
	mem_cfg->ECT = board_cfg->ect;
	mem_cfg->MrcSafeConfig = 0x1;

	read_md_spd(info, &spd_data, &spd_len);
	mem_cfg->MemorySpdDataLen = spd_len;

	for (i = 0; i < LPDDR4X_CHANNELS; i++) {
		if (LPDDR4X_CHANNEL_UNPOPULATED(i, half_populated))
			init_spd_upds_empty(mem_cfg, i);
		else
			init_spd_upds_dimm0(mem_cfg, i, spd_data);
	}

	/*
	 * LPDDR4x memory interface has 2 DQs per channel. Each DQ consists of
	 * 8 bits (1 byte). However, FSP UPDs for DQ Map expect a DQ pair
	 * (i.e. mapping for 2 bytes) in each UPD.
	 *
	 * Thus, init_dq_upds() needs to be called for the dq pair of each
	 * channel: DqMapCpu2DramCh<i> <-- dq_map[CHAN=i][0-1]
	 */
	for (i = 0; i < LPDDR4X_CHANNELS; i++) {
		if (LPDDR4X_CHANNEL_UNPOPULATED(i, half_populated))
			init_dq_upds_empty(mem_cfg, i);
		else
			init_dq_upds(mem_cfg, i, board_cfg->dq_map[i][0],
				     board_cfg->dq_map[i][1]);
	}

	/*
	 * LPDDR4x memory interface has 2 DQS pairs per channel. FSP UPDs for
	 * DQS Map expect a pair in each UPD.
	 *
	 * Thus, init_dqs_upds() needs to be called for the dqs pair of each
	 * channel: DqsMapCpu2DramCh<i> <-- dqs_map[CHAN=i][0-1]
	 */
	for (i = 0; i < LPDDR4X_CHANNELS; i++) {
		if (LPDDR4X_CHANNEL_UNPOPULATED(i, half_populated))
			init_dqs_upds_empty(mem_cfg, i);
		else
			init_dqs_upds(mem_cfg, i, board_cfg->dqs_map[i][0],
				      board_cfg->dqs_map[i][1]);
	}
}