Compare commits
4 Commits
upstream-5 ... upstream-8

Author | SHA1 | Date
---|---|---
 | d87fb53d73 |
 | 67a7ee7eb8 |
 | c097c4788b |
 | 1f97d801ce |
@@ -36,6 +36,18 @@
 #define DDR4_SPD_PART_OFF 329
 #define DDR4_SPD_PART_LEN 20
 #define DDR4_SPD_SN_OFF 325
+#define MAX_SPD_PAGE_SIZE_SPD5 128
+#define MAX_SPD_SIZE (SPD_PAGE_LEN * 4)
+#define SPD_HUB_MEMREG(addr) ((u8)(0x80 | (addr)))
+#define SPD5_MR11 0x0B
+#define SPD5_MR0 0x00
+#define SPD5_MEMREG_REG(addr) ((u8)((~0x80) & (addr)))
+#define SPD5_MR0_SPD5_HUB_DEV 0x51
+
+struct spd_offset_table {
+	u16 start;	/* Offset 0 */
+	u16 end;	/* Offset 2 */
+};
 
 struct spd_block {
 	u8 addr_map[CONFIG_DIMM_MAX]; /* 7 bit I2C addresses */
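The two address macros above encode which of the SPD5 hub's address spaces a one-byte offset refers to: with bit 7 set the byte addresses the non-volatile SPD memory window, with bit 7 clear it addresses an internal register such as MR11 (the page pointer) or MR0 (the device-type register). A minimal standalone illustration of that encoding, with worked example values that are assumptions rather than part of this diff:

#include <assert.h>
#include <stdint.h>

/* Mirrors of the macros added above, using uint8_t for portability. */
#define SPD_HUB_MEMREG(addr)  ((uint8_t)(0x80 | (addr)))    /* NVM data window */
#define SPD5_MEMREG_REG(addr) ((uint8_t)((~0x80) & (addr)))  /* internal hub register */

int main(void)
{
	assert(SPD_HUB_MEMREG(0x05) == 0x85);   /* SPD byte 5 on the current page */
	assert(SPD5_MEMREG_REG(0x0B) == 0x0B);  /* MR11, the page-pointer register */
	return 0;
}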
@@ -209,7 +209,9 @@ enum cb_err spd_fill_from_cache(uint8_t *spd_cache, struct spd_block *blk)
 
 		dram_type = *(spd_cache + SC_SPD_OFFSET(i) + SPD_DRAM_TYPE);
 
-		if (dram_type == SPD_DRAM_DDR4)
+		if (dram_type == SPD_DRAM_DDR5)
+			blk->len = CONFIG_DIMM_SPD_SIZE;
+		else if (dram_type == SPD_DRAM_DDR4)
 			blk->len = SPD_PAGE_LEN_DDR4;
 		else
 			blk->len = SPD_PAGE_LEN;
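The new branch makes the cached-SPD path mirror the DDR5-aware length selection: a DDR5 module needs the full CONFIG_DIMM_SPD_SIZE, DDR4 needs two 256-byte pages, and everything else stays at a single page. A minimal sketch of the resulting lengths, using stand-in values for the constants (the real values come from coreboot's SPD headers and Kconfig, so treat them as assumptions):

#include <stdint.h>

#define SPD_PAGE_LEN		256	/* one 256-byte SPD page */
#define SPD_PAGE_LEN_DDR4	512	/* full DDR4 SPD */
#define CONFIG_DIMM_SPD_SIZE	1024	/* full DDR5 SPD on a DDR5 board */
#define SPD_DRAM_DDR4		0x0C	/* JEDEC byte-2 memory type codes */
#define SPD_DRAM_DDR5		0x12

static uint16_t spd_len_for(uint8_t dram_type)
{
	if (dram_type == SPD_DRAM_DDR5)
		return CONFIG_DIMM_SPD_SIZE;	/* 1024 bytes */
	if (dram_type == SPD_DRAM_DDR4)
		return SPD_PAGE_LEN_DDR4;	/* 512 bytes */
	return SPD_PAGE_LEN;			/* 256 bytes */
}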
@@ -6,6 +6,8 @@ chip soc/intel/alderlake
	register "sagv" = "SaGv_Enabled"

	register "usb2_ports[1]" = "USB2_PORT_EMPTY" # Disable USB2 Port 1
	register "usb2_ports[2]" = "USB2_PORT_MID(OC_SKIP)" # Type-A Port A3
	register "usb2_ports[3]" = "USB2_PORT_MID(OC_SKIP)" # Type-A Port A2
	register "usb2_ports[4]" = "USB2_PORT_EMPTY" # Disable USB2 Port 4
	register "usb2_ports[5]" = "USB2_PORT_EMPTY" # Disable USB2 Port 5
	register "usb2_ports[6]" = "USB2_PORT_EMPTY" # Disable USB2 Port 6
@@ -112,7 +112,7 @@ int cpu_cl_poll_mailbox_ready(u32 cl_mailbox_addr)
 	u16 stall_cnt = 0;
 
 	do {
-		cl_mailbox_interface.data = read32((u32 *)cl_mailbox_addr);
+		cl_mailbox_interface.data = read32((u32 *)(uintptr_t)cl_mailbox_addr);
 		udelay(CPU_CRASHLOG_WAIT_STALL);
 		stall_cnt++;
 	} while ((cl_mailbox_interface.fields.busy == 1)

@@ -140,7 +140,7 @@ int cpu_cl_mailbox_cmd(u8 cmd, u8 param)
 	cl_mailbox_intf.fields.param = param;
 	cl_mailbox_intf.fields.busy = 1;
 
-	write32((u32 *)(cl_base_addr + cl_get_cpu_mb_int_addr()),
+	write32((u32 *)(uintptr_t)(cl_base_addr + cl_get_cpu_mb_int_addr()),
 		cl_mailbox_intf.data);
 
 	cpu_cl_poll_mailbox_ready(cl_base_addr + cl_get_cpu_mb_int_addr());

@@ -167,7 +167,7 @@ int pmc_cl_gen_descriptor_table(u32 desc_table_addr,
 				pmc_crashlog_desc_table_t *descriptor_table)
 {
 	int total_data_size = 0;
-	descriptor_table->numb_regions = read32((u32 *)desc_table_addr);
+	descriptor_table->numb_regions = read32((u32 *)(uintptr_t)desc_table_addr);
 	printk(BIOS_DEBUG, "CL PMC desc table: numb of regions is 0x%x at addr 0x%x\n",
 	       descriptor_table->numb_regions, desc_table_addr);
 	for (int i = 0; i < descriptor_table->numb_regions; i++) {

@@ -178,7 +178,7 @@ int pmc_cl_gen_descriptor_table(u32 desc_table_addr,
 			break;
 		}
 		desc_table_addr += 4;
-		descriptor_table->regions[i].data = read32((u32 *)(desc_table_addr));
+		descriptor_table->regions[i].data = read32((u32 *)(uintptr_t)(desc_table_addr));
 		total_data_size += descriptor_table->regions[i].bits.size * sizeof(u32);
 		printk(BIOS_DEBUG, "CL PMC desc table: region 0x%x has size 0x%x at offset 0x%x\n",
 		       i, descriptor_table->regions[i].bits.size,

@@ -295,7 +295,7 @@ bool cl_copy_data_from_sram(u32 src_bar,
 
 	u32 src_addr = src_bar + offset;
 
-	u32 data = read32((u32 *)src_addr);
+	u32 data = read32((u32 *)(uintptr_t)src_addr);
 
 	/* First 32bits of the record must not be 0xdeadbeef */
 	if (data == INVALID_CRASHLOG_RECORD) {

@@ -320,7 +320,7 @@ bool cl_copy_data_from_sram(u32 src_bar,
 	u32 copied = 0;
 	while (copied < size) {
 		/* DW by DW copy: byte access to PMC SRAM not allowed */
-		*dest_addr = read32((u32 *)src_addr);
+		*dest_addr = read32((u32 *)(uintptr_t)src_addr);
 		dest_addr++;
 		src_addr += 4;
 		copied++;
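All of the crashlog changes in this range are the same mechanical fix: the mailbox, descriptor-table and SRAM addresses are carried around as u32, and casting a 32-bit integer straight to a pointer type of a different width triggers -Wint-to-pointer-cast on a 64-bit stage. Widening through uintptr_t first makes the conversion explicit and silences the warning without changing behaviour. A small helper showing only the idiom, not part of the diff itself:

#include <stdint.h>

/* Hypothetical helper: turn a 32-bit MMIO address into a pointer the
 * same way the crashlog code now does, without int-to-pointer warnings. */
static inline volatile uint32_t *mmio32(uint32_t addr)
{
	return (volatile uint32_t *)(uintptr_t)addr;
}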
@@ -13,8 +13,11 @@ static void update_spd_len(struct spd_block *blk)
 		if (blk->spd_array[i] != NULL)
 			j |= blk->spd_array[i][SPD_DRAM_TYPE];
 
+	/* If spd used is DDR5, then its length is 1024 byte. */
+	if (j == SPD_DRAM_DDR5)
+		blk->len = CONFIG_DIMM_SPD_SIZE;
 	/* If spd used is DDR4, then its length is 512 byte. */
-	if (j == SPD_DRAM_DDR4)
+	else if (j == SPD_DRAM_DDR4)
 		blk->len = SPD_PAGE_LEN_DDR4;
 	else
 		blk->len = SPD_PAGE_LEN;
@@ -37,6 +40,61 @@ static void smbus_read_spd(u8 *spd, u8 addr)
 	}
 }
 
+static void switch_page(u8 spd_addr, u8 new_page)
+{
+	u32 offset;
+	/*
+	 * By default, an SPD5 hub accepts 1 byte addressing pointing
+	 * to the first 128 bytes of memory. MR11[2:0] selects the page
+	 * pointer to address the entire 1024 bytes of non-volatile memory.
+	 */
+	offset = SPD5_MEMREG_REG(SPD5_MR11);
+	smbus_write_byte(spd_addr, offset, new_page);
+}
+
+/*
+ * Read the SPD data over the SMBus, at the specified SPD address,
+ * starting at the specified starting offset and read the given amount of data.
+ */
+static void smbus_read_spd5(u8 *spd, u8 spd_addr, u16 size)
+{
+	u8 page = ~0;
+	u32 max_page_size = MAX_SPD_PAGE_SIZE_SPD5;
+
+	if (size > MAX_SPD_SIZE) {
+		printk(BIOS_ERR, "Maximum SPD size reached\n");
+		return;
+	}
+	for (int i = 0; i < size; i++) {
+		u8 next_page = (u8) (i / max_page_size);
+		if (next_page != page) {
+			switch_page(spd_addr, next_page);
+			page = next_page;
+		}
+		unsigned int byte_addr = SPD_HUB_MEMREG(i % max_page_size);
+		spd[i] = smbus_read_byte(spd_addr, byte_addr);
+	}
+}
+
+/* Read SPD5 MR0 and check if SPD Byte 0 matches the SPD5 HUB MR0 identifier. */
+static int is_spd5_hub(u8 spd_addr)
+{
+	u8 spd_hub_byte;
+
+	spd_hub_byte = smbus_read_byte(spd_addr, SPD5_MEMREG_REG(SPD5_MR0));
+	return spd_hub_byte == SPD5_MR0_SPD5_HUB_DEV;
+}
+
+/*
+ * Reset the SPD page back to page 0 on an SPD5 Hub device at the
+ * input SPD SMbus address.
+ */
+static void reset_page_spd5(u8 spd_addr)
+{
+	/* Set SPD5 MR11[2:0] = 0 (Page 0) */
+	smbus_write_byte(spd_addr, SPD5_MEMREG_REG(SPD5_MR11), 0);
+}
+
 /* return -1 if SMBus errors otherwise return 0 */
 static int get_spd(u8 *spd, u8 addr)
 {
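Assuming 128-byte pages and a 1024-byte DDR5 SPD image (the values behind MAX_SPD_PAGE_SIZE_SPD5 and CONFIG_DIMM_SPD_SIZE here), smbus_read_spd5() walks eight pages: byte i is reached by writing i / 128 into MR11 and then reading in-page offset i % 128 with bit 7 set. A standalone sketch of just the address math:

#include <stdio.h>

int main(void)
{
	const unsigned int page_size = 128, spd_size = 1024;

	for (unsigned int i = 0; i < spd_size; i += 300) {
		unsigned int page = i / page_size;             /* value written to MR11 */
		unsigned int mem_reg = 0x80 | (i % page_size); /* SPD_HUB_MEMREG(i % 128) */
		printf("SPD byte %4u -> page %u, address byte 0x%02x\n", i, page, mem_reg);
	}
	return 0;
}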
@@ -52,22 +110,31 @@ static int get_spd(u8 *spd, u8 addr)
		return -1;
	}

	if (i2c_eeprom_read(addr, 0, SPD_PAGE_LEN, spd) < 0) {
		printk(BIOS_INFO, "do_i2c_eeprom_read failed, using fallback\n");
		smbus_read_spd(spd, addr);
	}
	if (is_spd5_hub(addr)) {
		smbus_read_spd5(spd, addr, CONFIG_DIMM_SPD_SIZE);

	/* Check if module is DDR4, DDR4 spd is 512 byte. */
	if (spd[SPD_DRAM_TYPE] == SPD_DRAM_DDR4 && CONFIG_DIMM_SPD_SIZE > SPD_PAGE_LEN) {
		/* Switch to page 1 */
		smbus_write_byte(SPD_PAGE_1, 0, 0);
		/* Reset the page for the next loop iteration */
		reset_page_spd5(addr);
	} else {

		if (i2c_eeprom_read(addr, 0, SPD_PAGE_LEN, spd + SPD_PAGE_LEN) < 0) {
		if (i2c_eeprom_read(addr, 0, SPD_PAGE_LEN, spd) < 0) {
			printk(BIOS_INFO, "do_i2c_eeprom_read failed, using fallback\n");
			smbus_read_spd(spd + SPD_PAGE_LEN, addr);
			smbus_read_spd(spd, addr);
		}

		/* Check if module is DDR4, DDR4 spd is 512 byte. */
		if (spd[SPD_DRAM_TYPE] == SPD_DRAM_DDR4 &&
		    CONFIG_DIMM_SPD_SIZE > SPD_PAGE_LEN) {
			/* Switch to page 1 */
			smbus_write_byte(SPD_PAGE_1, 0, 0);

			if (i2c_eeprom_read(addr, 0, SPD_PAGE_LEN, spd + SPD_PAGE_LEN) < 0) {
				printk(BIOS_INFO, "do_i2c_eeprom_read failed, using fallback\n");
				smbus_read_spd(spd + SPD_PAGE_LEN, addr);
			}
			/* Restore to page 0 */
			smbus_write_byte(SPD_PAGE_0, 0, 0);
		}
		/* Restore to page 0 */
		smbus_write_byte(SPD_PAGE_0, 0, 0);
	}
	return 0;
}
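For context, get_spd() is the per-DIMM worker: an SPD5 hub is detected via MR0 and read page by page, while non-hub (DDR4 and earlier) modules keep the i2c_eeprom_read path with the SMBus fallback. A caller fills struct spd_block by walking addr_map and pointing spd_array at each slot's buffer. The loop below is only a sketch of that flow, assuming one flat CONFIG_DIMM_SPD_SIZE buffer per slot and the surrounding smbuslib context; it is not the actual coreboot caller.

static u8 spd_raw[CONFIG_DIMM_MAX * CONFIG_DIMM_SPD_SIZE];

static void read_all_spds(struct spd_block *blk)
{
	for (int i = 0; i < CONFIG_DIMM_MAX; i++) {
		u8 *dest = &spd_raw[i * CONFIG_DIMM_SPD_SIZE];

		/* Skip unpopulated slots; get_spd() returns -1 on SMBus errors. */
		if (blk->addr_map[i] == 0 || get_spd(dest, blk->addr_map[i]) < 0) {
			blk->spd_array[i] = NULL;
			continue;
		}
		blk->spd_array[i] = dest;
	}
	update_spd_len(blk);
}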
@@ -34,7 +34,7 @@ static u32 disc_tab_addr;
 
 static u64 get_disc_tab_header(void)
 {
-	return read64((void *)disc_tab_addr);
+	return read64((void *)(uintptr_t)disc_tab_addr);
 }
 
 /* Get the SRAM BAR. */

@@ -338,7 +338,7 @@ static bool cpu_cl_gen_discovery_table(void)
 
 	disc_tab_addr = bar_addr + get_disc_table_offset();
 
-	u32 dw0 = read32((u32 *)disc_tab_addr);
+	u32 dw0 = read32((u32 *)(uintptr_t)disc_tab_addr);
 	if (!is_crashlog_data_valid(dw0))
 		return false;
 

@@ -351,7 +351,7 @@ static bool cpu_cl_gen_discovery_table(void)
 	for (int i = 0; i < cpu_cl_disc_tab.header.fields.count; i++) {
 		cur_offset = 8 + 24 * i;
 
-		dw0 = read32((u32 *)disc_tab_addr + cur_offset);
+		dw0 = read32((u32 *)(uintptr_t)disc_tab_addr + cur_offset);
 		if (!is_crashlog_data_valid(dw0))
 			continue;
 

@@ -361,7 +361,7 @@ static bool cpu_cl_gen_discovery_table(void)
 			break;
 		}
 
-		cpu_cl_disc_tab.buffers[i].data = read64((void *)(disc_tab_addr + cur_offset));
+		cpu_cl_disc_tab.buffers[i].data = read64((void *)(uintptr_t)(disc_tab_addr + cur_offset));
 		printk(BIOS_DEBUG, "cpu_crashlog_discovery_table buffer: 0x%x size: "
 		       "0x%x offset: 0x%x\n", i, cpu_cl_disc_tab.buffers[i].fields.size,
 		       cpu_cl_disc_tab.buffers[i].fields.offset);

@@ -450,7 +450,7 @@ void cpu_cl_rearm(void)
 	cl_punit_control_interface_t punit_ctrl_intfc;
 	memset(&punit_ctrl_intfc, 0, sizeof(cl_punit_control_interface_t));
 	punit_ctrl_intfc.fields.set_re_arm = 1;
-	write32((u32 *)(ctrl_sts_intfc_addr), punit_ctrl_intfc.data);
+	write32((u32 *)(uintptr_t)(ctrl_sts_intfc_addr), punit_ctrl_intfc.data);
 
 	if (!wait_and_check(CRASHLOG_RE_ARM_STATUS_MASK))
 		printk(BIOS_ERR, "CPU crashlog re_arm not asserted\n");

@@ -480,7 +480,7 @@ void cpu_cl_cleanup(void)
 	cl_punit_control_interface_t punit_ctrl_intfc;
 	memset(&punit_ctrl_intfc, 0, sizeof(cl_punit_control_interface_t));
 	punit_ctrl_intfc.fields.set_storage_off = 1;
-	write32((u32 *)(ctrl_sts_intfc_addr), punit_ctrl_intfc.data);
+	write32((u32 *)(uintptr_t)(ctrl_sts_intfc_addr), punit_ctrl_intfc.data);
 
 	if (!wait_and_check(CRASHLOG_PUNIT_STORAGE_OFF_MASK))
 		printk(BIOS_ERR, "CPU crashlog storage_off not asserted\n");
@@ -8,7 +8,7 @@
 #define LPX_PHYSICAL_CH_WIDTH 16
 #define LPX_CHANNELS CHANNEL_COUNT(LPX_PHYSICAL_CH_WIDTH)
 
-#define DDR5_PHYSICAL_CH_WIDTH 32
+#define DDR5_PHYSICAL_CH_WIDTH 64 /* 32*2 */
 #define DDR5_CHANNELS CHANNEL_COUNT(DDR5_PHYSICAL_CH_WIDTH)
 
 static void set_rcomp_config(FSP_M_CONFIG *mem_cfg, const struct mb_cfg *mb_cfg)
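Doubling DDR5_PHYSICAL_CH_WIDTH halves the number of physical DDR5 channels the driver enumerates, which is what allows phys_to_mrc_map to shrink from four entries to two in the next hunk. Assuming CHANNEL_COUNT() simply divides a 128-bit total DRAM data width by the per-channel width (an assumption for illustration, not the macro's actual definition), the arithmetic works out as:

/* Hypothetical stand-in for the real CHANNEL_COUNT() macro. */
#define EXAMPLE_TOTAL_DATA_WIDTH	128
#define EXAMPLE_CHANNEL_COUNT(w)	(EXAMPLE_TOTAL_DATA_WIDTH / (w))

_Static_assert(EXAMPLE_CHANNEL_COUNT(32) == 4, "old view: four 32-bit DDR5 sub-channels");
_Static_assert(EXAMPLE_CHANNEL_COUNT(64) == 2, "new view: two 64-bit DDR5 channels (32*2)");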
@@ -38,18 +38,16 @@ static const struct soc_mem_cfg soc_mem_cfg[] = {
 		.num_phys_channels = DDR5_CHANNELS,
 		.phys_to_mrc_map = {
 			[0] = 0,
-			[1] = 1,
-			[2] = 4,
-			[3] = 5,
+			[1] = 4,
 		},
 		.md_phy_masks = {
 			/*
-			 * Physical channels 0 and 1 are populated in case of
-			 * half-populated configurations.
+			 * Only channel 0 is populated in case of half-populated
+			 * configuration.
 			 */
-			.half_channel = BIT(0) | BIT(1),
-			/* In mixed topologies, channels 2 and 3 are always memory-down. */
-			.mixed_topo = BIT(2) | BIT(3),
+			.half_channel = BIT(0),
+			/* In mixed topologies, either channel 0 or 1 can be memory-down. */
+			.mixed_topo = BIT(0) | BIT(1),
 		},
 	},
 	[MEM_TYPE_LP5X] = {
@@ -75,7 +73,8 @@ static const struct soc_mem_cfg soc_mem_cfg[] = {
 	},
 };
 
-static void mem_init_spd_upds(FSP_M_CONFIG *mem_cfg, const struct mem_channel_data *data)
+static void mem_init_spd_upds(FSP_M_CONFIG *mem_cfg, const struct mem_channel_data *data,
+			      bool expand_channels)
 {
 	efi_uintn_t *spd_upds[MRC_CHANNELS][CONFIG_DIMMS_PER_CHANNEL] = {
 		[0] = { &mem_cfg->MemorySpdPtr000, &mem_cfg->MemorySpdPtr001, },
@@ -108,7 +107,16 @@ static void mem_init_spd_upds(FSP_M_CONFIG *mem_cfg, const struct mem_channel_da
 		for (dimm = 0; dimm < CONFIG_DIMMS_PER_CHANNEL; dimm++) {
 			efi_uintn_t *spd_ptr = spd_upds[ch][dimm];
 
-			*spd_ptr = data->spd[ch][dimm];
+			// In DDR5 systems, we need to copy the SPD data such that:
+			// Channel 0 data is used by channel 0 and 1
+			// Channel 2 data is used by channel 2 and 3
+			// Channel 4 data is used by channel 4 and 5
+			// Channel 6 data is used by channel 6 and 7
+			if (expand_channels)
+				*spd_ptr = data->spd[ch & 6][dimm];
+			else
+				*spd_ptr = data->spd[ch][dimm];
 
 			if (*spd_ptr)
 				enable_channel = 1;
 		}
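The expand_channels path reuses one SPD blob for both MRC sub-channels of a DIMM by clearing bit 0 of the channel index, exactly as the new comment describes. A small standalone check of that mapping:

#include <stdio.h>

int main(void)
{
	for (int ch = 0; ch < 8; ch++)
		printf("MRC channel %d takes SPD data from channel %d\n", ch, ch & 6);
	/* Prints: 0->0, 1->0, 2->2, 3->2, 4->4, 5->4, 6->6, 7->6 */
	return 0;
}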
@@ -174,27 +182,12 @@ static void mem_init_dqs_upds(FSP_M_CONFIG *mem_cfg, const struct mem_channel_da
 	mem_init_dq_dqs_upds(dqs_upds, mb_cfg->dqs_map, upd_size, data, auto_detect);
 }
 
-#define DDR5_CH_DIMM_OFFSET(ch, dimm) ((ch) * CONFIG_DIMMS_PER_CHANNEL + (dimm))
-
-static void ddr5_fill_dimm_module_info(FSP_M_CONFIG *mem_cfg, const struct mb_cfg *mb_cfg,
-				       const struct mem_spd *spd_info)
-{
-	for (size_t ch = 0; ch < soc_mem_cfg[MEM_TYPE_DDR5].num_phys_channels; ch++) {
-		for (size_t dimm = 0; dimm < CONFIG_DIMMS_PER_CHANNEL; dimm++) {
-			size_t mrc_ch = soc_mem_cfg[MEM_TYPE_DDR5].phys_to_mrc_map[ch];
-			mem_cfg->SpdAddressTable[DDR5_CH_DIMM_OFFSET(mrc_ch, dimm)] =
-				spd_info->smbus[ch].addr_dimm[dimm] << 1;
-		}
-	}
-	mem_init_dq_upds(mem_cfg, NULL, mb_cfg, true);
-	mem_init_dqs_upds(mem_cfg, NULL, mb_cfg, true);
-}
-
 void memcfg_init(FSPM_UPD *memupd, const struct mb_cfg *mb_cfg,
 		 const struct mem_spd *spd_info, bool half_populated)
 {
 	struct mem_channel_data data;
 	bool dq_dqs_auto_detect = false;
+	bool expand_channels = false;
 	FSP_M_CONFIG *mem_cfg = &memupd->FspmConfig;
 
 	mem_cfg->ECT = mb_cfg->ect;
@@ -205,14 +198,7 @@ void memcfg_init(FSPM_UPD *memupd, const struct mb_cfg *mb_cfg,
 	case MEM_TYPE_DDR5:
 		meminit_ddr(mem_cfg, &mb_cfg->ddr_config);
 		dq_dqs_auto_detect = true;
-		/*
-		 * TODO: Drop this workaround once SMBus driver in coreboot is updated to
-		 * support DDR5 EEPROM reading.
-		 */
-		if (spd_info->topo == MEM_TOPO_DIMM_MODULE) {
-			ddr5_fill_dimm_module_info(mem_cfg, mb_cfg, spd_info);
-			return;
-		}
+		expand_channels = true;
 		break;
 	case MEM_TYPE_LP5X:
 		meminit_lp5x(mem_cfg, &mb_cfg->lp5x_config);
@@ -221,9 +207,9 @@ void memcfg_init(FSPM_UPD *memupd, const struct mb_cfg *mb_cfg,
 		die("Unsupported memory type(%d)\n", mb_cfg->type);
 	}
 
-	mem_populate_channel_data(memupd, &soc_mem_cfg[mb_cfg->type], spd_info,
-				  half_populated, &data);
-	mem_init_spd_upds(mem_cfg, &data);
+	mem_populate_channel_data(memupd, &soc_mem_cfg[mb_cfg->type], spd_info, half_populated,
+				  &data);
+	mem_init_spd_upds(mem_cfg, &data, expand_channels);
 	mem_init_dq_upds(mem_cfg, &data, mb_cfg, dq_dqs_auto_detect);
 	mem_init_dqs_upds(mem_cfg, &data, mb_cfg, dq_dqs_auto_detect);
 }
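With ddr5_fill_dimm_module_info() gone, a DDR5 DIMM board now takes the common path: memcfg_init() populates the channel data from SMBus SPD reads and expands it across sub-channels via expand_channels. A hypothetical mainboard fragment showing how the unchanged memcfg_init() entry point is driven (board-specific names and SPD addresses are illustrative only, not taken from this diff):

static const struct mb_cfg board_mem_cfg = {
	.type = MEM_TYPE_DDR5,
	/* ect, rcomp, dq/dqs maps, etc. omitted for brevity */
};

void mainboard_memory_init_params(FSPM_UPD *memupd)
{
	const struct mem_spd spd_info = {
		.topo = MEM_TOPO_DIMM_MODULE,
		.smbus = {
			[0] = { .addr_dimm[0] = 0x50, },
			[1] = { .addr_dimm[0] = 0x52, },
		},
	};

	memcfg_init(memupd, &board_mem_cfg, &spd_info, false);
}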