soc/intel/xeon_sp: Add fill_pd_distances

Add a simple algorithm to cover the basic cases of proximity
domain distance handling. At the same time, the local variables
in fill_pds() are scoped more tightly.

TEST=Build and boot on intel/archercity CRB

ACPI SRAT, SLIT and DMAR (Remapping Hardware Static Affinity) are
generated correctly for a 2S system.

Change-Id: I2b666dc2a140d1bb1fdff9bc7b835d5cf5b4bbc5
Signed-off-by: Shuo Liu <shuo.liu@intel.com>
Co-authored-by: Ziang Wang <ziang.wang@intel.com>
Co-authored-by: Gang Chen <gang.c.chen@intel.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/81442
Reviewed-by: Patrick Rudolph <patrick.rudolph@9elements.com>
Reviewed-by: Angel Pons <th3fanbus@gmail.com>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Author: Shuo Liu
Date: 2024-03-27 04:26:16 +08:00
Committer: Felix Held
Parent: 49e5d3dc26
Commit: 9580e7fba8
3 files changed, 36 insertions(+), 19 deletions(-)


@@ -59,6 +59,7 @@ extern struct proximity_domains pds;
 
 void dump_pds(void);
 void fill_pds(void);
+void fill_pd_distances(void);
 
 /*
  * Return the total size of memory regions in generic initiator affinity


@@ -50,8 +50,8 @@ void fill_pds(void)
 	memset(pds.pds, 0, sizeof(struct proximity_domain) * pds.num_pds);
 
 	/* Fill in processor domains */
-	uint8_t i, j, socket;
-	for (socket = 0, i = 0; i < num_sockets; socket++) {
+	uint8_t i = 0;
+	for (uint8_t socket = 0; socket < num_sockets; socket++) {
 		if (!soc_cpu_is_enabled(socket))
 			continue;
 		pds.pds[i].pd_type = PD_TYPE_PROCESSOR;
@@ -59,13 +59,6 @@ void fill_pds(void)
 		pds.pds[i].distances = malloc(sizeof(uint8_t) * pds.num_pds);
 		if (!pds.pds[i].distances)
 			die("%s %d out of memory.", __FILE__, __LINE__);
-		/* hard code the distances for now, till we know how to calculate them. */
-		for (j = 0; j < pds.num_pds; j++) {
-			if (j == i)
-				pds.pds[i].distances[j] = 0x0a;
-			else
-				pds.pds[i].distances[j] = 0x0e;
-		}
 		i++;
 	}
 
@@ -75,10 +68,9 @@ void fill_pds(void)
 
 #if CONFIG(SOC_INTEL_HAS_CXL)
 	/* There are CXL nodes, fill in generic initiator domain after the processors pds */
-	uint8_t skt_id, cxl_id;
 	const CXL_NODE_SOCKET *cxl_hob = get_cxl_node();
-	for (skt_id = 0, i = num_sockets; skt_id < MAX_SOCKET; skt_id++, i++) {
-		for (cxl_id = 0; cxl_id < cxl_hob[skt_id].CxlNodeCount; ++cxl_id) {
+	for (uint8_t skt_id = 0; skt_id < MAX_SOCKET; skt_id++) {
+		for (uint8_t cxl_id = 0; cxl_id < cxl_hob[skt_id].CxlNodeCount; ++cxl_id) {
 			const CXL_NODE_INFO node = cxl_hob[skt_id].CxlNodeInfo[cxl_id];
 			pds.pds[i].pd_type = PD_TYPE_GENERIC_INITIATOR;
 			pds.pds[i].socket_bitmap = node.SocketBitmap;
@@ -89,13 +81,7 @@
 			pds.pds[i].distances = malloc(sizeof(uint8_t) * pds.num_pds);
 			if (!pds.pds[i].distances)
 				die("%s %d out of memory.", __FILE__, __LINE__);
-			/* hard code the distances until we know how to calculate them */
-			for (j = 0; j < pds.num_pds; j++) {
-				if (j == i)
-					pds.pds[i].distances[j] = 0x0a;
-				else
-					pds.pds[i].distances[j] = 0x0e;
-			}
+
 			i++;
 		}
 	}
 #endif
@@ -158,3 +144,32 @@ uint32_t memory_to_pd(const struct SystemMemoryMapElement *mem)
 {
 	return socket_to_pd(mem->SocketId);
 }
+
+#define PD_DISTANCE_SELF		0x0A
+#define PD_DISTANCE_SAME_SOCKET		0x0C
+#define PD_DISTANCE_CROSS_SOCKET	0x14
+#define PD_DISTANCE_MAX			0xFF
+#define PD_DISTANCE_IO_EXTRA		0x01
+
+void fill_pd_distances(void)
+{
+	for (int i = 0; i < pds.num_pds; i++) {
+		for (int j = 0; j < pds.num_pds; j++) {
+			if (i == j) {
+				pds.pds[i].distances[j] = PD_DISTANCE_SELF;
+				continue;
+			}
+
+			if (pds.pds[i].socket_bitmap == pds.pds[j].socket_bitmap)
+				pds.pds[i].distances[j] = PD_DISTANCE_SAME_SOCKET;
+			else
+				pds.pds[i].distances[j] = PD_DISTANCE_CROSS_SOCKET;
+
+			if (pds.pds[i].pd_type == PD_TYPE_GENERIC_INITIATOR)
+				pds.pds[i].distances[j] += PD_DISTANCE_IO_EXTRA;
+
+			if (pds.pds[j].pd_type == PD_TYPE_GENERIC_INITIATOR)
+				pds.pds[i].distances[j] += PD_DISTANCE_IO_EXTRA;
+		}
+	}
+}
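
In SLIT terms, 0x0A (decimal 10) is the architecturally defined distance of a
domain to itself and larger values are relative costs, with 0xFF meaning
unreachable; the constants above encode a same-socket and a cross-socket hop
plus a small penalty for each generic-initiator (CXL) endpoint. For
illustration only (not part of this commit), the standalone sketch below
replicates these rules for a hypothetical 2-socket system with one CXL
generic-initiator domain per socket and prints the resulting matrix; the
struct and sample values are stand-ins for the real pds bookkeeping:

/* Standalone illustration only -- mirrors the rules in fill_pd_distances()
 * for a hypothetical 2S + 2 CXL-domain layout. Not coreboot code. */
#include <stdio.h>
#include <stdint.h>

#define PD_DISTANCE_SELF		0x0A
#define PD_DISTANCE_SAME_SOCKET		0x0C
#define PD_DISTANCE_CROSS_SOCKET	0x14
#define PD_DISTANCE_IO_EXTRA		0x01

enum pd_type { PD_TYPE_PROCESSOR, PD_TYPE_GENERIC_INITIATOR };

struct pd { enum pd_type type; uint8_t socket_bitmap; };

int main(void)
{
	/* PD0/PD1: processor domains on sockets 0/1;
	 * PD2/PD3: CXL generic-initiator domains on sockets 0/1. */
	const struct pd pds[] = {
		{ PD_TYPE_PROCESSOR,         0x1 },
		{ PD_TYPE_PROCESSOR,         0x2 },
		{ PD_TYPE_GENERIC_INITIATOR, 0x1 },
		{ PD_TYPE_GENERIC_INITIATOR, 0x2 },
	};
	const int n = sizeof(pds) / sizeof(pds[0]);

	for (int i = 0; i < n; i++) {
		for (int j = 0; j < n; j++) {
			uint8_t d;
			if (i == j) {
				d = PD_DISTANCE_SELF;
			} else {
				d = pds[i].socket_bitmap == pds[j].socket_bitmap
					? PD_DISTANCE_SAME_SOCKET
					: PD_DISTANCE_CROSS_SOCKET;
				/* One extra hop per generic-initiator endpoint */
				if (pds[i].type == PD_TYPE_GENERIC_INITIATOR)
					d += PD_DISTANCE_IO_EXTRA;
				if (pds[j].type == PD_TYPE_GENERIC_INITIATOR)
					d += PD_DISTANCE_IO_EXTRA;
			}
			printf("0x%02x ", d);
		}
		printf("\n");
	}
	return 0;
}

For this layout the sketch prints 0x0a on the diagonal, 0x0d between a
processor and the CXL domain on the same socket, 0x15 between a processor and
the remote CXL domain, and 0x16 between the two CXL domains, i.e. each
generic-initiator endpoint adds one to the base socket distance.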


@@ -340,6 +340,7 @@ static void mmapvtd_read_resources(struct device *dev)
 	if (!once) {
 		/* Construct NUMA data structure. This is needed for CXL. */
 		fill_pds();
+		fill_pd_distances();
 		dump_pds();
 		once = true;
 	}
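
Note on ordering (editorial): fill_pd_distances() must run after fill_pds(),
since fill_pds() both determines pds.num_pds and allocates each domain's
distances array that the distance pass writes into; the once guard appears to
keep the whole NUMA construction from running more than once across
read_resources() invocations.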