soc/intel/xeon_sp: Unshare Xeon-SP chip common codes

GraniteRapids (6th Gen Xeon-SP) FSP contains changes to the IIO stack
descriptors that impact the way coreboot creates domains.
Separate the common code as preparation for 6th Gen and later platforms.

Change-Id: Iab6acaa5e5c090c8d821bd7c2d3e0e0ad7486bdc
Signed-off-by: Shuo Liu <shuo.liu@intel.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/81312
Reviewed-by: Nico Huber <nico.h@gmx.de>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Lean Sheng Tan <sheng.tan@9elements.com>
This commit is contained in:
Shuo Liu
2024-03-11 07:14:07 +08:00
committed by Lean Sheng Tan
parent f7e456748f
commit ec58bebbd6
7 changed files with 221 additions and 194 deletions

View File

@ -12,19 +12,6 @@
#include <soc/util.h>
#include <stdlib.h>
/*
 * Look up the FSP IIO_UDS HOB stack resource descriptor for a PCI domain
 * device. Socket and stack indices are recovered from the domain path
 * encoded in the device path.
 */
static const STACK_RES *domain_to_stack_res(const struct device *dev)
{
	assert(dev->path.type == DEVICE_PATH_DOMAIN);
	const union xeon_domain_path dn = {
		.domain_path = dev->path.domain.domain
	};
	const IIO_UDS *hob = get_iio_uds();
	assert(hob != NULL);
	return &hob->PlatformData.IIO_resource[dn.socket].StackRes[dn.stack];
}
/**
* Find all device of a given vendor and type for the specified socket.
* The function iterates over all PCI domains of the specified socket
@ -189,81 +176,7 @@ int iio_pci_domain_stack_from_dev(struct device *dev)
return dn.stack;
}
/*
 * Report the resource windows of an IIO PCIe domain to the allocator.
 *
 * All windows (IO, 32-bit MMIO, 64-bit MMIO) come from the stack's FSP
 * IIO_UDS HOB descriptor; a window is only reported when its base is
 * strictly below its limit, which skips empty or invalid entries.
 */
void iio_pci_domain_read_resources(struct device *dev)
{
	struct resource *res;
	const STACK_RES *sr = domain_to_stack_res(dev);
	if (!sr)
		return;
	int index = 0;
	if (is_domain0(dev)) {
		/* The 0 - 0xfff IO range is not reported by the HOB but still gets decoded */
		res = new_resource(dev, index++);
		res->base = 0;
		res->size = 0x1000;
		res->limit = 0xfff;
		res->flags = IORESOURCE_IO | IORESOURCE_SUBTRACTIVE | IORESOURCE_ASSIGNED;
	}
	/* IO window assigned to this stack */
	if (sr->PciResourceIoBase < sr->PciResourceIoLimit) {
		res = new_resource(dev, index++);
		res->base = sr->PciResourceIoBase;
		res->limit = sr->PciResourceIoLimit;
		res->size = res->limit - res->base + 1;
		res->flags = IORESOURCE_IO | IORESOURCE_ASSIGNED;
	}
	/* 32-bit MMIO window */
	if (sr->PciResourceMem32Base < sr->PciResourceMem32Limit) {
		res = new_resource(dev, index++);
		res->base = sr->PciResourceMem32Base;
		res->limit = sr->PciResourceMem32Limit;
		res->size = res->limit - res->base + 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
	}
	/* 64-bit MMIO window */
	if (sr->PciResourceMem64Base < sr->PciResourceMem64Limit) {
		res = new_resource(dev, index++);
		res->base = sr->PciResourceMem64Base;
		res->limit = sr->PciResourceMem64Limit;
		res->size = res->limit - res->base + 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
	}
}
/*
 * Used by IIO stacks for PCIe bridges. Each such stack contains a single
 * PCI host bridge, and all the bus numbers on the IIO stack can be used
 * for this bridge.
 */
static struct device_operations iio_pcie_domain_ops = {
	.read_resources = iio_pci_domain_read_resources,
	.set_resources = pci_domain_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
/* ACPI hooks only exist when ACPI table generation is enabled */
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt = pci_domain_fill_ssdt,
#endif
};
/*
 * Used by UBOX stacks. Those contain multiple PCI host bridges, each having
 * only one bus with UBOX devices. UBOX devices have no resources, hence the
 * no-op resource handlers.
 */
static struct device_operations ubox_pcie_domain_ops = {
	.read_resources = noop_read_resources,
	.set_resources = noop_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
/* ACPI hooks only exist when ACPI table generation is enabled */
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt = pci_domain_fill_ssdt,
#endif
};
static void soc_create_domains(const union xeon_domain_path dp, struct bus *upstream,
void create_domain(const union xeon_domain_path dp, struct bus *upstream,
int bus_base, int bus_limit, const char *type,
struct device_operations *ops,
const size_t pci_segment_group)
@ -285,92 +198,6 @@ static void soc_create_domains(const union xeon_domain_path dp, struct bus *upst
bus->segment_group = pci_segment_group;
}
/* Create a single PCIe domain spanning the stack's whole bus range. */
static void soc_create_pcie_domains(const union xeon_domain_path dp, struct bus *upstream,
				    const STACK_RES *sr, const size_t pci_segment_group)
{
	soc_create_domains(dp, upstream, sr->BusBase, sr->BusLimit, DOMAIN_TYPE_PCIE,
			   &iio_pcie_domain_ops, pci_segment_group);
}
/*
 * On the first Xeon-SP generations there are no separate UBOX stacks,
 * and the UBOX devices reside on the first and second IIO. Starting
 * with 3rd gen Xeon-SP the UBOX devices are located on their own IIO.
 */
static void soc_create_ubox_domains(const union xeon_domain_path dp, struct bus *upstream,
				    const STACK_RES *sr, const size_t pci_segment_group)
{
	/* Only expect 2 UBOX buses here */
	assert(sr->BusBase + 1 == sr->BusLimit);
	/* Each UBOX bus becomes its own single-bus domain (UD0/UD1) */
	soc_create_domains(dp, upstream, sr->BusBase, sr->BusBase, DOMAIN_TYPE_UBX0,
			   &ubox_pcie_domain_ops, pci_segment_group);
	soc_create_domains(dp, upstream, sr->BusLimit, sr->BusLimit, DOMAIN_TYPE_UBX1,
			   &ubox_pcie_domain_ops, pci_segment_group);
}
#if CONFIG(SOC_INTEL_HAS_CXL)
/*
 * Report the resource windows of a CXL domain.
 *
 * The stack descriptor provides both the full stack windows (IoBase,
 * Mmio32Base, Mmio64Base) and the sub-windows reserved for PCIe
 * (PciResource*Base). The ranges reported here are the leading parts of
 * the stack windows NOT handed to PCIe, i.e. [IoBase, PciResourceIoBase - 1]
 * and the MMIO equivalents. NOTE(review): presumably these serve the CXL 1.1
 * end-points — confirm against the FSP IIO_UDS documentation.
 */
void iio_cxl_domain_read_resources(struct device *dev)
{
	struct resource *res;
	const STACK_RES *sr = domain_to_stack_res(dev);
	if (!sr)
		return;
	int index = 0;
	if (sr->IoBase < sr->PciResourceIoBase) {
		/* IO range below the PCIe IO window */
		res = new_resource(dev, index++);
		res->base = sr->IoBase;
		res->limit = sr->PciResourceIoBase - 1;
		res->size = res->limit - res->base + 1;
		res->flags = IORESOURCE_IO | IORESOURCE_ASSIGNED;
	}
	if (sr->Mmio32Base < sr->PciResourceMem32Base) {
		/* 32-bit MMIO range below the PCIe Mem32 window */
		res = new_resource(dev, index++);
		res->base = sr->Mmio32Base;
		res->limit = sr->PciResourceMem32Base - 1;
		res->size = res->limit - res->base + 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
	}
	if (sr->Mmio64Base < sr->PciResourceMem64Base) {
		/* 64-bit MMIO range below the PCIe Mem64 window */
		res = new_resource(dev, index++);
		res->base = sr->Mmio64Base;
		res->limit = sr->PciResourceMem64Base - 1;
		res->size = res->limit - res->base + 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
	}
}
/* Domain ops for CXL domains; only resource reporting differs from PCIe */
static struct device_operations iio_cxl_domain_ops = {
	.read_resources = iio_cxl_domain_read_resources,
	.set_resources = pci_domain_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt = pci_domain_fill_ssdt,
#endif
};
/*
 * Split a CXL stack into two domains: the stack's first bus carries plain
 * PCIe RCiEPs, the remaining buses carry the CXL 1.1 end-points.
 */
void soc_create_cxl_domains(const union xeon_domain_path dp, struct bus *bus,
			    const STACK_RES *sr, const size_t pci_segment_group)
{
	/* A CXL stack needs at least two buses */
	assert(sr->BusBase + 1 <= sr->BusLimit);
	/* 1st domain contains PCIe RCiEPs */
	soc_create_domains(dp, bus, sr->BusBase, sr->BusBase, DOMAIN_TYPE_PCIE,
			   &iio_pcie_domain_ops, pci_segment_group);
	/* 2nd domain contains CXL 1.1 end-points */
	soc_create_domains(dp, bus, sr->BusBase + 1, sr->BusLimit, DOMAIN_TYPE_CXL,
			   &iio_cxl_domain_ops, pci_segment_group);
}
#endif //CONFIG(SOC_INTEL_HAS_CXL)
/* Attach stack as domains */
void attach_iio_stacks(void)
{
@ -394,14 +221,7 @@ void attach_iio_stacks(void)
dn.socket = s;
dn.stack = x;
if (is_ubox_stack_res(ri))
soc_create_ubox_domains(dn, root_bus, ri, seg);
else if (CONFIG(SOC_INTEL_HAS_CXL) && is_iio_cxl_stack_res(ri))
soc_create_cxl_domains(dn, root_bus, ri, seg);
else if (is_pcie_iio_stack_res(ri))
soc_create_pcie_domains(dn, root_bus, ri, seg);
else if (CONFIG(HAVE_IOAT_DOMAINS) && is_ioat_iio_stack_res(ri))
soc_create_ioat_domains(dn, root_bus, ri, seg);
create_xeonsp_domains(dn, root_bus, ri, seg);
}
}
}

View File

@ -0,0 +1,201 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include <acpi/acpigen_pci.h>
#include <assert.h>
#include <console/console.h>
#include <device/pci.h>
#include <intelblocks/acpi.h>
#include <post.h>
#include <soc/acpi.h>
#include <soc/chip_common.h>
#include <soc/soc_util.h>
#include <soc/util.h>
#include <stdlib.h>
static const STACK_RES *domain_to_stack_res(const struct device *dev)
{
assert(dev->path.type == DEVICE_PATH_DOMAIN);
const union xeon_domain_path dn = {
.domain_path = dev->path.domain.domain
};
const IIO_UDS *hob = get_iio_uds();
assert(hob != NULL);
return &hob->PlatformData.IIO_resource[dn.socket].StackRes[dn.stack];
}
/*
 * read_resources() hook for IIO PCIe domains.
 *
 * Publishes the IO, 32-bit MMIO and 64-bit MMIO windows that the FSP
 * IIO_UDS HOB assigned to this stack. A window whose base is not
 * strictly below its limit is treated as absent.
 */
static void iio_pci_domain_read_resources(struct device *dev)
{
	const STACK_RES *stack = domain_to_stack_res(dev);

	if (!stack)
		return;

	int idx = 0;
	struct resource *resource;

	if (is_domain0(dev)) {
		/* The 0 - 0xfff IO range is not reported by the HOB but still gets decoded */
		resource = new_resource(dev, idx++);
		resource->base = 0;
		resource->size = 0x1000;
		resource->limit = 0xfff;
		resource->flags = IORESOURCE_IO | IORESOURCE_SUBTRACTIVE | IORESOURCE_ASSIGNED;
	}

	if (stack->PciResourceIoBase < stack->PciResourceIoLimit) {
		resource = new_resource(dev, idx++);
		resource->base = stack->PciResourceIoBase;
		resource->limit = stack->PciResourceIoLimit;
		resource->size = resource->limit - resource->base + 1;
		resource->flags = IORESOURCE_IO | IORESOURCE_ASSIGNED;
	}

	if (stack->PciResourceMem32Base < stack->PciResourceMem32Limit) {
		resource = new_resource(dev, idx++);
		resource->base = stack->PciResourceMem32Base;
		resource->limit = stack->PciResourceMem32Limit;
		resource->size = resource->limit - resource->base + 1;
		resource->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
	}

	if (stack->PciResourceMem64Base < stack->PciResourceMem64Limit) {
		resource = new_resource(dev, idx++);
		resource->base = stack->PciResourceMem64Base;
		resource->limit = stack->PciResourceMem64Limit;
		resource->size = resource->limit - resource->base + 1;
		resource->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
	}
}
/*
 * Used by IIO stacks for PCIe bridges. Each such stack contains a single
 * PCI host bridge, and all the bus numbers on the IIO stack can be used
 * for this bridge.
 */
static struct device_operations iio_pcie_domain_ops = {
	.read_resources = iio_pci_domain_read_resources,
	.set_resources = pci_domain_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
/* ACPI hooks only exist when ACPI table generation is enabled */
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt = pci_domain_fill_ssdt,
#endif
};
/*
 * Used by UBOX stacks. Those contain multiple PCI host bridges, each having
 * only one bus with UBOX devices. UBOX devices have no resources, hence the
 * no-op resource handlers.
 */
static struct device_operations ubox_pcie_domain_ops = {
	.read_resources = noop_read_resources,
	.set_resources = noop_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
/* ACPI hooks only exist when ACPI table generation is enabled */
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt = pci_domain_fill_ssdt,
#endif
};
static void create_pcie_domains(const union xeon_domain_path dp, struct bus *upstream,
const STACK_RES *sr, const size_t pci_segment_group)
{
create_domain(dp, upstream, sr->BusBase, sr->BusLimit, DOMAIN_TYPE_PCIE,
&iio_pcie_domain_ops, pci_segment_group);
}
/*
* On the first Xeon-SP generations there are no separate UBOX stacks,
* and the UBOX devices reside on the first and second IIO. Starting
* with 3rd gen Xeon-SP the UBOX devices are located on their own IIO.
*/
static void create_ubox_domains(const union xeon_domain_path dp, struct bus *upstream,
const STACK_RES *sr, const size_t pci_segment_group)
{
/* Only expect 2 UBOX buses here */
assert(sr->BusBase + 1 == sr->BusLimit);
create_domain(dp, upstream, sr->BusBase, sr->BusBase, DOMAIN_TYPE_UBX0,
&ubox_pcie_domain_ops, pci_segment_group);
create_domain(dp, upstream, sr->BusLimit, sr->BusLimit, DOMAIN_TYPE_UBX1,
&ubox_pcie_domain_ops, pci_segment_group);
}
/*
 * Unconditional prototype: create_xeonsp_domains() below references
 * create_cxl_domains() behind a compile-time CONFIG(SOC_INTEL_HAS_CXL)
 * check, so a declaration must be visible even when the definition
 * (guarded by the #if below) is compiled out.
 */
void create_cxl_domains(const union xeon_domain_path dp, struct bus *bus,
			const STACK_RES *sr, const size_t pci_segment_group);
#if CONFIG(SOC_INTEL_HAS_CXL)
/*
 * read_resources() hook for CXL domains.
 *
 * Reports the leading portion of each stack window that is not handed to
 * PCIe: [IoBase, PciResourceIoBase - 1] plus the 32/64-bit MMIO
 * equivalents. NOTE(review): presumably consumed by the CXL 1.1
 * end-points — confirm against the FSP IIO_UDS documentation.
 */
static void iio_cxl_domain_read_resources(struct device *dev)
{
	const STACK_RES *stack = domain_to_stack_res(dev);

	if (!stack)
		return;

	int idx = 0;
	struct resource *resource;

	if (stack->IoBase < stack->PciResourceIoBase) {
		resource = new_resource(dev, idx++);
		resource->base = stack->IoBase;
		resource->limit = stack->PciResourceIoBase - 1;
		resource->size = resource->limit - resource->base + 1;
		resource->flags = IORESOURCE_IO | IORESOURCE_ASSIGNED;
	}

	if (stack->Mmio32Base < stack->PciResourceMem32Base) {
		resource = new_resource(dev, idx++);
		resource->base = stack->Mmio32Base;
		resource->limit = stack->PciResourceMem32Base - 1;
		resource->size = resource->limit - resource->base + 1;
		resource->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
	}

	if (stack->Mmio64Base < stack->PciResourceMem64Base) {
		resource = new_resource(dev, idx++);
		resource->base = stack->Mmio64Base;
		resource->limit = stack->PciResourceMem64Base - 1;
		resource->size = resource->limit - resource->base + 1;
		resource->flags = IORESOURCE_MEM | IORESOURCE_ASSIGNED;
	}
}
/* Domain ops for CXL domains; only resource reporting differs from PCIe */
static struct device_operations iio_cxl_domain_ops = {
	.read_resources = iio_cxl_domain_read_resources,
	.set_resources = pci_domain_set_resources,
	.scan_bus = pci_host_bridge_scan_bus,
#if CONFIG(HAVE_ACPI_TABLES)
	.acpi_name = soc_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
	.acpi_fill_ssdt = pci_domain_fill_ssdt,
#endif
};
/*
 * Split a CXL stack into two domains: the stack's first bus carries plain
 * PCIe RCiEPs, the remaining buses carry the CXL 1.1 end-points.
 */
void create_cxl_domains(const union xeon_domain_path dp, struct bus *bus,
			const STACK_RES *sr, const size_t pci_segment_group)
{
	/* A CXL stack needs at least two buses */
	assert(sr->BusBase + 1 <= sr->BusLimit);
	/* 1st domain contains PCIe RCiEPs */
	create_domain(dp, bus, sr->BusBase, sr->BusBase, DOMAIN_TYPE_PCIE,
		      &iio_pcie_domain_ops, pci_segment_group);
	/* 2nd domain contains CXL 1.1 end-points */
	create_domain(dp, bus, sr->BusBase + 1, sr->BusLimit, DOMAIN_TYPE_CXL,
		      &iio_cxl_domain_ops, pci_segment_group);
}
#endif //CONFIG(SOC_INTEL_HAS_CXL)
/*
 * Dispatch a stack resource descriptor to the matching domain factory.
 *
 * The CONFIG() guards are compile-time constants, so calls to factories
 * that are compiled out are eliminated by the compiler; an unknown stack
 * type creates no domain at all.
 */
void create_xeonsp_domains(const union xeon_domain_path dp, struct bus *bus,
			   const STACK_RES *sr, const size_t pci_segment_group)
{
	if (is_ubox_stack_res(sr)) {
		create_ubox_domains(dp, bus, sr, pci_segment_group);
		return;
	}
	if (CONFIG(SOC_INTEL_HAS_CXL) && is_iio_cxl_stack_res(sr)) {
		create_cxl_domains(dp, bus, sr, pci_segment_group);
		return;
	}
	if (is_pcie_iio_stack_res(sr)) {
		create_pcie_domains(dp, bus, sr, pci_segment_group);
		return;
	}
	if (CONFIG(HAVE_IOAT_DOMAINS) && is_ioat_iio_stack_res(sr))
		create_ioat_domains(dp, bus, sr, pci_segment_group);
}

View File

@ -10,6 +10,7 @@ romstage-$(CONFIG_DISPLAY_UPD_DATA) += upd_display.c
romstage-$(CONFIG_DISPLAY_HOBS) += hob_display.c
ramstage-y += chip.c cpu.c soc_util.c soc_acpi.c
ramstage-y += ../chip_gen1.c
ramstage-$(CONFIG_DISPLAY_HOBS) += hob_display.c
ramstage-$(CONFIG_DISPLAY_UPD_DATA) += upd_display.c

View File

@ -49,16 +49,19 @@ static inline void init_xeon_domain_path(struct device_path *path, int socket,
#define DOMAIN_TYPE_UBX1 "UD"
#define DOMAIN_TYPE_CXL "CX"
void iio_pci_domain_read_resources(struct device *dev);
void iio_cxl_domain_read_resources(struct device *dev);
void attach_iio_stacks(void);
void soc_create_cxl_domains(const union xeon_domain_path dp, struct bus *bus,
const STACK_RES *sr, const size_t pci_segment_group);
void soc_create_ioat_domains(union xeon_domain_path path,
struct bus *bus,
const STACK_RES *sr,
const size_t pci_segment_group);
void create_ioat_domains(union xeon_domain_path path,
struct bus *bus,
const STACK_RES *sr,
const size_t pci_segment_group);
void create_xeonsp_domains(const union xeon_domain_path dp, struct bus *bus,
const STACK_RES *sr, const size_t pci_segment_group);
void create_domain(const union xeon_domain_path dp, struct bus *upstream,
int bus_base, int bus_limit, const char *type,
struct device_operations *ops, const size_t pci_segment_group);
struct device *dev_find_device_on_socket(uint8_t socket, u16 vendor, u16 device);
struct device *dev_find_all_devices_on_socket(uint8_t socket,

View File

@ -16,6 +16,7 @@ romstage-$(CONFIG_DISPLAY_HOBS) += hob_display.c
ramstage-y += soc_acpi.c
ramstage-y += chip.c
ramstage-y += ../chip_gen1.c
ramstage-y += soc_util.c
ramstage-y += cpu.c
ramstage-$(CONFIG_DISPLAY_UPD_DATA) += upd_display.c

View File

@ -14,6 +14,7 @@ romstage-$(CONFIG_DISPLAY_UPD_DATA) += upd_display.c
ramstage-y += chip.c cpu.c soc_util.c ramstage.c soc_acpi.c reset.c
ramstage-y += crashlog.c ioat.c
ramstage-y += ../chip_gen1.c
ramstage-$(CONFIG_DISPLAY_HOBS) += hob_display.c
ramstage-$(CONFIG_DISPLAY_UPD_DATA) += upd_display.c
CPPFLAGS_common += -I$(src)/soc/intel/xeon_sp/spr/include -I$(src)/soc/intel/xeon_sp/spr

View File

@ -79,10 +79,10 @@ static void create_ioat_domain(const union xeon_domain_path dp, struct bus *cons
}
}
void soc_create_ioat_domains(const union xeon_domain_path path,
struct bus *const bus,
const STACK_RES *const sr,
const size_t pci_segment_group)
void create_ioat_domains(const union xeon_domain_path path,
struct bus *const bus,
const STACK_RES *const sr,
const size_t pci_segment_group)
{
if (sr->BusLimit < sr->BusBase + HQM_BUS_OFFSET + HQM_RESERVED_BUS) {
printk(BIOS_WARNING,