@@ -8,6 +8,7 @@
 #include <device/mmio.h>
 #include <device/pci_def.h>
 #include <device/pci_ops.h>
+#include <fmap.h>
 #include <intelblocks/cse.h>
 #include <intelblocks/systemagent.h>
 #include <intelblocks/vtd.h>
@@ -29,6 +30,13 @@
 
 #define CPU_PID_PCIE_PHYX16_BROADCAST 0x55
 
+struct hsphy_cache {
+	uint32_t hsphy_size;
+	uint8_t hash_algo;
+	uint8_t digest[MAX_HASH_SIZE];
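+	/* The firmware payload is appended directly after this metadata header */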
+	uint8_t hsphy_fw[0];
+} __packed;
+
 struct ip_push_model {
 	uint16_t count;
 	uint16_t address;
@@ -105,7 +113,7 @@ static int heci_get_hsphy_payload(void *buf, uint32_t *buf_size, uint8_t *hash_b
 	return 0;
 }
 
-static int verify_hsphy_hash(void *buf, uint32_t buf_size, uint8_t *hash_buf, uint8_t hash_alg)
+static bool verify_hsphy_hash(void *buf, uint32_t buf_size, uint8_t *hash_buf, uint8_t hash_alg)
 {
 	struct vb2_hash hash;
 
@@ -125,14 +133,13 @@ static int verify_hsphy_hash(void *buf, uint32_t buf_size, uint8_t *hash_buf, ui
 		hash.algo = VB2_HASH_SHA384;
 		break;
 	}
 
 	memcpy(hash.raw, hash_buf, vb2_digest_size(hash.algo));
 
-	if (vb2_hash_verify(vboot_hwcrypto_allowed(), buf, buf_size, &hash) != VB2_SUCCESS) {
-		printk(BIOS_ERR, "HSPHY SHA hashes do not match\n");
-		return -1;
-	}
+	if (vb2_hash_verify(vboot_hwcrypto_allowed(), buf, buf_size, &hash) != VB2_SUCCESS)
+		return false;
 
-	return 0;
+	return true;
 }
 
 static void upload_hsphy_to_cpu_pcie(void *buf, uint32_t buf_size)
@@ -156,31 +163,168 @@ static void upload_hsphy_to_cpu_pcie(void *buf, uint32_t buf_size)
 	}
 }
 
-void load_and_init_hsphy(void)
+static bool hsphy_cache_valid(struct hsphy_cache *hsphy_fw_cache)
 {
-	void *hsphy_buf;
-	uint8_t hsphy_hash[MAX_HASH_SIZE] = { 0 };
-	uint8_t hash_type;
-	uint32_t buf_size = HSPHY_PAYLOAD_SIZE;
-	size_t dma_buf_size;
-	pci_devfn_t dev = PCH_DEV_CSE;
-	const uint16_t pci_cmd_bme_mem = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
-	uint32_t status;
+	if (!hsphy_fw_cache) {
+		printk(BIOS_WARNING, "Failed to mmap HSPHY cache\n");
+		return false;
+	}
 
-	if (!is_devfn_enabled(SA_DEVFN_CPU_PCIE1_0) &&
-	    !is_devfn_enabled(SA_DEVFN_CPU_PCIE1_1)) {
-		printk(BIOS_DEBUG, "All HSPHY ports disabled, skipping HSPHY loading\n");
-		return;
-	}
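+	/* Reject implausible payload sizes and unsupported hash algorithms */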
+	if (hsphy_fw_cache->hsphy_size == 0 ||
+	    hsphy_fw_cache->hsphy_size > HSPHY_PAYLOAD_SIZE ||
+	    hsphy_fw_cache->hash_algo <= HASHALG_SHA1 ||
+	    hsphy_fw_cache->hash_algo > HASHALG_SHA512)
+		return false;
+
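+	/* Check the cached firmware against the digest stored next to it in flash */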
+	if (!verify_hsphy_hash(hsphy_fw_cache->hsphy_fw, hsphy_fw_cache->hsphy_size,
+			       hsphy_fw_cache->digest, hsphy_fw_cache->hash_algo))
+		return false;
+
+	return true;
+}
+
+static bool load_hsphy_from_cache(void)
+{
+	struct region_device rdev;
+	struct hsphy_cache *hsphy_fw_cache;
+
+	if (fmap_locate_area_as_rdev("HSPHY_FW", &rdev) < 0) {
+		printk(BIOS_ERR, "HSPHY: Cannot find HSPHY_FW region\n");
+		return false;
+	}
+
+	hsphy_fw_cache = (struct hsphy_cache *)rdev_mmap_full(&rdev);
+
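+	/* rdev_mmap_full() may return NULL; hsphy_cache_valid() checks for that */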
+	if (!hsphy_cache_valid(hsphy_fw_cache)) {
+		printk(BIOS_ERR, "HSPHY: HSPHY cache invalid\n");
+		if (hsphy_fw_cache)
+			rdev_munmap(&rdev, hsphy_fw_cache);
+		return false;
+	}
+
+	printk(BIOS_INFO, "Loading HSPHY FW from cache\n");
+	upload_hsphy_to_cpu_pcie(hsphy_fw_cache->hsphy_fw, hsphy_fw_cache->hsphy_size);
+
+	rdev_munmap(&rdev, hsphy_fw_cache);
+
+	return true;
+}
+
+static void cache_hsphy_fw_in_flash(void *buf, uint32_t buf_size, uint8_t *hash_buf,
+				    uint8_t hash_alg)
+{
+	struct region_device rdev;
+	struct hsphy_cache *hsphy_fw_cache;
+	size_t ret;
+
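+	/* Sanity-check the caller's buffer, hash and algorithm before touching flash */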
+	if (!buf || buf_size == 0 || buf_size > (HSPHY_PAYLOAD_SIZE - sizeof(*hsphy_fw_cache))
+	    || !hash_buf || hash_alg <= HASHALG_SHA1 || hash_alg > HASHALG_SHA512) {
+		printk(BIOS_ERR, "Invalid parameters, HSPHY will not be cached in flash.\n");
+		return;
+	}
+
+	/* Locate the area as RO rdev, otherwise mmap will fail */
+	if (fmap_locate_area_as_rdev("HSPHY_FW", &rdev) < 0) {
+		printk(BIOS_ERR, "HSPHY: Could not find HSPHY_FW region\n");
+		printk(BIOS_ERR, "HSPHY will not be cached in flash\n");
+		return;
+	}
+
+	hsphy_fw_cache = (struct hsphy_cache *)rdev_mmap_full(&rdev);
+
+	if (hsphy_cache_valid(hsphy_fw_cache)) {
+		/* If the cache is valid, check the buffer against the cache hash */
+		if (verify_hsphy_hash(buf, buf_size, hsphy_fw_cache->digest,
+				      hsphy_fw_cache->hash_algo)) {
+			printk(BIOS_INFO, "HSPHY: cache does not need update\n");
+			rdev_munmap(&rdev, hsphy_fw_cache);
+			return;
+		} else {
+			printk(BIOS_INFO, "HSPHY: cache needs update\n");
+		}
+	} else {
+		printk(BIOS_INFO, "HSPHY: cache invalid, updating\n");
+	}
+
+	if (region_device_sz(&rdev) < (buf_size + sizeof(*hsphy_fw_cache))) {
+		printk(BIOS_ERR, "HSPHY: HSPHY_FW region too small: %zx < %zx\n",
+		       region_device_sz(&rdev), buf_size + sizeof(*hsphy_fw_cache));
+		printk(BIOS_ERR, "HSPHY will not be cached in flash\n");
+		rdev_munmap(&rdev, hsphy_fw_cache);
+		return;
+	}
+
+	rdev_munmap(&rdev, hsphy_fw_cache);
+	hsphy_fw_cache = malloc(sizeof(*hsphy_fw_cache));
+
+	if (!hsphy_fw_cache) {
+		printk(BIOS_ERR, "HSPHY: Could not allocate memory for HSPHY cache buffer\n");
+		printk(BIOS_ERR, "HSPHY will not be cached in flash\n");
+		return;
+	}
+
+	hsphy_fw_cache->hsphy_size = buf_size;
+	hsphy_fw_cache->hash_algo = hash_alg;
+
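+	/* Map the HECI hash algorithm to its vb2 counterpart for vb2_digest_size() */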
+	switch (hash_alg) {
+	case HASHALG_SHA256:
+		hash_alg = VB2_HASH_SHA256;
+		break;
+	case HASHALG_SHA384:
+		hash_alg = VB2_HASH_SHA384;
+		break;
+	case HASHALG_SHA512:
+		hash_alg = VB2_HASH_SHA512;
+		break;
+	}
+
+	memset(hsphy_fw_cache->digest, 0, sizeof(hsphy_fw_cache->digest));
+	memcpy(hsphy_fw_cache->digest, hash_buf, vb2_digest_size(hash_alg));
+
+	/* Now that we want to write to flash, locate the area as RW rdev */
+	if (fmap_locate_area_as_rdev_rw("HSPHY_FW", &rdev) < 0) {
+		printk(BIOS_ERR, "HSPHY: Could not find HSPHY_FW region\n");
+		printk(BIOS_ERR, "HSPHY will not be cached in flash\n");
+		free(hsphy_fw_cache);
+		return;
+	}
+
+	if (rdev_eraseat(&rdev, 0, region_device_sz(&rdev)) < 0) {
+		printk(BIOS_ERR, "Failed to erase HSPHY cache region\n");
+		free(hsphy_fw_cache);
+		return;
+	}
+
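+	/* Write the metadata header first, then the firmware payload right after it */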
+	ret = rdev_writeat(&rdev, hsphy_fw_cache, 0, sizeof(*hsphy_fw_cache));
+	if (ret != sizeof(*hsphy_fw_cache)) {
+		printk(BIOS_ERR, "Failed to write HSPHY cache metadata\n");
+		free(hsphy_fw_cache);
+		return;
+	}
+
+	ret = rdev_writeat(&rdev, buf, sizeof(*hsphy_fw_cache), buf_size);
+	if (ret != buf_size) {
+		printk(BIOS_ERR, "Failed to write HSPHY FW to cache\n");
+		free(hsphy_fw_cache);
+		return;
+	}
+
+	printk(BIOS_INFO, "HSPHY cached to flash successfully\n");
+
+	free(hsphy_fw_cache);
+}
+
+static void *allocate_hsphy_buf(void)
+{
+	void *hsphy_buf;
+	size_t dma_buf_size;
 
 	if (CONFIG(ENABLE_EARLY_DMA_PROTECTION)) {
 		hsphy_buf = vtd_get_dma_buffer(&dma_buf_size);
 		if (!hsphy_buf || dma_buf_size < HSPHY_PAYLOAD_SIZE) {
 			printk(BIOS_ERR, "DMA protection enabled but DMA buffer does not"
 					 " exist or is too small\n");
 			printk(BIOS_ERR, "Aborting HSPHY firmware loading, "
 					 "PCIe Gen5 won't work.\n");
-			return;
+			return NULL;
 		}
 
 		/* Rather impossible scenario, but check alignment anyways */
@@ -192,20 +336,73 @@ void load_and_init_hsphy(void)
 		hsphy_buf = memalign(4 * KiB, HSPHY_PAYLOAD_SIZE);
 
 		if (!hsphy_buf) {
-			printk(BIOS_ERR, "Could not allocate memory for HSPHY blob\n");
-			printk(BIOS_ERR, "Aborting HSPHY firmware loading, "
-					 "PCIe Gen5 won't work.\n");
-			return;
+			printk(BIOS_ERR, "Failed to allocate memory for HSPHY blob\n");
+			return NULL;
 		}
 	}
 
+	return hsphy_buf;
+}
+
+void load_and_init_hsphy(void)
+{
+	void *hsphy_buf;
+	uint8_t hsphy_hash[MAX_HASH_SIZE] = { 0 };
+	uint8_t hash_type;
+	uint32_t buf_size = HSPHY_PAYLOAD_SIZE;
+	pci_devfn_t dev = PCH_DEV_CSE;
+	const uint16_t pci_cmd_bme_mem = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
+	uint32_t status;
+
+	if (!is_devfn_enabled(SA_DEVFN_CPU_PCIE1_0) &&
+	    !is_devfn_enabled(SA_DEVFN_CPU_PCIE1_1)) {
+		printk(BIOS_DEBUG, "All HSPHY ports disabled, skipping HSPHY loading\n");
+		return;
+	}
+
+	/*
+	 * Try to get HSPHY payload from CSME first, so we can always keep our
+	 * HSPHY cache up to date. If we cannot allocate the buffer for it, the
+	 * cache is our last resort.
+	 */
+	hsphy_buf = allocate_hsphy_buf();
+	if (!hsphy_buf) {
+		printk(BIOS_ERR, "Could not allocate memory for HSPHY blob\n");
+		if (CONFIG(INCLUDE_HSPHY_IN_FMAP)) {
+			printk(BIOS_INFO, "Trying to load HSPHY FW from cache\n");
+			if (load_hsphy_from_cache()) {
+				printk(BIOS_INFO, "Successfully loaded HSPHY FW from cache\n");
+				return;
+			}
+			printk(BIOS_ERR, "Failed to load HSPHY FW from cache\n");
+		}
+		printk(BIOS_ERR, "Aborting HSPHY FW loading, PCIe Gen5 won't work.\n");
+		return;
+	}
+
 	memset(hsphy_buf, 0, HSPHY_PAYLOAD_SIZE);
 
+	/*
+	 * If CSME is not present, try cached HSPHY FW. We still want to use
+	 * CSME just in case CSME is updated along with HSPHY FW, so that we
+	 * can update our cache if needed.
+	 */
 	if (!is_cse_enabled()) {
+		if (CONFIG(INCLUDE_HSPHY_IN_FMAP)) {
+			printk(BIOS_INFO, "Trying to load HSPHY FW from cache"
+					  " because CSME is not enabled or not visible\n");
+			if (load_hsphy_from_cache()) {
+				printk(BIOS_INFO, "Successfully loaded HSPHY FW from cache\n");
+				return;
+			}
+			printk(BIOS_ERR, "Failed to load HSPHY FW from cache\n");
+		}
 		printk(BIOS_ERR, "%s: CSME not enabled or not visible, but required\n",
 		       __func__);
-		printk(BIOS_ERR, "Aborting HSPHY firmware loading, PCIe Gen5 won't work.\n");
-		goto hsphy_exit;
+		printk(BIOS_ERR, "Aborting HSPHY FW loading, PCIe Gen5 won't work.\n");
+		if (!CONFIG(ENABLE_EARLY_DMA_PROTECTION))
+			free(hsphy_buf);
+		return;
 	}
 
 	/* Ensure BAR, BME and memory space are enabled */
@@ -219,19 +416,32 @@ void load_and_init_hsphy(void)
 		pci_or_config16(dev, PCI_COMMAND, pci_cmd_bme_mem);
 	}
 
-	if (heci_get_hsphy_payload(hsphy_buf, &buf_size, hsphy_hash, &hash_type, &status)) {
-		printk(BIOS_ERR, "Aborting HSPHY firmware loading, PCIe Gen5 won't work.\n");
-		goto hsphy_exit;
-	}
-
-	if (verify_hsphy_hash(hsphy_buf, buf_size, hsphy_hash, hash_type)) {
-		printk(BIOS_ERR, "Aborting HSPHY firmware loading, PCIe Gen5 won't work.\n");
-		goto hsphy_exit;
-	}
-
-	upload_hsphy_to_cpu_pcie(hsphy_buf, buf_size);
-
-hsphy_exit:
-	if (!CONFIG(ENABLE_EARLY_DMA_PROTECTION))
-		free(hsphy_buf);
+	/* Try to get HSPHY payload from CSME and cache it if possible. */
+	if (!heci_get_hsphy_payload(hsphy_buf, &buf_size, hsphy_hash, &hash_type, &status)) {
+		if (verify_hsphy_hash(hsphy_buf, buf_size, hsphy_hash, hash_type)) {
+			upload_hsphy_to_cpu_pcie(hsphy_buf, buf_size);
+			if (CONFIG(INCLUDE_HSPHY_IN_FMAP))
+				cache_hsphy_fw_in_flash(hsphy_buf, buf_size, hsphy_hash,
+							hash_type);
+
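+			/* The VT-d DMA buffer is not heap-allocated, so only free malloc'ed memory */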
+			if (!CONFIG(ENABLE_EARLY_DMA_PROTECTION))
+				free(hsphy_buf);
+			return;
+		} else {
+			printk(BIOS_ERR, "Failed to verify HSPHY FW hash.\n");
+		}
+	} else {
+		printk(BIOS_ERR, "Failed to get HSPHY FW over HECI.\n");
+	}
+
+	if (!CONFIG(ENABLE_EARLY_DMA_PROTECTION))
+		free(hsphy_buf);
+
+	/* We failed to get HSPHY payload from CSME, cache is our last chance. */
+	if (CONFIG(INCLUDE_HSPHY_IN_FMAP) && load_hsphy_from_cache()) {
+		printk(BIOS_INFO, "Successfully loaded HSPHY FW from cache\n");
+		return;
+	}
+
+	printk(BIOS_ERR, "Failed to load HSPHY FW, PCIe Gen5 won't work.\n");
 }