Remove CACHE_ROM.

With the recent improvement 3d6ffe76f8, the speedup provided by
CACHE_ROM is greatly reduced. On the other hand, the feature can make
coreboot run out of MTRRs depending on the system configuration,
breaking I/O access and cache coherency in the worst case.
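
For context: the variable-range MTRRs that get exhausted here are a
small, fixed pool. The CPU reports the pool size (VCNT, typically 8-10
on the parts coreboot targets) in the low byte of the IA32_MTRRCAP MSR
(0xfe). A minimal sketch of reading it, assuming GCC-style inline asm
and ring-0 execution; rdmsr64 is an illustrative helper, not coreboot's
actual MSR API:

#include <stdint.h>

#define IA32_MTRRCAP 0xfe

/* Illustrative helper: read a 64-bit MSR (privileged instruction). */
static inline uint64_t rdmsr64(uint32_t msr)
{
	uint32_t lo, hi;
	__asm__ __volatile__("rdmsr" : "=a"(lo), "=d"(hi) : "c"(msr));
	return ((uint64_t)hi << 32) | lo;
}

/* VCNT: number of variable-range MTRR pairs the CPU implements. */
static unsigned int variable_mtrr_count(void)
{
	return rdmsr64(IA32_MTRRCAP) & 0xff;
}

Every cacheability region in the address-space map, including the extra
WRPROT ROM window that CACHE_ROM reserved, competes for these slots.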

CACHE_ROM requires the user to sanity-check their boot output because
the feature is brittle: whether a given configuration works depends on
the I/O hole size, RAM size, and chipset. Because of this, the current
implementation can leave a system configured in an inconsistent state,
leading to unexpected results such as poor performance and/or broken
cache coherency.
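
To illustrate the arithmetic with a hypothetical worked example (the
numbers below are made up, and the real allocator is smarter about
punching UC holes): each variable MTRR covers one size-aligned
power-of-two range, so a ragged top of memory alone eats most of the
pool before the ROM window claims another slot.

#include <stdint.h>
#include <stdio.h>

/* Count the size-aligned power-of-two chunks needed to cover
 * [0, tom) as write-back, one variable MTRR per chunk. */
static unsigned int mtrrs_for_wb_range(uint64_t tom)
{
	uint64_t base = 0;
	unsigned int count = 0;

	while (base < tom) {
		/* Largest power of two aligned at 'base'... */
		uint64_t chunk = base ? (base & -base) : (UINT64_C(1) << 63);
		/* ...that still fits below 'tom'. */
		while (chunk > tom - base)
			chunk >>= 1;
		base += chunk;
		count++;
	}
	return count;
}

int main(void)
{
	/* ~2.7 GiB of RAM below an I/O hole: seven MTRRs already. */
	printf("%u\n", mtrrs_for_wb_range(0xAD600000));
	return 0;
}

With eight variable MTRRs, one more WRPROT range for the ROM leaves no
headroom for I/O regions, which is the failure mode described above.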

Remove this buggy feature until we figure out how to implement it
properly, if necessary.

Change-Id: I858d78a907bf042fcc21fdf7a2bf899e9f6b591d
Signed-off-by: Vladimir Serbinenko <phcoder@gmail.com>
Reviewed-on: http://review.coreboot.org/5146
Tested-by: build bot (Jenkins)
Reviewed-by: Aaron Durbin <adurbin@google.com>
commit 4337020b95 (parent 20f83d5656)
Author: Vladimir Serbinenko
Date: 2014-02-05 19:46:45 +01:00

16 changed files with 0 additions and 163 deletions

@@ -789,9 +789,6 @@ void bsp_init_and_start_aps(struct bus *cpu_bus)
 
 	/* Restore the default SMM region. */
 	restore_default_smm_area(smm_save_area);
-
-	/* Enable ROM caching if option was selected. */
-	x86_mtrr_enable_rom_caching();
 }
 
 static struct device_operations cpu_dev_ops = {

@@ -143,14 +143,6 @@ void release_aps_for_smm_relocation(int do_parallel)
 		printk(BIOS_DEBUG, "Timed out waiting for AP SMM relocation\n");
 }
 
-/* The mtrr code sets up ROM caching on the BSP, but not the others. However,
- * the boot loader payload disables this. In order for Linux not to complain
- * ensure the caching is disabled for the APs before going to sleep. */
-static void cleanup_rom_caching(void)
-{
-	x86_mtrr_disable_rom_caching();
-}
-
 /* By the time APs call ap_init() caching has been setup, and microcode has
  * been loaded. */
 static void asmlinkage ap_init(unsigned int cpu, void *microcode_ptr)
@@ -184,13 +176,6 @@ static void asmlinkage ap_init(unsigned int cpu, void *microcode_ptr)
 	/* After SMM relocation a 2nd microcode load is required. */
 	intel_microcode_load_unlocked(microcode_ptr);
 
-	/* The MTRR resources are core scoped. Therefore, there is no need
-	 * to do the same work twice. Additionally, this check keeps the
-	 * ROM cache enabled on the BSP since its hyperthread sibling won't
-	 * call cleanup_rom_caching(). */
-	if ((lapicid() & 1) == 0)
-		cleanup_rom_caching();
-
 	/* FIXME(adurbin): park CPUs properly -- preferably somewhere in a
 	 * reserved part of memory that the OS cannot get to. */
 	stop_this_cpu();

@@ -76,13 +76,6 @@ config LOGICAL_CPUS
 	bool
 	default y
 
-config CACHE_ROM
-	bool "Allow for caching system ROM."
-	default n
-	help
-	  When selected a variable range MTRR is allocated for coreboot and
-	  the bootloader enables caching of the system ROM for faster access.
-
 config SMM_TSEG
 	bool
 	default n

@@ -199,16 +199,6 @@ static struct memranges *get_physical_address_space(void)
 		memranges_add_resources_filter(addr_space, mask, match, MTRR_TYPE_WRCOMB,
 					filter_vga_wrcomb);
 
-#if CONFIG_CACHE_ROM
-		/* Add a write-protect region covering the ROM size
-		 * when CONFIG_CACHE_ROM is enabled. The ROM is assumed
-		 * to be located at 4GiB - rom size. */
-		resource_t rom_base = RANGE_TO_PHYS_ADDR(
-			RANGE_4GB - PHYS_TO_RANGE_ADDR(CACHE_ROM_SIZE));
-		memranges_insert(addr_space, rom_base, CACHE_ROM_SIZE,
-				MTRR_TYPE_WRPROT);
-#endif
-
 		/* The address space below 4GiB is special. It needs to be
 		 * covered entirly by range entries so that MTRR calculations
 		 * can be properly done for the full 32-bit address space.
@@ -380,61 +370,6 @@ void x86_setup_fixed_mtrrs(void)
 	enable_fixed_mtrr();
 }
 
-/* Keep track of the MTRR that covers the ROM for caching purposes. */
-#if CONFIG_CACHE_ROM
-static long rom_cache_mtrr = -1;
-
-long x86_mtrr_rom_cache_var_index(void)
-{
-	return rom_cache_mtrr;
-}
-
-void x86_mtrr_enable_rom_caching(void)
-{
-	msr_t msr_val;
-	unsigned long index;
-
-	if (rom_cache_mtrr < 0)
-		return;
-
-	index = rom_cache_mtrr;
-	disable_cache();
-	msr_val = rdmsr(MTRRphysBase_MSR(index));
-	msr_val.lo &= ~0xff;
-	msr_val.lo |= MTRR_TYPE_WRPROT;
-	wrmsr(MTRRphysBase_MSR(index), msr_val);
-	enable_cache();
-}
-
-void x86_mtrr_disable_rom_caching(void)
-{
-	msr_t msr_val;
-	unsigned long index;
-
-	if (rom_cache_mtrr < 0)
-		return;
-
-	index = rom_cache_mtrr;
-	disable_cache();
-	msr_val = rdmsr(MTRRphysBase_MSR(index));
-	msr_val.lo &= ~0xff;
-	wrmsr(MTRRphysBase_MSR(index), msr_val);
-	enable_cache();
-}
-
-static void disable_cache_rom(void *unused)
-{
-	x86_mtrr_disable_rom_caching();
-}
-
-BOOT_STATE_INIT_ENTRIES(disable_rom_cache_bscb) = {
-	BOOT_STATE_INIT_ENTRY(BS_OS_RESUME, BS_ON_ENTRY,
-			      disable_cache_rom, NULL),
-	BOOT_STATE_INIT_ENTRY(BS_PAYLOAD_LOAD, BS_ON_EXIT,
-			      disable_cache_rom, NULL),
-};
-#endif
-
 struct var_mtrr_state {
 	struct memranges *addr_space;
 	int above4gb;
@@ -482,17 +417,6 @@ static void write_var_mtrr(struct var_mtrr_state *var_state,
 	mask = (1ULL << var_state->address_bits) - 1;
 	rsize = rsize & mask;
 
-#if CONFIG_CACHE_ROM
-	/* CONFIG_CACHE_ROM allocates an MTRR specifically for allowing
-	 * one to turn on caching for faster ROM access. However, it is
-	 * left to the MTRR callers to enable it. */
-	if (mtrr_type == MTRR_TYPE_WRPROT) {
-		mtrr_type = MTRR_TYPE_UNCACHEABLE;
-		if (rom_cache_mtrr < 0)
-			rom_cache_mtrr = var_state->mtrr_index;
-	}
-#endif
-
 	printk(BIOS_DEBUG, "MTRR: %d base 0x%016llx mask 0x%016llx type %d\n",
 	       var_state->mtrr_index, rbase, rsize, mtrr_type);
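
For reference, the window whose slot the removed code reserved is a
single base/mask pair. A hypothetical sketch (constants per the Intel
SDM, not coreboot's headers) of the register values for an 8 MiB ROM
mapped just below 4 GiB, assuming 36 physical address bits:

#include <stdint.h>
#include <stdio.h>

#define MTRR_TYPE_WRPROT	5
#define MTRR_PHYS_MASK_VALID	(UINT64_C(1) << 11)

int main(void)
{
	uint64_t rom_size = UINT64_C(8) << 20;		/* 8 MiB */
	uint64_t rom_base = (UINT64_C(1) << 32) - rom_size;
	uint64_t addr_mask = (UINT64_C(1) << 36) - 1;	/* 36 address bits */

	/* MTRRphysBase: base address ORed with the memory type. */
	uint64_t physbase = rom_base | MTRR_TYPE_WRPROT;
	/* MTRRphysMask: size mask plus the valid bit (bit 11). */
	uint64_t physmask = (~(rom_size - 1) & addr_mask) | MTRR_PHYS_MASK_VALID;

	/* An address 'a' matches when (a & physmask) == (rom_base & physmask). */
	printf("physbase=0x%010llx physmask=0x%010llx\n",
	       (unsigned long long)physbase, (unsigned long long)physmask);
	return 0;
}

This is what x86_mtrr_enable_rom_caching() toggled: it only rewrote the
type byte in the low bits of MTRRphysBase, leaving the mask intact.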