SMM for AMD K8 Part 1/2

Signed-off-by: Stefan Reinauer <stepan@coresystems.de>
Acked-by: Stefan Reinauer <stepan@coresystems.de>
git-svn-id: svn://svn.coreboot.org/coreboot/trunk@6201 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1

commit cadc545838 (parent 405721d45c), committed by Stefan Reinauer
@@ -499,11 +499,6 @@ static void model_fxx_init(device_t dev)
 	k8_errata();
 
-	/* Set SMMLOCK to avoid exploits messing with SMM */
-	msr = rdmsr(HWCR_MSR);
-	msr.lo |= (1 << 0);
-	wrmsr(HWCR_MSR, msr);
-
 	enable_cache();
 
 
 	/* Set the processor name string */
src/cpu/amd/smm/Makefile.inc (new file, 2 lines)
@@ -0,0 +1,2 @@
ramstage-$(CONFIG_HAVE_SMI_HANDLER) += smm_init.c
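# (smm_init.c is built into ramstage only when CONFIG_HAVE_SMI_HANDLER is enabled)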

src/cpu/amd/smm/smm_init.c (new file, 137 lines)
@@ -0,0 +1,137 @@
/*
 * This file is part of the coreboot project.
 *
 * Copyright (C) 2010 coresystems GmbH
 * Copyright (C) 2010 Rudolf Marek
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <console/console.h>
#include <arch/io.h>
#include <cpu/cpu.h>
#include <cpu/x86/lapic.h>
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>
#include <cpu/amd/mtrr.h>
#include <cpu/amd/model_fxx_msr.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/smm.h>
#include <string.h>

#define SMM_BASE_MSR 0xc0010111
#define SMM_ADDR_MSR 0xc0010112
#define SMM_MASK_MSR 0xc0010113
#define SMM_BASE 0xa0000

extern unsigned char _binary_smm_start;
extern unsigned char _binary_smm_size;
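/* The two symbols above are emitted by the build around the embedded SMM
 * handler blob (presumably via objcopy's binary-symbol convention). Note
 * that _binary_smm_size is an absolute symbol: its *address* encodes the
 * blob's size, hence the (size_t)& cast at the memcpy() below.
 */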

static int smm_handler_copied = 0;

void smm_init(void)
{
	msr_t msr;

	msr = rdmsr(HWCR_MSR);
	if (msr.lo & (1 << 0)) {
		// This sounds like a bug... ?
		printk(BIOS_DEBUG, "SMM is still locked from last boot, using old handler.\n");
		return;
	}

	/* Only copy the SMM handler once, not once per CPU */
	if (!smm_handler_copied) {
		msr_t syscfg_orig, mtrr_aseg_orig;

		smm_handler_copied = 1;

		/* MTRR changes don't like an enabled cache */
		disable_cache();

		/* Back up MSRs for later restore */
		syscfg_orig = rdmsr(SYSCFG_MSR);
		mtrr_aseg_orig = rdmsr(MTRRfix16K_A0000_MSR);

		msr = syscfg_orig;
		/* Allow changes to MTRR extended attributes */
		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
		/* Turn the extended attributes off until we fix them,
		 * so A0000 is routed to memory.
		 */
		msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
		wrmsr(SYSCFG_MSR, msr);

		/* Set DRAM access to 0xa0000: A0000 is memory */
		msr.lo = 0x18181818;
		msr.hi = 0x18181818;
		wrmsr(MTRRfix16K_A0000_MSR, msr);
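		/* Per the AMD BKDG: with MtrrFixDramModEn set, bits 3 and 4
		 * of each attribute byte in a fixed-range MTRR are the
		 * RdDram/WrDram extended attributes. A value of 0x18 per
		 * byte sets both, so each 16 KiB slice of 0xa0000-0xbffff
		 * decodes to DRAM instead of legacy VGA MMIO while the
		 * handler is copied into place.
		 */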
		enable_cache();

		/* Enable the extended attributes: A0000 is now routed to
		 * DRAM for both reads and writes.
		 */
		msr = syscfg_orig;
		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
		msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
		wrmsr(SYSCFG_MSR, msr);

		/* Enable the SMM memory window */
		// TODO: does "Enable ASEG SMRAM Range" have to happen on
		// every CPU core?
		msr = rdmsr(SMM_MASK_MSR);
		msr.lo |= (1 << 0);	// Enable ASEG SMRAM Range
		msr.lo &= ~(1 << 2);	// Open ASEG SMRAM Range
		wrmsr(SMM_MASK_MSR, msr);

		/* Copy the real SMM handler */
		memcpy((void *)SMM_BASE, &_binary_smm_start, (size_t)&_binary_smm_size);
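		/* Flush the freshly copied handler from cache to DRAM before
		 * the ASEG window is closed below; otherwise the copy could
		 * still be sitting in the cache when the mapping changes.
		 */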
		wbinvd();

		msr = rdmsr(SMM_MASK_MSR);
		msr.lo |= (1 << 2);	// Close ASEG SMRAM Range
		wrmsr(SMM_MASK_MSR, msr);

		/* Change SYSCFG so we can restore the MTRR */
		msr = syscfg_orig;
		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
		msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
		wrmsr(SYSCFG_MSR, msr);

		/* Restore the MTRR */
		disable_cache();
		wrmsr(MTRRfix16K_A0000_MSR, mtrr_aseg_orig);

		/* Restore SYSCFG */
		wrmsr(SYSCFG_MSR, syscfg_orig);
		enable_cache();
	}

	/* But set the SMM base address on all CPUs/cores */
	msr = rdmsr(SMM_BASE_MSR);
	msr.lo = SMM_BASE - (lapicid() * 0x400);
	wrmsr(SMM_BASE_MSR, msr);
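	/* Staggering SMBASE down by 0x400 per LAPIC ID gives each core a
	 * private save state area (at SMBASE + 0xfe00) and its own entry
	 * offset within the single copied handler, so cores do not clobber
	 * each other's state on concurrent SMIs.
	 */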
}

void smm_lock(void)
{
	// TODO: I think this should be running on each CPU
	msr_t msr;

	printk(BIOS_DEBUG, "Locking SMM.\n");

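	/* HWCR.SMMLOCK (bit 0) is write-once: it stays set until reset,
	 * which is why smm_init() above keeps the old handler when it
	 * finds the bit already set.
	 */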
	/* Set SMMLOCK to avoid exploits messing with SMM */
	msr = rdmsr(HWCR_MSR);
	msr.lo |= (1 << 0);
	wrmsr(HWCR_MSR, msr);
}

@@ -8,5 +8,7 @@ subdirs-y += ../../x86/lapic
 subdirs-y += ../../x86/cache
 subdirs-y += ../../x86/mtrr
 subdirs-y += ../../x86/pae
+subdirs-y += ../../x86/smm
+subdirs-y += ../smm
 
 cpu_incs += $(src)/cpu/amd/car/cache_as_ram.inc

@@ -8,5 +8,7 @@ subdirs-y += ../../x86/lapic
 subdirs-y += ../../x86/cache
 subdirs-y += ../../x86/mtrr
 subdirs-y += ../../x86/pae
+subdirs-y += ../../x86/smm
+subdirs-y += ../smm
 
 cpu_incs += $(src)/cpu/amd/car/cache_as_ram.inc

@@ -8,5 +8,7 @@ subdirs-y += ../../x86/lapic
 subdirs-y += ../../x86/cache
 subdirs-y += ../../x86/mtrr
 subdirs-y += ../../x86/pae
+subdirs-y += ../../x86/smm
+subdirs-y += ../smm
 
 cpu_incs += $(src)/cpu/amd/car/cache_as_ram.inc
@@ -9,5 +9,6 @@ subdirs-y += ../../x86/cache
 subdirs-y += ../../x86/mtrr
 subdirs-y += ../../x86/pae
 subdirs-y += ../../x86/smm
+subdirs-y += ../smm
 
 cpu_incs += $(src)/cpu/amd/car/cache_as_ram.inc
@@ -7,7 +7,8 @@ subdirs-y += ../../x86/tsc
 subdirs-y += ../../x86/lapic
 subdirs-y += ../../x86/cache
 subdirs-y += ../../x86/pae
-subdirs-y += ../../x86/smm
 subdirs-y += ../../x86/mtrr
+subdirs-y += ../../x86/smm
+subdirs-y += ../smm
 
 cpu_incs += $(src)/cpu/amd/car/cache_as_ram.inc

@@ -7,7 +7,8 @@ subdirs-y += ../../x86/tsc
 subdirs-y += ../../x86/lapic
 subdirs-y += ../../x86/cache
 subdirs-y += ../../x86/pae
-subdirs-y += ../../x86/smm
 subdirs-y += ../../x86/mtrr
+subdirs-y += ../../x86/smm
+subdirs-y += ../smm
 
 cpu_incs += $(src)/cpu/amd/car/cache_as_ram.inc

@@ -7,7 +7,8 @@ subdirs-y += ../../x86/tsc
 subdirs-y += ../../x86/lapic
 subdirs-y += ../../x86/cache
 subdirs-y += ../../x86/pae
-subdirs-y += ../../x86/smm
 subdirs-y += ../../x86/mtrr
+subdirs-y += ../../x86/smm
+subdirs-y += ../smm
 
 cpu_incs += $(src)/cpu/amd/car/cache_as_ram.inc
@@ -9,5 +9,6 @@ subdirs-y += ../../x86/cache
 subdirs-y += ../../x86/mtrr
 subdirs-y += ../../x86/pae
 subdirs-y += ../../x86/smm
+subdirs-y += ../smm
 
 cpu_incs += $(src)/cpu/amd/car/cache_as_ram.inc

@@ -9,5 +9,6 @@ subdirs-y += ../../x86/cache
 subdirs-y += ../../x86/mtrr
 subdirs-y += ../../x86/pae
 subdirs-y += ../../x86/smm
+subdirs-y += ../smm
 
 cpu_incs += $(src)/cpu/amd/car/cache_as_ram.inc
@@ -7,7 +7,8 @@ subdirs-y += ../../x86/tsc
 subdirs-y += ../../x86/lapic
 subdirs-y += ../../x86/cache
 subdirs-y += ../../x86/pae
-subdirs-y += ../../x86/smm
 subdirs-y += ../../x86/mtrr
+subdirs-y += ../../x86/smm
+subdirs-y += ../smm
 
 cpu_incs += $(src)/cpu/amd/car/cache_as_ram.inc
@@ -87,6 +87,24 @@ static void smi_set_eos(void)
 	southbridge_smi_set_eos();
 }
 
+static u32 pci_orig;
+
+/**
+ * @brief Back up the PCI address to make sure we do not mess up the OS
+ */
+static void smi_backup_pci_address(void)
+{
+	pci_orig = inl(0xcf8);
+}
+
+/**
+ * @brief Restore the PCI address previously backed up
+ */
+static void smi_restore_pci_address(void)
+{
+	outl(pci_orig, 0xcf8);
+}
+
 /**
  * @brief Interrupt handler for SMI#
  *
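Why the 0xcf8 save/restore above is needed: 0xcf8 is the PCI CONFIG_ADDRESS
port, and an SMI# can arrive between the OS writing an address and reading
the data port at 0xcfc, so any config cycle the handler performs would
silently retarget the OS's pending read. A minimal sketch; the bus/dev/fn/
reg/val names are hypothetical:

	/* OS driver code, about to be interrupted */
	outl(0x80000000 | (bus << 16) | (dev << 11) | (fn << 8) | reg, 0xcf8);
	/* <-- SMI# fires here; the handler runs its own 0xcf8/0xcfc cycles */
	val = inl(0xcfc);	/* without backup/restore: wrong register */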
@@ -107,6 +125,8 @@ void smi_handler(u32 smm_revision)
 		return;
 	}
 
+	smi_backup_pci_address();
+
 	node = nodeid();
 
 	console_init();
@@ -147,6 +167,8 @@ void smi_handler(u32 smm_revision)
 	if (southbridge_smi_handler)
 		southbridge_smi_handler(node, &state_save);
 
+	smi_restore_pci_address();
+
 	smi_release_lock();
 
 	/* De-assert SMI# signal to allow another SMI */
@@ -22,6 +22,8 @@
 // Make sure no stage 2 code is included:
 #define __PRE_RAM__
 
+#if !defined(CONFIG_NORTHBRIDGE_AMD_AMDK8) && !defined(CONFIG_NORTHBRIDGE_AMD_FAM10)
+
 // FIXME: Is this piece of code southbridge specific, or
 // can it be cleaned up so this include is not required?
 // It's needed right now because we get our DEFAULT_PMBASE from

@@ -177,4 +179,4 @@ smm_relocate:
 	/* That's it. return */
 	rsm
 smm_relocation_end:
-
+#endif