cpu/x86/smm/smm_stub: Add x86_64 support
Enable long mode in the SMM handler. x86_32 isn't affected by this change.

* Enter long mode
* Add a 64-bit entry to the GDT
* Use x86_64 SysV ABI calling conventions for the C code entry
* Change smm_module_params' cpu to size_t, as 'push' stores a native integer
* Drop back to protected mode after the C handler returns

NOTE: This commit does NOT introduce a new security model. It uses the same page tables as the rest of the firmware does. This can be a security risk if someone is able to manipulate the page tables stored in ROM at runtime. USE FOR TESTING ONLY!

Tested on a Lenovo T410 with additional x86_64 patches.

Change-Id: I26300492e4be62ddd5d80525022c758a019d63a1
Signed-off-by: Patrick Rudolph <patrick.rudolph@9elements.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/37392
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Arthur Heymans <arthur@aheymans.xyz>
Reviewed-by: Eugene Myers <cedarhouse1@comcast.net>
commit 24bb8036c9
parent 5eeead2d73
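For context on the calling-convention and type changes listed above: both entry paths build the same struct smm_module_params, but on x86_64 its address reaches the C entry point in %rdi per the SysV AMD64 ABI instead of being pushed as a stack argument. A minimal handler-side sketch, assuming field types that match the push comments in the diff below; the handler name is invented for illustration:

#include <stddef.h>
#include <stdint.h>

struct smm_runtime;

/* Mirrors the fields the stub pushes; the real definition lives in
 * <cpu/x86/smm.h> (see the last hunk below). */
struct smm_module_params {
	void *arg;
	size_t cpu;                         /* was 'int' before this change */
	const struct smm_runtime *runtime;
	const uintptr_t *canary;
};

/* Illustrative handler: on x86_64 'p' arrives in %rdi (SysV AMD64 ABI);
 * on x86_32 the stub pushes the struct's address onto the stack instead. */
void example_smm_c_handler(struct smm_module_params *p)
{
	if ((uintptr_t)p->canary != *p->canary)
		return;                     /* stack overflow detected */
	/* ... per-CPU relocation work using p->cpu, p->arg, p->runtime ... */
}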
@@ -10,6 +10,7 @@
  */

+#include <cpu/x86/cr.h>
 #include <cpu/x86/msr.h>

 .code32
 .section ".module_parameters", "aw", @progbits
@@ -148,8 +149,8 @@ smm_trampoline32:
 	pushl	$0x0
 	mov	%esp, %ebp

-	/* Allocate locals (fxsave) */
-	subl	$0x4, %esp
+	/* Allocate locals (fxsave, efer_backup) */
+	subl	$0xc, %esp

 	/* calculate fxsave location */
 	mov	fxsave_area, %edi
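A note on the larger local area: the 0xc bytes reserved here are three 4-byte slots below %ebp, the fxsave-area pointer at -0x4 plus the two halves of the IA32_EFER backup at -0x8/-0xc that the x86_64 path below fills and later restores. A C mirror of that frame, with the struct and field names invented purely for illustration:

#include <stdint.h>

/* Illustration only: the stub addresses these slots directly through
 * %ebp-relative offsets; it never declares such a struct. */
struct stub_locals {
	uint32_t efer_hi;    /* -0xc(%ebp): %edx half of IA32_EFER */
	uint32_t efer_lo;    /* -0x8(%ebp): %eax half of IA32_EFER */
	uint32_t fxsave_ptr; /* -0x4(%ebp): per-CPU fxsave area, 0 if unused */
};

_Static_assert(sizeof(struct stub_locals) == 0xc,
	       "matches the 'subl $0xc, %esp' above");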
@@ -177,22 +178,65 @@ smm_trampoline32:
 	/* Align stack to 16 bytes. Another 32 bytes are pushed below. */
 	andl	$0xfffffff0, %esp

+#ifdef __x86_64__
+	mov	%ecx, %edi
+	/* Backup IA32_EFER. Preserves ebx. */
+	movl	$(IA32_EFER), %ecx
+	rdmsr
+	movl	%eax, -0x8(%ebp)
+	movl	%edx, -0xc(%ebp)
+
+	/* entry64.inc preserves ebx, esi, edi */
+#include <cpu/x86/64bit/entry64.inc>
+	mov	%edi, %ecx
+
+#endif
+
 	/* Call into the c-based SMM relocation function with the platform
 	 * parameters. Equivalent to:
 	 *   struct arg = { c_handler_params, cpu_num, smm_runtime, canary };
 	 *   c_handler(&arg)
 	 */
+#ifdef __x86_64__
+	push	%rbx /* uintptr_t *canary */
+	push	$(smm_runtime)
+	push	%rcx /* size_t cpu */
+	push	c_handler_arg /* void *arg */
+
+	mov	%rsp, %rdi /* *arg */
+
+	movl	c_handler, %eax
+	call	*%rax
+
+	/*
+	 * The only reason to go back to protected mode is that RSM doesn't restore
+	 * MSR registers and MSR IA32_EFER was modified by entering long mode.
+	 * Drop to protected mode to safely operate on the IA32_EFER MSR.
+	 */

+	/* Disable long mode. */
+#include <cpu/x86/64bit/exit32.inc>
+
+	/* Restore IA32_EFER as RSM doesn't restore MSRs. */
+	movl	$(IA32_EFER), %ecx
+	rdmsr
+	movl	-0x8(%ebp), %eax
+	movl	-0xc(%ebp), %edx
+
+	wrmsr
+
+#else
 	push	$0x0 /* Padding */
 	push	$0x0 /* Padding */
 	push	$0x0 /* Padding */
 	push	%ebx /* uintptr_t *canary */
 	push	$(smm_runtime)
-	push	%ecx /* int cpu */
+	push	%ecx /* size_t cpu */
 	push	c_handler_arg /* void *arg */
 	push	%esp /* smm_module_params *arg (allocated on stack). */
 	mov	c_handler, %eax
 	call	*%eax
+
+#endif
 	/* Retrieve fxsave location. */
 	mov	-4(%ebp), %edi
 	test	%edi, %edi
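The comment in the x86_64 branch is the key constraint here: RSM restores the saved CPU state but not MSRs, and entering long mode modifies IA32_EFER, so the stub has to undo that itself before RSM. Roughly the same logic in C, using coreboot's msr_t accessors; this is only a sketch of what the assembly above does, not code from the tree:

#include <cpu/x86/msr.h>

/* Sketch of the backup/restore bracket around entry64.inc/exit32.inc. */
static msr_t efer_backup;

static void backup_efer(void)
{
	/* Saved before EFER.LME gets set for long mode. */
	efer_backup = rdmsr(IA32_EFER);
}

static void restore_efer(void)
{
	/* Called after dropping back to protected mode, so the EFER value
	 * left behind at RSM matches what the OS had before the SMI. */
	wrmsr(IA32_EFER, efer_backup);
}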
@@ -75,7 +75,7 @@ struct smm_runtime {

 struct smm_module_params {
 	void *arg;
-	int cpu;
+	size_t cpu;
 	const struct smm_runtime *runtime;
 	/* A canary value that has been placed at the end of the stack.
 	 * If (uintptr_t)canary != *canary then a stack overflow has occurred.
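On the type change itself: the stub fills this field with a single 'push', which stores 4 bytes in protected mode and 8 bytes in long mode, so a pointer-sized cpu keeps the pushed words and the C layout in agreement on both targets, and the argument block stays at the 32 bytes the alignment comment in smm_stub.S expects. A small compile-time sketch of that assumption; the mirror struct is renamed here only to stay self-contained:

#include <stddef.h>
#include <stdint.h>

struct smm_runtime;

/* Field-for-field mirror of struct smm_module_params above. */
struct smm_module_params_mirror {
	void *arg;
	size_t cpu;
	const struct smm_runtime *runtime;
	const uintptr_t *canary;
};

/* 'push %ecx' writes one 4-byte word, 'push %rcx' one 8-byte word:
 * the field a push fills must be pointer-sized on either target. */
_Static_assert(sizeof(size_t) == sizeof(void *),
	       "size_t matches the native push width");
_Static_assert(sizeof(struct smm_module_params_mirror) == 4 * sizeof(void *),
	       "the four value pushes cover the whole struct");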