system76-edk2/UefiCpuPkg/Library/SmmRelocationLib/X64/SmmInit.nasm
Jiaxin Wu 51fcd2023b UefiCpuPkg/SmmRelocationLib: Add SmmRelocationLib library instance
This patch separates the SMBASE relocation logic from the
PiSmmCpuDxeSmm driver and moves it behind the SmmRelocationInit
interface. It keeps the original implementation of most
functions and leaves the definitions of global variables
intact. Further refinements to the code are planned for
subsequent patches.

Platforms that need SMM support shall consume this interface
for SMBASE relocation.

Note:
Before SmmRelocationLib, the PiSmmCpuDxeSmm driver allocated
the SMRAM used for the SMI handler and save state area of
each processor with Smst->AllocatePages(). With
SmmRelocationLib, that allocation moves to the early PEI
phase, where the Smst->AllocatePages() service is not
available, so the memory is instead split out of the SMRAM
regions reported in the gEfiSmmSmramMemoryGuid HOB.

Platforms must therefore produce the gEfiSmmSmramMemoryGuid
HOB for SmmRelocationLib to consume.
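
For illustration, a minimal sketch of how a platform PEIM could
produce that HOB. The TSEG_BASE/TSEG_SIZE values and the function
name are hypothetical; gEfiSmmSmramMemoryGuid, BuildGuidHob() and
EFI_SMRAM_HOB_DESCRIPTOR_BLOCK are the existing edk2 definitions
(the GUID must also be listed in the PEIM's [Guids] section):

  #include <PiPei.h>
  #include <Library/DebugLib.h>
  #include <Library/HobLib.h>
  #include <Guid/SmramMemoryReserve.h>

  //
  // Hypothetical TSEG location; a real platform derives this from
  // chipset registers.
  //
  #define TSEG_BASE  0x7F000000ULL
  #define TSEG_SIZE  SIZE_8MB

  VOID
  PlatformBuildSmramHob (
    VOID
    )
  {
    EFI_SMRAM_HOB_DESCRIPTOR_BLOCK  *Block;

    //
    // One descriptor covering all of TSEG; SmmRelocationLib later
    // splits the SMI handler and save state allocation out of it.
    //
    Block = BuildGuidHob (
              &gEfiSmmSmramMemoryGuid,
              sizeof (EFI_SMRAM_HOB_DESCRIPTOR_BLOCK)
              );
    ASSERT (Block != NULL);

    Block->NumberOfSmmReservedRegions  = 1;
    Block->Descriptor[0].PhysicalStart = TSEG_BASE;
    Block->Descriptor[0].CpuStart      = TSEG_BASE;
    Block->Descriptor[0].PhysicalSize  = TSEG_SIZE;
    Block->Descriptor[0].RegionState   = EFI_SMRAM_CLOSED | EFI_CACHEABLE;
  }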

Cc: Ray Ni <ray.ni@intel.com>
Cc: Zeng Star <star.zeng@intel.com>
Cc: Gerd Hoffmann <kraxel@redhat.com>
Cc: Rahul Kumar <rahul1.kumar@intel.com>
Signed-off-by: Jiaxin Wu <jiaxin.wu@intel.com>
Reviewed-by: Ray Ni <ray.ni@intel.com>
2024-05-08 01:53:58 +00:00

;------------------------------------------------------------------------------ ;
; Copyright (c) 2024, Intel Corporation. All rights reserved.<BR>
; SPDX-License-Identifier: BSD-2-Clause-Patent
;
; Module Name:
;
; SmmInit.nasm
;
; Abstract:
;
; Functions for relocating SMBASE's for all processors
;
;-------------------------------------------------------------------------------
%include "StuffRsbNasm.inc"
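;
; StuffRsbNasm.inc provides StuffRsb64, used before RSM below to fill
; the CPU's Return Stack Buffer so that later returns cannot speculate
; to attacker-controlled addresses.
;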
extern ASM_PFX(SmmInitHandler)
extern ASM_PFX(mRebasedFlag)
extern ASM_PFX(mSmmRelocationOriginalAddress)
global ASM_PFX(gPatchSmmCr3)
global ASM_PFX(gPatchSmmCr4)
global ASM_PFX(gPatchSmmCr0)
global ASM_PFX(gPatchSmmInitStack)
global ASM_PFX(gcSmiInitGdtr)
global ASM_PFX(gcSmmInitSize)
global ASM_PFX(gcSmmInitTemplate)
global ASM_PFX(gPatchRebasedFlagAddr32)
global ASM_PFX(gPatchSmmRelocationOriginalAddressPtr32)
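;
; Each gPatch* symbol below marks the immediate of a
; 'mov reg, strict dword/qword 0' that the C code patches at boot
; (via PatchInstructionX86()) before the first SMI is taken.
;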
%define LONG_MODE_CS 0x38
SECTION .data
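;
; GDT used for the switch into long mode. Attribute bytes: 0x9b =
; present ring-0 code (execute/read, accessed), 0x93 = present ring-0
; data (read/write, accessed). Flag bytes: 0xcf = 4 KiB granularity,
; 32-bit default size; 0x8f = 4 KiB granularity, 16-bit; 0xaf = 4 KiB
; granularity, 64-bit (L bit set).
;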
NullSeg: DQ 0                   ; reserved by architecture
CodeSeg32:
    DW      -1                  ; LimitLow
    DW      0                   ; BaseLow
    DB      0                   ; BaseMid
    DB      0x9b
    DB      0xcf                ; LimitHigh
    DB      0                   ; BaseHigh
ProtModeCodeSeg32:
    DW      -1                  ; LimitLow
    DW      0                   ; BaseLow
    DB      0                   ; BaseMid
    DB      0x9b
    DB      0xcf                ; LimitHigh
    DB      0                   ; BaseHigh
ProtModeSsSeg32:
    DW      -1                  ; LimitLow
    DW      0                   ; BaseLow
    DB      0                   ; BaseMid
    DB      0x93
    DB      0xcf                ; LimitHigh
    DB      0                   ; BaseHigh
DataSeg32:
    DW      -1                  ; LimitLow
    DW      0                   ; BaseLow
    DB      0                   ; BaseMid
    DB      0x93
    DB      0xcf                ; LimitHigh
    DB      0                   ; BaseHigh
CodeSeg16:
    DW      -1
    DW      0
    DB      0
    DB      0x9b
    DB      0x8f
    DB      0
DataSeg16:
    DW      -1
    DW      0
    DB      0
    DB      0x93
    DB      0x8f
    DB      0
CodeSeg64:
    DW      -1                  ; LimitLow
    DW      0                   ; BaseLow
    DB      0                   ; BaseMid
    DB      0x9b
    DB      0xaf                ; LimitHigh
    DB      0                   ; BaseHigh
GDT_SIZE equ $ - NullSeg

ASM_PFX(gcSmiInitGdtr):
    DW      GDT_SIZE - 1
    DQ      NullSeg
DEFAULT REL
SECTION .text
global ASM_PFX(SmmStartup)
BITS 16
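;
; SmmStartup is entered from gcSmmInitTemplate below in 16-bit
; real-address mode, with CS based at the default SMBASE (0x30000)
; and EBP = flat address of SmmStartup - 0x30000, so that
; [cs:ebp + x] addresses flat SmmStartup + x.
;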
ASM_PFX(SmmStartup):
    mov     eax, 0x80000001     ; read capability
    cpuid
    mov     ebx, edx            ; rdmsr will change edx. keep it in ebx.
    mov     eax, strict dword 0 ; source operand will be patched
ASM_PFX(gPatchSmmCr3):
    mov     cr3, eax
    o32 lgdt [cs:ebp + (ASM_PFX(gcSmiInitGdtr) - ASM_PFX(SmmStartup))]
    mov     eax, strict dword 0 ; source operand will be patched
ASM_PFX(gPatchSmmCr4):
    or      ah, 2               ; enable XMM registers access
    mov     cr4, eax
    mov     ecx, 0xc0000080     ; IA32_EFER MSR
    rdmsr
    or      ah, BIT0            ; set LME bit
    test    ebx, BIT20          ; check NXE capability
    jz      .1
    or      ah, BIT3            ; set NXE bit
.1:
    wrmsr
    mov     eax, strict dword 0 ; source operand will be patched
ASM_PFX(gPatchSmmCr0):
    mov     cr0, eax            ; enable protected mode & paging
    jmp     LONG_MODE_CS : dword 0 ; offset will be patched to @LongMode
@PatchLongModeOffset:

BITS 64
@LongMode:                      ; long-mode starts here
    mov     rsp, strict qword 0 ; source operand will be patched
ASM_PFX(gPatchSmmInitStack):
    and     sp, 0xfff0          ; make sure RSP is 16-byte aligned
    ;
    ; According to the X64 calling convention, XMM0~5 are volatile, so we
    ; need to save them before calling the C function.
    ;
    sub     rsp, 0x60
    movdqa  [rsp], xmm0
    movdqa  [rsp + 0x10], xmm1
    movdqa  [rsp + 0x20], xmm2
    movdqa  [rsp + 0x30], xmm3
    movdqa  [rsp + 0x40], xmm4
    movdqa  [rsp + 0x50], xmm5
    add     rsp, -0x20          ; reserve the 32-byte shadow space for the call
    call    ASM_PFX(SmmInitHandler)
    add     rsp, 0x20
    ;
    ; Restore XMM0~5 after calling the C function.
    ;
    movdqa  xmm0, [rsp]
    movdqa  xmm1, [rsp + 0x10]
    movdqa  xmm2, [rsp + 0x20]
    movdqa  xmm3, [rsp + 0x30]
    movdqa  xmm4, [rsp + 0x40]
    movdqa  xmm5, [rsp + 0x50]

    StuffRsb64
    rsm
BITS 16
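;
; This 16-bit stub is copied to [SMBASE + 0x8000], the SMI entry point.
; @L1 holds the flat address of SmmStartup (patched in by
; SmmInitFixupAddress); subtracting the default SMBASE (0x30000) turns
; it into an EIP that reaches SmmStartup through the SMM CS base.
;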
ASM_PFX(gcSmmInitTemplate):
    mov     ebp, [cs:@L1 - ASM_PFX(gcSmmInitTemplate) + 0x8000]
    sub     ebp, 0x30000
    jmp     ebp
@L1:
    DQ      0                   ; ASM_PFX(SmmStartup)
ASM_PFX(gcSmmInitSize): DW $ - ASM_PFX(gcSmmInitTemplate)
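;
; Semaphore code running in 64-bit mode: mark this processor's
; relocation as complete, then continue at the original SMI handler
; address.
;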
BITS 64
global ASM_PFX(SmmRelocationSemaphoreComplete)
ASM_PFX(SmmRelocationSemaphoreComplete):
    push    rax
    mov     rax, [ASM_PFX(mRebasedFlag)]
    mov     byte [rax], 1
    pop     rax
    jmp     [ASM_PFX(mSmmRelocationOriginalAddress)]
;
; Semaphore code running in 32-bit mode
;
BITS 32
global ASM_PFX(SmmRelocationSemaphoreComplete32)
ASM_PFX(SmmRelocationSemaphoreComplete32):
    push    eax
    mov     eax, strict dword 0 ; source operand will be patched
ASM_PFX(gPatchRebasedFlagAddr32):
    mov     byte [eax], 1
    pop     eax
    jmp     dword [dword 0]     ; destination will be patched
ASM_PFX(gPatchSmmRelocationOriginalAddressPtr32):
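;
; Fix up the two spots that cannot be resolved at build time: the
; 32-bit offset of the far jump to @LongMode (its dword operand sits
; 6 bytes before @PatchLongModeOffset: 4 offset bytes plus a 2-byte
; selector) and the 64-bit address of SmmStartup stored at @L1.
;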
BITS 64
global ASM_PFX(SmmInitFixupAddress)
ASM_PFX(SmmInitFixupAddress):
    lea     rax, [@LongMode]
    lea     rcx, [@PatchLongModeOffset - 6]
    mov     dword [rcx], eax
    lea     rax, [ASM_PFX(SmmStartup)]
    lea     rcx, [@L1]
    mov     qword [rcx], rax
    ret