(1) SmmRelocationSemaphoreComplete32() runs in 32-bit mode, so wrap it in
    a (BITS 32 ... BITS 64) bracket.

(2) SmmRelocationSemaphoreComplete32() currently compiles to:

    > 000002AE C6050000000001          mov byte [dword 0x0],0x1
    > 000002B5 FF2500000000            jmp dword [dword 0x0]

    where the first instruction is patched with the contents of
    "mRebasedFlag" (so that (*mRebasedFlag) is set to 1), and the second
    instruction is patched with the address of
    "mSmmRelocationOriginalAddress" (so that we jump to
    "mSmmRelocationOriginalAddress").

    In its current form the first instruction could not be patched with
    PatchInstructionX86(), given that the operand to patch is not encoded
    in the trailing bytes of the instruction. Therefore, adopt an
    EAX-based version, inspired by both the IA32 and X64 variants of
    SmmRelocationSemaphoreComplete():

    > 000002AE 50                      push eax
    > 000002AF B800000000              mov eax,0x0
    > 000002B4 C60001                  mov byte [eax],0x1
    > 000002B7 58                      pop eax
    > 000002B8 FF2500000000            jmp dword [dword 0x0]

    Here both instructions can be patched with PatchInstructionX86(), and
    the DBs can be replaced with native NASM syntax.

(3) Turn the "mRebasedFlagAddr32" and "mSmmRelocationOriginalAddressPtr32"
    variables into markers that suit PatchInstructionX86().

Cc: Eric Dong <eric.dong@intel.com>
Cc: Michael D Kinney <michael.d.kinney@intel.com>
Ref: https://bugzilla.tianocore.org/show_bug.cgi?id=866
Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Laszlo Ersek <lersek@redhat.com>
Reviewed-by: Liming Gao <liming.gao@intel.com>
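
For illustration, here is a minimal C sketch of how the two new markers
might be patched with PatchInstructionX86() from MdePkg's BaseLib. The
helper name PatchSemaphoreComplete32(), and the declarations used for
"mRebasedFlag" and for the 32-bit slot "mOriginalSmiHandler32", are
assumptions made for this example; the actual call sites and variable
types in PiSmmCpuDxeSmm may differ.

  #include <Base.h>
  #include <Library/BaseLib.h>

  //
  // Markers emitted by SmmInit.nasm; each labels the first byte *after*
  // the instruction whose trailing bytes are to be patched.
  //
  extern X86_ASSEMBLY_PATCH_LABEL  gPatchRebasedFlagAddr32;
  extern X86_ASSEMBLY_PATCH_LABEL  gPatchSmmRelocationOriginalAddressPtr32;

  //
  // Hypothetical stand-ins for the real module-global variables.
  //
  extern volatile BOOLEAN  *mRebasedFlag;           // set to TRUE once rebased
  extern UINT32            mOriginalSmiHandler32;   // holds the original SMI
                                                    // handler's 32-bit address

  VOID
  PatchSemaphoreComplete32 (                        // hypothetical helper
    VOID
    )
  {
    //
    // Patch the 4-byte immediate of "mov eax, strict dword 0" with the
    // address of the rebased flag, so that "mov byte [eax], 1" sets it.
    //
    PatchInstructionX86 (
      gPatchRebasedFlagAddr32,
      (UINT64)(UINTN)mRebasedFlag,
      4
      );
    //
    // Patch the 4-byte displacement of "jmp dword [dword 0]" with the
    // address of the 32-bit slot holding the jump target.
    //
    PatchInstructionX86 (
      gPatchSmmRelocationOriginalAddressPtr32,
      (UINT64)(UINTN)&mOriginalSmiHandler32,
      4
      );
  }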
;------------------------------------------------------------------------------ ;
; Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution.  The full text of the license may be found at
; http://opensource.org/licenses/bsd-license.php.
;
; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
;
; Module Name:
;
;   SmmInit.nasm
;
; Abstract:
;
;   Functions for relocating SMBASE's for all processors
;
;-------------------------------------------------------------------------------

extern ASM_PFX(SmmInitHandler)
extern ASM_PFX(mRebasedFlag)
extern ASM_PFX(mSmmRelocationOriginalAddress)

global ASM_PFX(gPatchSmmCr3)
global ASM_PFX(gPatchSmmCr4)
global ASM_PFX(gPatchSmmCr0)
global ASM_PFX(gPatchSmmInitStack)
global ASM_PFX(gcSmiInitGdtr)
global ASM_PFX(gcSmmInitSize)
global ASM_PFX(gcSmmInitTemplate)
global ASM_PFX(gPatchRebasedFlagAddr32)
global ASM_PFX(gPatchSmmRelocationOriginalAddressPtr32)

%define LONG_MODE_CS 0x38

    DEFAULT REL
    SECTION .text

ASM_PFX(gcSmiInitGdtr):
            DW      0
            DQ      0

global ASM_PFX(SmmStartup)

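;
; Default SMI entry code for SMBASE relocation.  Reached from
; gcSmmInitTemplate (below) while the processor is still in the real-mode
; environment established by the SMI; enables paging, switches directly to
; long mode, and calls the C function SmmInitHandler().
;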
    BITS 16
ASM_PFX(SmmStartup):
    mov     eax, 0x80000001             ; read capability
    cpuid
    mov     ebx, edx                    ; rdmsr will change edx. keep it in ebx.
    mov     eax, strict dword 0         ; source operand will be patched
ASM_PFX(gPatchSmmCr3):
    mov     cr3, eax
    o32 lgdt [cs:ebp + (ASM_PFX(gcSmiInitGdtr) - ASM_PFX(SmmStartup))]
    mov     eax, strict dword 0         ; source operand will be patched
ASM_PFX(gPatchSmmCr4):
    or      ah, 2                       ; enable XMM registers access
    mov     cr4, eax
    mov     ecx, 0xc0000080             ; IA32_EFER MSR
    rdmsr
    or      ah, BIT0                    ; set LME bit
    test    ebx, BIT20                  ; check NXE capability
    jz      .1
    or      ah, BIT3                    ; set NXE bit
.1:
    wrmsr
    mov     eax, strict dword 0         ; source operand will be patched
ASM_PFX(gPatchSmmCr0):
    mov     cr0, eax                    ; enable protected mode & paging
    jmp     LONG_MODE_CS : dword 0      ; offset will be patched to @LongMode
@PatchLongModeOffset:

    BITS 64
@LongMode:                              ; long-mode starts here
    mov     rsp, strict qword 0         ; source operand will be patched
ASM_PFX(gPatchSmmInitStack):
    and     sp, 0xfff0                  ; make sure RSP is 16-byte aligned
    ;
    ; According to X64 calling convention, XMM0~5 are volatile, we need to save
    ; them before calling C-function.
    ;
    sub     rsp, 0x60
    movdqa  [rsp], xmm0
    movdqa  [rsp + 0x10], xmm1
    movdqa  [rsp + 0x20], xmm2
    movdqa  [rsp + 0x30], xmm3
    movdqa  [rsp + 0x40], xmm4
    movdqa  [rsp + 0x50], xmm5

    add     rsp, -0x20                  ; allocate shadow space for the callee
    call    ASM_PFX(SmmInitHandler)
    add     rsp, 0x20

    ;
    ; Restore XMM0~5 after calling C-function.
    ;
    movdqa  xmm0, [rsp]
    movdqa  xmm1, [rsp + 0x10]
    movdqa  xmm2, [rsp + 0x20]
    movdqa  xmm3, [rsp + 0x30]
    movdqa  xmm4, [rsp + 0x40]
    movdqa  xmm5, [rsp + 0x50]

    rsm

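;
; SMI handler template copied by the C code to each processor's SMI entry
; point (SMBASE + 0x8000) for relocation.  It loads the absolute address of
; SmmStartup from @L1 (fixed up at boot by PiSmmCpuSmmInitFixupAddress),
; rebases it against the default SMBASE, and jumps to it.
;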
    BITS 16
ASM_PFX(gcSmmInitTemplate):
    mov ebp, [cs:@L1 - ASM_PFX(gcSmmInitTemplate) + 0x8000]
    sub ebp, 0x30000
    jmp ebp
@L1:
    DQ      0 ; ASM_PFX(SmmStartup)

ASM_PFX(gcSmmInitSize): DW $ - ASM_PFX(gcSmmInitTemplate)

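;
; Semaphore code running in 64-bit mode: sets *mRebasedFlag to signal that
; this processor's SMBASE relocation is complete, then resumes the original
; SMI handler.
;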
    BITS 64
global ASM_PFX(SmmRelocationSemaphoreComplete)
ASM_PFX(SmmRelocationSemaphoreComplete):
    push    rax
    mov     rax, [ASM_PFX(mRebasedFlag)]
    mov     byte [rax], 1
    pop     rax
    jmp     [ASM_PFX(mSmmRelocationOriginalAddress)]

;
; Semaphore code running in 32-bit mode
;
    BITS 32
global ASM_PFX(SmmRelocationSemaphoreComplete32)
ASM_PFX(SmmRelocationSemaphoreComplete32):
    push    eax
    mov     eax, strict dword 0         ; source operand will be patched
ASM_PFX(gPatchRebasedFlagAddr32):
    mov     byte [eax], 1
    pop     eax
    jmp     dword [dword 0]             ; destination will be patched
ASM_PFX(gPatchSmmRelocationOriginalAddressPtr32):

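;
; Fix up, at boot, the absolute addresses that cannot be computed at build
; time: the 32-bit offset of @LongMode is stored into the offset field of
; the far jump ending at @PatchLongModeOffset, and the 64-bit address of
; SmmStartup is stored at @L1.
;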
    BITS 64
global ASM_PFX(PiSmmCpuSmmInitFixupAddress)
ASM_PFX(PiSmmCpuSmmInitFixupAddress):
    lea     rax, [@LongMode]
    lea     rcx, [@PatchLongModeOffset - 6]
    mov     dword [rcx], eax

    lea     rax, [ASM_PFX(SmmStartup)]
    lea     rcx, [@L1]
    mov     qword [rcx], rax
    ret