UefiCpuPkg/PiSmmCpuDxeSmm: remove *.S and *.asm assembly files

All edk2 toolchains use NASM for compiling X86 assembly source code. We
plan to remove X86 *.S and *.asm files globally, in order to reduce
maintenance and confusion:

http://mid.mail-archive.com/4A89E2EF3DFEDB4C8BFDE51014F606A14E1B9F76@SHSMSX104.ccr.corp.intel.com
https://lists.01.org/pipermail/edk2-devel/2018-March/022690.html
https://bugzilla.tianocore.org/show_bug.cgi?id=881

Let's start with UefiCpuPkg/PiSmmCpuDxeSmm: remove the *.S and *.asm
dialects (both Ia32 and X64) of the SmmInit, SmiEntry, SmiException and
MpFuncs sources.

Cc: Eric Dong <eric.dong@intel.com>
Cc: Michael D Kinney <michael.d.kinney@intel.com>
Ref: https://bugzilla.tianocore.org/show_bug.cgi?id=866
Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Laszlo Ersek <lersek@redhat.com>
Reviewed-by: Andrew Fish <afish@apple.com>
Reviewed-by: Liming Gao <liming.gao@intel.com>
Author:  Laszlo Ersek
Date:    2018-02-01 22:35:18 +01:00
Parent:  8596c14090
Commit:  38a5df04ef

17 changed files with 0 additions and 4294 deletions

@@ -1,204 +0,0 @@
#------------------------------------------------------------------------------
#
# Copyright (c) 2006 - 2015, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php.
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
# Module Name:
#
# MpFuncs.S
#
# Abstract:
#
# This is the assembly code for Multi-processor S3 support
#
#------------------------------------------------------------------------------
.equ VacantFlag, 0x0
.equ NotVacantFlag, 0xff
.equ LockLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart
.equ StackStartAddressLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x08
.equ StackSizeLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x10
.equ CProcedureLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x18
.equ GdtrLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x20
.equ IdtrLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x2A
.equ BufferStartLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x34
.equ Cr3OffsetLocation, RendezvousFunnelProcEnd - RendezvousFunnelProcStart + 0x38
#-------------------------------------------------------------------------------------
#RendezvousFunnelProc procedure follows. All APs execute their procedure. This
#procedure serializes all the AP processors through an Init sequence. It must be
#noted that APs arrive here very raw...ie: real mode, no stack.
#ALSO THIS PROCEDURE IS EXECUTED BY APs ONLY ON 16 BIT MODE. HENCE THIS PROC
#IS IN MACHINE CODE.
#-------------------------------------------------------------------------------------
#RendezvousFunnelProc (&WakeUpBuffer,MemAddress);
.code:
ASM_GLOBAL ASM_PFX(RendezvousFunnelProc)
ASM_PFX(RendezvousFunnelProc):
RendezvousFunnelProcStart:
# At this point CS = 0x(vv00) and ip= 0x0.
.byte 0x8c,0xc8 # mov ax, cs
.byte 0x8e,0xd8 # mov ds, ax
.byte 0x8e,0xc0 # mov es, ax
.byte 0x8e,0xd0 # mov ss, ax
.byte 0x33,0xc0 # xor ax, ax
.byte 0x8e,0xe0 # mov fs, ax
.byte 0x8e,0xe8 # mov gs, ax
flat32Start:
.byte 0xBE
.word BufferStartLocation
.byte 0x66,0x8B,0x14 # mov edx,dword ptr [si] ; EDX is keeping the start address of wakeup buffer
.byte 0xBE
.word Cr3OffsetLocation
.byte 0x66,0x8B,0xC # mov ecx,dword ptr [si] ; ECX is keeping the value of CR3
.byte 0xBE
.word GdtrLocation
.byte 0x66 # db 66h
.byte 0x2E,0xF,0x1,0x14 # lgdt fword ptr cs:[si]
.byte 0xBE
.word IdtrLocation
.byte 0x66 # db 66h
.byte 0x2E,0xF,0x1,0x1C # lidt fword ptr cs:[si]
.byte 0x33,0xC0 # xor ax, ax
.byte 0x8E,0xD8 # mov ds, ax
.byte 0xF,0x20,0xC0 # mov eax, cr0 ; Get control register 0
.byte 0x66,0x83,0xC8,0x1 # or eax, 000000001h ; Set PE bit (bit #0)
.byte 0xF,0x22,0xC0 # mov cr0, eax
FLAT32_JUMP:
.byte 0x66,0x67,0xEA # far jump
.long 0x0 # 32-bit offset
.word 0x20 # 16-bit selector
PMODE_ENTRY: # protected mode entry point
.byte 0x66,0xB8,0x18,0x0 # mov ax, 18h
.byte 0x66,0x8E,0xD8 # mov ds, ax
.byte 0x66,0x8E,0xC0 # mov es, ax
.byte 0x66,0x8E,0xE0 # mov fs, ax
.byte 0x66,0x8E,0xE8 # mov gs, ax
.byte 0x66,0x8E,0xD0 # mov ss, ax ; Flat mode setup.
.byte 0xF,0x20,0xE0 # mov eax, cr4
.byte 0xF,0xBA,0xE8,0x5 # bts eax, 5
.byte 0xF,0x22,0xE0 # mov cr4, eax
.byte 0xF,0x22,0xD9 # mov cr3, ecx
.byte 0x8B,0xF2 # mov esi, edx ; Save wakeup buffer address
.byte 0xB9
.long 0xC0000080 # mov ecx, 0c0000080h ; EFER MSR number.
.byte 0xF,0x32 # rdmsr ; Read EFER.
.byte 0xF,0xBA,0xE8,0x8 # bts eax, 8 ; Set LME=1.
.byte 0xF,0x30 # wrmsr ; Write EFER.
.byte 0xF,0x20,0xC0 # mov eax, cr0 ; Read CR0.
.byte 0xF,0xBA,0xE8,0x1F # bts eax, 31 ; Set PG=1.
.byte 0xF,0x22,0xC0 # mov cr0, eax ; Write CR0.
LONG_JUMP:
.byte 0x67,0xEA # far jump
.long 0x0 # 32-bit offset
.word 0x38 # 16-bit selector
LongModeStart:
movw $0x30,%ax
.byte 0x66
movw %ax,%ds
.byte 0x66
movw %ax,%es
.byte 0x66
movw %ax,%ss
movl %esi,%edi
addl $LockLocation, %edi
movb $NotVacantFlag, %al
TestLock:
xchgb (%edi), %al
cmpb $NotVacantFlag, %al
jz TestLock
ProgramStack:
movl %esi,%edi
addl $StackSizeLocation, %edi
movq (%edi), %rax
movl %esi,%edi
addl $StackStartAddressLocation, %edi
addq (%edi), %rax
movq %rax, %rsp
movq %rax, (%edi)
Releaselock:
movb $VacantFlag, %al
movl %esi,%edi
addl $LockLocation, %edi
xchgb (%edi), %al
#
# Call assembly function to initialize FPU.
#
movabsq $ASM_PFX(InitializeFloatingPointUnits), %rax
subq $0x20, %rsp
call *%rax
addq $0x20, %rsp
#
# Call C Function
#
movl %esi,%edi
addl $CProcedureLocation, %edi
movq (%edi), %rax
testq %rax, %rax
jz GoToSleep
subq $0x20, %rsp
call *%rax
addq $0x20, %rsp
GoToSleep:
cli
hlt
jmp .-2
RendezvousFunnelProcEnd:
#-------------------------------------------------------------------------------------
# AsmGetAddressMap (&AddressMap);
#-------------------------------------------------------------------------------------
# comments here for definition of address map
ASM_GLOBAL ASM_PFX(AsmGetAddressMap)
ASM_PFX(AsmGetAddressMap):
movabsq $RendezvousFunnelProcStart, %rax
movq %rax, (%rcx)
movq $(PMODE_ENTRY - RendezvousFunnelProcStart), 0x08(%rcx)
movq $(FLAT32_JUMP - RendezvousFunnelProcStart), 0x10(%rcx)
movq $(RendezvousFunnelProcEnd - RendezvousFunnelProcStart), 0x18(%rcx)
movq $(LongModeStart - RendezvousFunnelProcStart), 0x20(%rcx)
movq $(LONG_JUMP - RendezvousFunnelProcStart), 0x28(%rcx)
ret
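
The stores AsmGetAddressMap performs above (at [rcx], [rcx+0x08] ... [rcx+0x28]) publish the patch points of the rendezvous funnel to C code. A minimal C sketch of the receiving structure follows; the field names are illustrative assumptions, only the offsets are taken from the code above.

#include <Base.h>

//
// Illustrative sketch: offsets mirror AsmGetAddressMap's stores; the
// real module header defines its own structure and names.
//
typedef struct {
  UINT8  *RendezvousFunnelAddress;  // [rcx + 0x00]  RendezvousFunnelProcStart
  UINTN  PModeEntryOffset;          // [rcx + 0x08]  PMODE_ENTRY - start
  UINTN  FlatJumpOffset;            // [rcx + 0x10]  FLAT32_JUMP - start
  UINTN  Size;                      // [rcx + 0x18]  RendezvousFunnelProcEnd - start
  UINTN  LModeEntryOffset;          // [rcx + 0x20]  LongModeStart - start
  UINTN  LongJumpOffset;            // [rcx + 0x28]  LONG_JUMP - start
} MP_ASSEMBLY_ADDRESS_MAP_SKETCH;

VOID
EFIAPI
AsmGetAddressMap (
  OUT MP_ASSEMBLY_ADDRESS_MAP_SKETCH  *AddressMap
  );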

@@ -1,206 +0,0 @@
;------------------------------------------------------------------------------ ;
; Copyright (c) 2006 - 2015, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at
; http://opensource.org/licenses/bsd-license.php.
;
; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
;
; Module Name:
;
; MpFuncs.asm
;
; Abstract:
;
; This is the assembly code for Multi-processor S3 support
;
;-------------------------------------------------------------------------------
EXTERN InitializeFloatingPointUnits:PROC
VacantFlag Equ 00h
NotVacantFlag Equ 0ffh
LockLocation equ RendezvousFunnelProcEnd - RendezvousFunnelProcStart
StackStartAddressLocation equ LockLocation + 08h
StackSizeLocation equ LockLocation + 10h
CProcedureLocation equ LockLocation + 18h
GdtrLocation equ LockLocation + 20h
IdtrLocation equ LockLocation + 2Ah
BufferStartLocation equ LockLocation + 34h
Cr3OffsetLocation equ LockLocation + 38h
;-------------------------------------------------------------------------------------
;RendezvousFunnelProc procedure follows. All APs execute their procedure. This
;procedure serializes all the AP processors through an Init sequence. It must be
;noted that APs arrive here very raw...ie: real mode, no stack.
;ALSO THIS PROCEDURE IS EXECUTED BY APs ONLY ON 16 BIT MODE. HENCE THIS PROC
;IS IN MACHINE CODE.
;-------------------------------------------------------------------------------------
;RendezvousFunnelProc (&WakeUpBuffer,MemAddress);
;text SEGMENT
.code
RendezvousFunnelProc PROC
RendezvousFunnelProcStart::
; At this point CS = 0x(vv00) and ip= 0x0.
db 8ch, 0c8h ; mov ax, cs
db 8eh, 0d8h ; mov ds, ax
db 8eh, 0c0h ; mov es, ax
db 8eh, 0d0h ; mov ss, ax
db 33h, 0c0h ; xor ax, ax
db 8eh, 0e0h ; mov fs, ax
db 8eh, 0e8h ; mov gs, ax
flat32Start::
db 0BEh
dw BufferStartLocation ; mov si, BufferStartLocation
db 66h, 8Bh, 14h ; mov edx,dword ptr [si] ; EDX is keeping the start address of wakeup buffer
db 0BEh
dw Cr3OffsetLocation ; mov si, Cr3Location
db 66h, 8Bh, 0Ch ; mov ecx,dword ptr [si] ; ECX is keeping the value of CR3
db 0BEh
dw GdtrLocation ; mov si, GdtrProfile
db 66h ; db 66h
db 2Eh, 0Fh, 01h, 14h ; lgdt fword ptr cs:[si]
db 0BEh
dw IdtrLocation ; mov si, IdtrProfile
db 66h ; db 66h
db 2Eh, 0Fh, 01h, 1Ch ; lidt fword ptr cs:[si]
db 33h, 0C0h ; xor ax, ax
db 8Eh, 0D8h ; mov ds, ax
db 0Fh, 20h, 0C0h ; mov eax, cr0 ; Get control register 0
db 66h, 83h, 0C8h, 01h ; or eax, 000000001h ; Set PE bit (bit #0)
db 0Fh, 22h, 0C0h ; mov cr0, eax
FLAT32_JUMP::
db 66h, 67h, 0EAh ; far jump
dd 0h ; 32-bit offset
dw 20h ; 16-bit selector
PMODE_ENTRY:: ; protected mode entry point
db 66h, 0B8h, 18h, 00h ; mov ax, 18h
db 66h, 8Eh, 0D8h ; mov ds, ax
db 66h, 8Eh, 0C0h ; mov es, ax
db 66h, 8Eh, 0E0h ; mov fs, ax
db 66h, 8Eh, 0E8h ; mov gs, ax
db 66h, 8Eh, 0D0h ; mov ss, ax ; Flat mode setup.
db 0Fh, 20h, 0E0h ; mov eax, cr4
db 0Fh, 0BAh, 0E8h, 05h ; bts eax, 5
db 0Fh, 22h, 0E0h ; mov cr4, eax
db 0Fh, 22h, 0D9h ; mov cr3, ecx
db 8Bh, 0F2h ; mov esi, edx ; Save wakeup buffer address
db 0B9h
dd 0C0000080h ; mov ecx, 0c0000080h ; EFER MSR number.
db 0Fh, 32h ; rdmsr ; Read EFER.
db 0Fh, 0BAh, 0E8h, 08h ; bts eax, 8 ; Set LME=1.
db 0Fh, 30h ; wrmsr ; Write EFER.
db 0Fh, 20h, 0C0h ; mov eax, cr0 ; Read CR0.
db 0Fh, 0BAh, 0E8h, 1Fh ; bts eax, 31 ; Set PG=1.
db 0Fh, 22h, 0C0h ; mov cr0, eax ; Write CR0.
LONG_JUMP::
db 67h, 0EAh ; far jump
dd 0h ; 32-bit offset
dw 38h ; 16-bit selector
LongModeStart::
mov ax, 30h
mov ds, ax
mov es, ax
mov ss, ax
mov edi, esi
add edi, LockLocation
mov al, NotVacantFlag
TestLock::
xchg byte ptr [edi], al
cmp al, NotVacantFlag
jz TestLock
ProgramStack::
mov edi, esi
add edi, StackSizeLocation
mov rax, qword ptr [edi]
mov edi, esi
add edi, StackStartAddressLocation
add rax, qword ptr [edi]
mov rsp, rax
mov qword ptr [edi], rax
Releaselock::
mov al, VacantFlag
mov edi, esi
add edi, LockLocation
xchg byte ptr [edi], al
;
; Call assembly function to initialize FPU.
;
mov rax, InitializeFloatingPointUnits
sub rsp, 20h
call rax
add rsp, 20h
;
; Call C Function
;
mov edi, esi
add edi, CProcedureLocation
mov rax, qword ptr [edi]
test rax, rax
jz GoToSleep
sub rsp, 20h
call rax
add rsp, 20h
GoToSleep::
cli
hlt
jmp $-2
RendezvousFunnelProcEnd::
RendezvousFunnelProc ENDP
;-------------------------------------------------------------------------------------
; AsmGetAddressMap (&AddressMap);
;-------------------------------------------------------------------------------------
; comments here for definition of address map
AsmGetAddressMap PROC
mov rax, offset RendezvousFunnelProcStart
mov qword ptr [rcx], rax
mov qword ptr [rcx+8h], PMODE_ENTRY - RendezvousFunnelProcStart
mov qword ptr [rcx+10h], FLAT32_JUMP - RendezvousFunnelProcStart
mov qword ptr [rcx+18h], RendezvousFunnelProcEnd - RendezvousFunnelProcStart
mov qword ptr [rcx+20h], LongModeStart - RendezvousFunnelProcStart
mov qword ptr [rcx+28h], LONG_JUMP - RendezvousFunnelProcStart
ret
AsmGetAddressMap ENDP
END
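
The LockLocation through Cr3OffsetLocation equates above describe an exchange area that the caller places immediately after the copied funnel code (RendezvousFunnelProcEnd); the APs read their stack, CR3, GDTR/IDTR and C entry point from it. A hedged C view of that layout, assuming packed placement and illustrative field names; the byte offsets are exactly the ones the equates encode.

#include <Base.h>

#pragma pack (1)
typedef struct {
  UINT64  Lock;            // +0x00  only the first byte is xchg'ed (Vacant/NotVacant)
  UINT64  StackStart;      // +0x08  StackStartAddressLocation
  UINT64  StackSize;       // +0x10  StackSizeLocation
  UINT64  CProcedure;      // +0x18  AP C entry point; 0 sends the AP to GoToSleep
  UINT8   Gdtr[10];        // +0x20  16-bit limit + 64-bit base loaded by lgdt
  UINT8   Idtr[10];        // +0x2A  16-bit limit + 64-bit base loaded by lidt
  UINT32  BufferStart;     // +0x34  start address of the wakeup buffer
  UINT32  Cr3;             // +0x38  page-table root installed before long mode
} MP_CPU_EXCHANGE_INFO_SKETCH;
#pragma pack ()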

@@ -1,243 +0,0 @@
#------------------------------------------------------------------------------
#
# Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php.
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
# Module Name:
#
# SmiEntry.S
#
# Abstract:
#
# Code template of the SMI handler for a particular processor
#
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(gcSmiHandlerTemplate)
ASM_GLOBAL ASM_PFX(gcSmiHandlerSize)
ASM_GLOBAL ASM_PFX(gSmiCr3)
ASM_GLOBAL ASM_PFX(gSmiStack)
ASM_GLOBAL ASM_PFX(gSmbase)
ASM_GLOBAL ASM_PFX(mXdSupported)
ASM_GLOBAL ASM_PFX(gSmiHandlerIdtr)
.equ MSR_IA32_MISC_ENABLE, 0x1A0
.equ MSR_EFER, 0xc0000080
.equ MSR_EFER_XD, 0x800
#
# Constants relating to PROCESSOR_SMM_DESCRIPTOR
#
.equ DSC_OFFSET, 0xfb00
.equ DSC_GDTPTR, 0x30
.equ DSC_GDTSIZ, 0x38
.equ DSC_CS, 14
.equ DSC_DS, 16
.equ DSC_SS, 18
.equ DSC_OTHERSEG, 20
#
# Constants relating to CPU State Save Area
#
.equ SSM_DR6, 0xffd0
.equ SSM_DR7, 0xffc8
.equ PROTECT_MODE_CS, 0x08
.equ PROTECT_MODE_DS, 0x20
.equ LONG_MODE_CS, 0x38
.equ TSS_SEGMENT, 0x40
.equ GDT_SIZE, 0x50
.text
ASM_PFX(gcSmiHandlerTemplate):
_SmiEntryPoint:
#
# The encoding of BX in 16-bit addressing mode is the same as of RDI in 64-
# bit addressing mode. And that coincidence has been used in the following
# "64-bit like" 16-bit code. Be aware that once RDI is referenced as a
# base address register, it is actually BX that is referenced.
#
.byte 0xbb # mov bx, imm16
.word _GdtDesc - _SmiEntryPoint + 0x8000
#
# fix GDT descriptor
#
.byte 0x2e,0xa1 # mov ax, cs:[offset16]
.word DSC_OFFSET + DSC_GDTSIZ
.byte 0x48 # dec ax
.byte 0x2e
movl %eax, (%rdi) # mov cs:[bx], ax
.byte 0x66,0x2e,0xa1 # mov eax, cs:[offset16]
.word DSC_OFFSET + DSC_GDTPTR
.byte 0x2e
movw %ax, 2(%rdi)
.byte 0x66,0x2e
lgdt (%rdi)
#
# Patch ProtectedMode Segment
#
.byte 0xb8
.word PROTECT_MODE_CS
.byte 0x2e
movl %eax, -2(%rdi)
#
# Patch ProtectedMode entry
#
.byte 0x66, 0xbf # mov edi, SMBASE
ASM_PFX(gSmbase): .space 4
lea ((ProtectedMode - _SmiEntryPoint) + 0x8000)(%edi), %ax
.byte 0x2e
movw %ax, -6(%rdi)
#
# Switch into ProtectedMode
#
movq %cr0, %rbx
.byte 0x66
andl $0x9ffafff3, %ebx
.byte 0x66
orl $0x00000023, %ebx
movq %rbx, %cr0
.byte 0x66, 0xea
.space 6
_GdtDesc: .space 6
ProtectedMode:
movw $PROTECT_MODE_DS, %ax
movl %eax, %ds
movl %eax, %es
movl %eax, %fs
movl %eax, %gs
movl %eax, %ss
.byte 0xbc # mov esp, imm32
ASM_PFX(gSmiStack): .space 4
jmp ProtFlatMode
ProtFlatMode:
.byte 0xb8
ASM_PFX(gSmiCr3): .space 4
movq %rax, %cr3
movl $0x668,%eax # as cr4.PGE is not set here, refresh cr3
movq %rax, %cr4 # in PreModifyMtrrs() to flush TLB.
# Load TSS
subl $8, %esp # reserve room in stack
sgdt (%rsp)
movl 2(%rsp), %eax # eax = GDT base
addl $8, %esp
movb $0x89, %dl
movb %dl, (TSS_SEGMENT + 5)(%rax) # clear busy flag
movl $TSS_SEGMENT, %eax
ltr %ax
# enable NXE if supported
.byte 0xb0 # mov al, imm8
ASM_PFX(mXdSupported): .byte 1
cmpb $0, %al
jz SkipNxe
#
# Check XD disable bit
#
movl $MSR_IA32_MISC_ENABLE, %ecx
rdmsr
subl $4, %esp
pushq %rdx # save MSR_IA32_MISC_ENABLE[63-32]
testl $BIT2, %edx # MSR_IA32_MISC_ENABLE[34]
jz L13
andw $0x0FFFB, %dx # clear XD Disable bit if it is set
wrmsr
L13:
movl $MSR_EFER, %ecx
rdmsr
orw $MSR_EFER_XD,%ax # enable NXE
wrmsr
jmp NxeDone
SkipNxe:
subl $8, %esp
NxeDone:
#
# Switch to LongMode
#
pushq $LONG_MODE_CS # push cs hardcore here
call Base # push return address for retf later
Base:
addl $(LongMode - Base), (%rsp) # offset for far retf, seg is the 1st arg
movl $MSR_EFER, %ecx
rdmsr
orb $1,%ah # enable LME
wrmsr
movq %cr0, %rbx
orl $0x080010023, %ebx # enable paging + WP + NE + MP + PE
movq %rbx, %cr0
retf
LongMode: # long mode (64-bit code) starts here
movabsq $ASM_PFX(gSmiHandlerIdtr), %rax
lidt (%rax)
lea (DSC_OFFSET)(%rdi), %ebx
movw DSC_DS(%rbx), %ax
movl %eax,%ds
movw DSC_OTHERSEG(%rbx), %ax
movl %eax,%es
movl %eax,%fs
movl %eax,%gs
movw DSC_SS(%rbx), %ax
movl %eax,%ss
# jmp _SmiHandler ; instruction is not needed
_SmiHandler:
movq 8(%rsp), %rbx
# Save FP registers
subq $0x200, %rsp
.byte 0x48 # FXSAVE64
fxsave (%rsp)
addq $-0x20, %rsp
movq %rbx, %rcx
movabsq $ASM_PFX(CpuSmmDebugEntry), %rax
call *%rax
movq %rbx, %rcx
movabsq $ASM_PFX(SmiRendezvous), %rax
call *%rax
movq %rbx, %rcx
movabsq $ASM_PFX(CpuSmmDebugExit), %rax
call *%rax
addq $0x20, %rsp
#
# Restore FP registers
#
.byte 0x48 # FXRSTOR64
fxrstor (%rsp)
addq $0x200, %rsp
movabsq $ASM_PFX(mXdSupported), %rax
movb (%rax), %al
cmpb $0, %al
jz L16
popq %rdx # get saved MSR_IA32_MISC_ENABLE[63-32]
testl $BIT2, %edx
jz L16
movl $MSR_IA32_MISC_ENABLE, %ecx
rdmsr
orw $BIT2, %dx # set XD Disable bit if it was set before entering into SMM
wrmsr
L16:
rsm
ASM_PFX(gcSmiHandlerSize): .word . - _SmiEntryPoint
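
The mXdSupported/MSR sequence above clears the "XD Bit Disable" control (MSR_IA32_MISC_ENABLE bit 34, i.e. BIT2 of the high dword tested here) before setting EFER.NXE, and the epilogue restores that control after SmiRendezvous returns. A C restatement of the same logic, using the BaseLib MSR helpers, is sketched below; the assembly keeps the saved high dword on the SMI stack, and the file-scope variable here is only for readability.

#include <Base.h>
#include <Library/BaseLib.h>

#define MSR_IA32_MISC_ENABLE  0x1A0
#define MSR_EFER              0xC0000080
#define MSR_EFER_XD           BIT11   // 0x800, as the MSR_EFER_XD equate above

STATIC BOOLEAN  mXdDisableWasSet;

VOID
EnableNxeSketch (
  VOID
  )
{
  UINT64  MiscEnable;

  MiscEnable       = AsmReadMsr64 (MSR_IA32_MISC_ENABLE);
  mXdDisableWasSet = (BOOLEAN)((MiscEnable & BIT34) != 0);
  if (mXdDisableWasSet) {
    // Clear "XD Bit Disable" so that EFER.NXE can actually be set.
    AsmWriteMsr64 (MSR_IA32_MISC_ENABLE, MiscEnable & ~BIT34);
  }
  AsmWriteMsr64 (MSR_EFER, AsmReadMsr64 (MSR_EFER) | MSR_EFER_XD);
}

VOID
RestoreXdDisableSketch (
  VOID
  )
{
  if (mXdDisableWasSet) {
    // Re-set "XD Bit Disable" if it was set when the SMI was taken.
    AsmWriteMsr64 (MSR_IA32_MISC_ENABLE,
      AsmReadMsr64 (MSR_IA32_MISC_ENABLE) | BIT34);
  }
}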

@@ -1,242 +0,0 @@
;------------------------------------------------------------------------------ ;
; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at
; http://opensource.org/licenses/bsd-license.php.
;
; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
;
; Module Name:
;
; SmiEntry.asm
;
; Abstract:
;
; Code template of the SMI handler for a particular processor
;
;-------------------------------------------------------------------------------
;
; Variables referenced by C code
;
EXTERNDEF SmiRendezvous:PROC
EXTERNDEF CpuSmmDebugEntry:PROC
EXTERNDEF CpuSmmDebugExit:PROC
EXTERNDEF gcSmiHandlerTemplate:BYTE
EXTERNDEF gcSmiHandlerSize:WORD
EXTERNDEF gSmiCr3:DWORD
EXTERNDEF gSmiStack:DWORD
EXTERNDEF gSmbase:DWORD
EXTERNDEF mXdSupported:BYTE
EXTERNDEF gSmiHandlerIdtr:FWORD
MSR_IA32_MISC_ENABLE EQU 1A0h
MSR_EFER EQU 0c0000080h
MSR_EFER_XD EQU 0800h
;
; Constants relating to PROCESSOR_SMM_DESCRIPTOR
;
DSC_OFFSET EQU 0fb00h
DSC_GDTPTR EQU 30h
DSC_GDTSIZ EQU 38h
DSC_CS EQU 14
DSC_DS EQU 16
DSC_SS EQU 18
DSC_OTHERSEG EQU 20
;
; Constants relating to CPU State Save Area
;
SSM_DR6 EQU 0ffd0h
SSM_DR7 EQU 0ffc8h
PROTECT_MODE_CS EQU 08h
PROTECT_MODE_DS EQU 20h
LONG_MODE_CS EQU 38h
TSS_SEGMENT EQU 40h
GDT_SIZE EQU 50h
.code
gcSmiHandlerTemplate LABEL BYTE
_SmiEntryPoint:
;
; The encoding of BX in 16-bit addressing mode is the same as of RDI in 64-
; bit addressing mode. And that coincidence has been used in the following
; "64-bit like" 16-bit code. Be aware that once RDI is referenced as a
; base address register, it is actually BX that is referenced.
;
DB 0bbh ; mov bx, imm16
DW offset _GdtDesc - _SmiEntryPoint + 8000h ; bx = GdtDesc offset
; fix GDT descriptor
DB 2eh, 0a1h ; mov ax, cs:[offset16]
DW DSC_OFFSET + DSC_GDTSIZ
DB 48h ; dec ax
DB 2eh
mov [rdi], eax ; mov cs:[bx], ax
DB 66h, 2eh, 0a1h ; mov eax, cs:[offset16]
DW DSC_OFFSET + DSC_GDTPTR
DB 2eh
mov [rdi + 2], ax ; mov cs:[bx + 2], eax
DB 66h, 2eh
lgdt fword ptr [rdi] ; lgdt fword ptr cs:[bx]
; Patch ProtectedMode Segment
DB 0b8h ; mov ax, imm16
DW PROTECT_MODE_CS ; set AX for segment directly
DB 2eh
mov [rdi - 2], eax ; mov cs:[bx - 2], ax
; Patch ProtectedMode entry
DB 66h, 0bfh ; mov edi, SMBASE
gSmbase DD ?
lea ax, [edi + (@ProtectedMode - _SmiEntryPoint) + 8000h]
DB 2eh
mov [rdi - 6], ax ; mov cs:[bx - 6], eax
; Switch into @ProtectedMode
mov rbx, cr0
DB 66h
and ebx, 9ffafff3h
DB 66h
or ebx, 00000023h
mov cr0, rbx
DB 66h, 0eah
DD ?
DW ?
_GdtDesc FWORD ?
@ProtectedMode:
mov ax, PROTECT_MODE_DS
mov ds, ax
mov es, ax
mov fs, ax
mov gs, ax
mov ss, ax
DB 0bch ; mov esp, imm32
gSmiStack DD ?
jmp ProtFlatMode
ProtFlatMode:
DB 0b8h ; mov eax, offset gSmiCr3
gSmiCr3 DD ?
mov cr3, rax
mov eax, 668h ; as cr4.PGE is not set here, refresh cr3
mov cr4, rax ; in PreModifyMtrrs() to flush TLB.
; Load TSS
sub esp, 8 ; reserve room in stack
sgdt fword ptr [rsp]
mov eax, [rsp + 2] ; eax = GDT base
add esp, 8
mov dl, 89h
mov [rax + TSS_SEGMENT + 5], dl ; clear busy flag
mov eax, TSS_SEGMENT
ltr ax
; enable NXE if supported
DB 0b0h ; mov al, imm8
mXdSupported DB 1
cmp al, 0
jz @SkipXd
;
; Check XD disable bit
;
mov ecx, MSR_IA32_MISC_ENABLE
rdmsr
sub esp, 4
push rdx ; save MSR_IA32_MISC_ENABLE[63-32]
test edx, BIT2 ; MSR_IA32_MISC_ENABLE[34]
jz @f
and dx, 0FFFBh ; clear XD Disable bit if it is set
wrmsr
@@:
mov ecx, MSR_EFER
rdmsr
or ax, MSR_EFER_XD ; enable NXE
wrmsr
jmp @XdDone
@SkipXd:
sub esp, 8
@XdDone:
; Switch into @LongMode
push LONG_MODE_CS ; push cs hardcore here
call Base ; push return address for retf later
Base:
add dword ptr [rsp], @LongMode - Base; offset for far retf, seg is the 1st arg
mov ecx, MSR_EFER
rdmsr
or ah, 1 ; enable LME
wrmsr
mov rbx, cr0
or ebx, 080010023h ; enable paging + WP + NE + MP + PE
mov cr0, rbx
retf
@LongMode: ; long mode (64-bit code) starts here
mov rax, offset gSmiHandlerIdtr
lidt fword ptr [rax]
lea ebx, [rdi + DSC_OFFSET]
mov ax, [rbx + DSC_DS]
mov ds, eax
mov ax, [rbx + DSC_OTHERSEG]
mov es, eax
mov fs, eax
mov gs, eax
mov ax, [rbx + DSC_SS]
mov ss, eax
; jmp _SmiHandler ; instruction is not needed
_SmiHandler:
mov rbx, [rsp] ; rbx <- CpuIndex
;
; Save FP registers
;
sub rsp, 200h
DB 48h ; FXSAVE64
fxsave [rsp]
add rsp, -20h
mov rcx, rbx
mov rax, CpuSmmDebugEntry
call rax
mov rcx, rbx
mov rax, SmiRendezvous ; rax <- absolute addr of SmiRedezvous
call rax
mov rcx, rbx
mov rax, CpuSmmDebugExit
call rax
add rsp, 20h
;
; Restore FP registers
;
DB 48h ; FXRSTOR64
fxrstor [rsp]
add rsp, 200h
mov rax, offset ASM_PFX(mXdSupported)
mov al, [rax]
cmp al, 0
jz @f
pop rdx ; get saved MSR_IA32_MISC_ENABLE[63-32]
test edx, BIT2
jz @f
mov ecx, MSR_IA32_MISC_ENABLE
rdmsr
or dx, BIT2 ; set XD Disable bit if it was set before entering into SMM
wrmsr
@@:
rsm
gcSmiHandlerSize DW $ - _SmiEntryPoint
END
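
Once _SmiHandler above is reached in long mode, it fetches CpuIndex from the stack and brackets the SmiRendezvous call with the SMM debug hooks, saving and restoring the FP/SSE state around all three calls. A hedged C restatement of that call sequence; the prototypes are inferred from the EXTERNDEFs and the single argument passed in RCX.

#include <Base.h>

VOID EFIAPI CpuSmmDebugEntry (UINTN CpuIndex);
VOID EFIAPI SmiRendezvous    (UINTN CpuIndex);
VOID EFIAPI CpuSmmDebugExit  (UINTN CpuIndex);

VOID
SmiHandlerBodySketch (
  UINTN  CpuIndex    // rbx <- [rsp]: the value deposited via gSmiStack
  )
{
  // FXSAVE64 of a 512-byte area precedes, and FXRSTOR64 follows, these
  // calls in the assembly; omitted here for brevity.
  CpuSmmDebugEntry (CpuIndex);
  SmiRendezvous (CpuIndex);
  CpuSmmDebugExit (CpuIndex);
}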

@@ -1,365 +0,0 @@
#------------------------------------------------------------------------------
#
# Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php.
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
# Module Name:
#
# SmiException.S
#
# Abstract:
#
# Exception handlers used in SM mode
#
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(SmiPFHandler)
ASM_GLOBAL ASM_PFX(gcSmiIdtr)
ASM_GLOBAL ASM_PFX(gcSmiGdtr)
ASM_GLOBAL ASM_PFX(gcPsd)
.data
NullSeg: .quad 0 # reserved by architecture
CodeSeg32:
.word -1 # LimitLow
.word 0 # BaseLow
.byte 0 # BaseMid
.byte 0x9b
.byte 0xcf # LimitHigh
.byte 0 # BaseHigh
ProtModeCodeSeg32:
.word -1 # LimitLow
.word 0 # BaseLow
.byte 0 # BaseMid
.byte 0x9b
.byte 0xcf # LimitHigh
.byte 0 # BaseHigh
ProtModeSsSeg32:
.word -1 # LimitLow
.word 0 # BaseLow
.byte 0 # BaseMid
.byte 0x93
.byte 0xcf # LimitHigh
.byte 0 # BaseHigh
DataSeg32:
.word -1 # LimitLow
.word 0 # BaseLow
.byte 0 # BaseMid
.byte 0x93
.byte 0xcf # LimitHigh
.byte 0 # BaseHigh
CodeSeg16:
.word -1
.word 0
.byte 0
.byte 0x9b
.byte 0x8f
.byte 0
DataSeg16:
.word -1
.word 0
.byte 0
.byte 0x93
.byte 0x8f
.byte 0
CodeSeg64:
.word -1 # LimitLow
.word 0 # BaseLow
.byte 0 # BaseMid
.byte 0x9b
.byte 0xaf # LimitHigh
.byte 0 # BaseHigh
# TSS Segment for X64 specially
TssSeg:
.word TSS_DESC_SIZE - 1 # LimitLow
.word 0 # BaseLow
.byte 0 # BaseMid
.byte 0x89
.byte 0x00 # LimitHigh
.byte 0 # BaseHigh
.long 0 # BaseUpper
.long 0 # Reserved
.equ GDT_SIZE, .- NullSeg
TssDescriptor:
.space 104, 0
.equ TSS_DESC_SIZE, .- TssDescriptor
#
# This structure serves as a template for all processors.
#
ASM_PFX(gcPsd):
.ascii "PSDSIG "
.word PSD_SIZE
.word 2
.word 1 << 2
.word CODE_SEL
.word DATA_SEL
.word DATA_SEL
.word DATA_SEL
.word 0
.quad 0
.quad 0
.quad 0 # fixed in InitializeMpServiceData()
.quad NullSeg
.long GDT_SIZE
.long 0
.space 24, 0
.quad 0
.equ PSD_SIZE, . - ASM_PFX(gcPsd)
#
# CODE & DATA segments for SMM runtime
#
.equ CODE_SEL, CodeSeg64 - NullSeg
.equ DATA_SEL, DataSeg32 - NullSeg
.equ CODE32_SEL, CodeSeg32 - NullSeg
ASM_PFX(gcSmiGdtr):
.word GDT_SIZE - 1
.quad NullSeg
ASM_PFX(gcSmiIdtr):
.word 0
.quad 0
.text
#------------------------------------------------------------------------------
# _SmiExceptionEntryPoints is the collection of exception entry points followed
# by a common exception handler.
#
# Stack frame would be as follows as specified in IA32 manuals:
# +---------------------+ <-- 16-byte aligned ensured by processor
# + Old SS +
# +---------------------+
# + Old RSP +
# +---------------------+
# + RFlags +
# +---------------------+
# + CS +
# +---------------------+
# + RIP +
# +---------------------+
# + Error Code +
# +---------------------+
# + Vector Number +
# +---------------------+
# + RBP +
# +---------------------+ <-- RBP, 16-byte aligned
#
# RSP set to odd multiple of 8 at @CommonEntryPoint means ErrCode PRESENT
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(PageFaultIdtHandlerSmmProfile)
ASM_PFX(PageFaultIdtHandlerSmmProfile):
pushq $0x0e # Page Fault
.byte 0x40, 0xf6, 0xc4, 0x08 #test spl, 8
jnz L1
pushq (%rsp)
movq $0, 8(%rsp)
L1:
pushq %rbp
movq %rsp, %rbp
#
# Since here the stack pointer is 16-byte aligned, so
# EFI_FX_SAVE_STATE_X64 of EFI_SYSTEM_CONTEXT_x64
# is 16-byte aligned
#
## UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
## UINT64 R8, R9, R10, R11, R12, R13, R14, R15;
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %r11
pushq %r10
pushq %r9
pushq %r8
pushq %rax
pushq %rcx
pushq %rdx
pushq %rbx
pushq 48(%rbp) # RSP
pushq (%rbp) # RBP
pushq %rsi
pushq %rdi
## UINT64 Gs, Fs, Es, Ds, Cs, Ss; insure high 16 bits of each is zero
movzwq 56(%rbp), %rax
pushq %rax # for ss
movzwq 32(%rbp), %rax
pushq %rax # for cs
movq %ds, %rax
pushq %rax
movq %es, %rax
pushq %rax
movq %fs, %rax
pushq %rax
movq %gs, %rax
pushq %rax
## UINT64 Rip;
pushq 24(%rbp)
## UINT64 Gdtr[2], Idtr[2];
subq $16, %rsp
sidt (%rsp)
subq $16, %rsp
sgdt (%rsp)
## UINT64 Ldtr, Tr;
xorq %rax, %rax
strw %ax
pushq %rax
sldtw %ax
pushq %rax
## UINT64 RFlags;
pushq 40(%rbp)
## UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
movq %cr8, %rax
pushq %rax
movq %cr4, %rax
orq $0x208, %rax
movq %rax, %cr4
pushq %rax
movq %cr3, %rax
pushq %rax
movq %cr2, %rax
pushq %rax
xorq %rax, %rax
pushq %rax
movq %cr0, %rax
pushq %rax
## UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
movq %dr7, %rax
pushq %rax
movq %dr6, %rax
pushq %rax
movq %dr3, %rax
pushq %rax
movq %dr2, %rax
pushq %rax
movq %dr1, %rax
pushq %rax
movq %dr0, %rax
pushq %rax
## FX_SAVE_STATE_X64 FxSaveState;
subq $512, %rsp
movq %rsp, %rdi
.byte 0xf, 0xae, 0x7 # fxsave [rdi]
# UEFI calling convention for x64 requires that Direction flag in EFLAGs is clear
cld
## UINT32 ExceptionData;
pushq 16(%rbp)
## call into exception handler
movq 8(%rbp), %rcx
movabsq $ASM_PFX(SmiPFHandler), %rax
## Prepare parameter and call
movq %rsp, %rdx
#
# Per X64 calling convention, allocate maximum parameter stack space
# and make sure RSP is 16-byte aligned
#
subq $4 * 8 + 8, %rsp
call *%rax
addq $4 * 8 + 8, %rsp
jmp L5
L5:
## UINT64 ExceptionData;
addq $8, %rsp
## FX_SAVE_STATE_X64 FxSaveState;
movq %rsp, %rsi
.byte 0xf, 0xae, 0xe # fxrstor [rsi]
addq $512, %rsp
## UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
## Skip restoration of DRx registers to support debuggers
## that set breakpoints in interrupt/exception context
addq $8 * 6, %rsp
## UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
popq %rax
movq %rax, %cr0
addq $8, %rsp # not for Cr1
popq %rax
movq %rax, %cr2
popq %rax
movq %rax, %cr3
popq %rax
movq %rax, %cr4
popq %rax
movq %rax, %cr8
## UINT64 RFlags;
popq 40(%rbp)
## UINT64 Ldtr, Tr;
## UINT64 Gdtr[2], Idtr[2];
## Best not let anyone mess with these particular registers...
addq $48, %rsp
## UINT64 Rip;
popq 24(%rbp)
## UINT64 Gs, Fs, Es, Ds, Cs, Ss;
popq %rax
# mov gs, rax ; not for gs
popq %rax
# mov fs, rax ; not for fs
# (X64 will not use fs and gs, so we do not restore it)
popq %rax
movq %rax, %es
popq %rax
movq %rax, %ds
popq 32(%rbp) # for cs
popq 56(%rbp) # for ss
## UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
## UINT64 R8, R9, R10, R11, R12, R13, R14, R15;
popq %rdi
popq %rsi
addq $8, %rsp # not for rbp
popq 48(%rbp) # for rsp
popq %rbx
popq %rdx
popq %rcx
popq %rax
popq %r8
popq %r9
popq %r10
popq %r11
popq %r12
popq %r13
popq %r14
popq %r15
movq %rbp, %rsp
# Enable TF bit after page fault handler runs
btsl $8, 40(%rsp) #RFLAGS
popq %rbp
addq $16, %rsp # skip INT# & ErrCode
iretq
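
The push sequence above lays the exception frame out, from high addresses downward, in exactly the field order the ## UINT64 comments enumerate, so the finished block can be handed to SmiPFHandler as a standard CPU context. A minimal sketch of the resulting call; the SmiPFHandler prototype shown is an assumption modelled on the generic EFI exception handler signature.

#include <Base.h>
#include <Protocol/DebugSupport.h>   // EFI_EXCEPTION_TYPE, EFI_SYSTEM_CONTEXT

VOID
EFIAPI
SmiPFHandler (
  EFI_EXCEPTION_TYPE  InterruptType,   // RCX = vector number ([rbp + 8])
  EFI_SYSTEM_CONTEXT  SystemContext    // RDX = pointer to the frame built above
  );

VOID
DispatchSketch (
  UINT64  Vector,
  VOID    *FrameTop    // RSP right after ExceptionData ([rbp + 16]) is pushed
  )
{
  EFI_SYSTEM_CONTEXT  Context;

  // FrameTop points at ExceptionData, the first member of
  // EFI_SYSTEM_CONTEXT_X64; the earlier pushes supplied FxSaveState,
  // Dr0-Dr7, Cr0-Cr8, Rflags, Ldtr/Tr, Gdtr/Idtr, Rip, the segment
  // registers and the general purpose registers, in that order.
  Context.SystemContextX64 = (EFI_SYSTEM_CONTEXT_X64 *)FrameTop;
  SmiPFHandler ((EFI_EXCEPTION_TYPE)Vector, Context);
}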

@@ -1,383 +0,0 @@
;------------------------------------------------------------------------------ ;
; Copyright (c) 2009 - 2016, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at
; http://opensource.org/licenses/bsd-license.php.
;
; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
;
; Module Name:
;
; SmiException.asm
;
; Abstract:
;
; Exception handlers used in SM mode
;
;-------------------------------------------------------------------------------
EXTERNDEF SmiPFHandler:PROC
EXTERNDEF gcSmiIdtr:FWORD
EXTERNDEF gcSmiGdtr:FWORD
EXTERNDEF gcPsd:BYTE
.const
NullSeg DQ 0 ; reserved by architecture
CodeSeg32 LABEL QWORD
DW -1 ; LimitLow
DW 0 ; BaseLow
DB 0 ; BaseMid
DB 9bh
DB 0cfh ; LimitHigh
DB 0 ; BaseHigh
ProtModeCodeSeg32 LABEL QWORD
DW -1 ; LimitLow
DW 0 ; BaseLow
DB 0 ; BaseMid
DB 9bh
DB 0cfh ; LimitHigh
DB 0 ; BaseHigh
ProtModeSsSeg32 LABEL QWORD
DW -1 ; LimitLow
DW 0 ; BaseLow
DB 0 ; BaseMid
DB 93h
DB 0cfh ; LimitHigh
DB 0 ; BaseHigh
DataSeg32 LABEL QWORD
DW -1 ; LimitLow
DW 0 ; BaseLow
DB 0 ; BaseMid
DB 93h
DB 0cfh ; LimitHigh
DB 0 ; BaseHigh
CodeSeg16 LABEL QWORD
DW -1
DW 0
DB 0
DB 9bh
DB 8fh
DB 0
DataSeg16 LABEL QWORD
DW -1
DW 0
DB 0
DB 93h
DB 8fh
DB 0
CodeSeg64 LABEL QWORD
DW -1 ; LimitLow
DW 0 ; BaseLow
DB 0 ; BaseMid
DB 9bh
DB 0afh ; LimitHigh
DB 0 ; BaseHigh
; TSS Segment for X64 specially
TssSeg LABEL QWORD
DW TSS_DESC_SIZE - 1 ; LimitLow
DW 0 ; BaseLow
DB 0 ; BaseMid
DB 89h
DB 00h ; LimitHigh
DB 0 ; BaseHigh
DD 0 ; BaseUpper
DD 0 ; Reserved
GDT_SIZE = $ - offset NullSeg
; Create TSS Descriptor just after GDT
TssDescriptor LABEL BYTE
DD 0 ; Reserved
DQ 0 ; RSP0
DQ 0 ; RSP1
DQ 0 ; RSP2
DD 0 ; Reserved
DD 0 ; Reserved
DQ 0 ; IST1
DQ 0 ; IST2
DQ 0 ; IST3
DQ 0 ; IST4
DQ 0 ; IST5
DQ 0 ; IST6
DQ 0 ; IST7
DD 0 ; Reserved
DD 0 ; Reserved
DW 0 ; Reserved
DW 0 ; I/O Map Base Address
TSS_DESC_SIZE = $ - offset TssDescriptor
;
; This structure serves as a template for all processors.
;
gcPsd LABEL BYTE
DB 'PSDSIG '
DW PSD_SIZE
DW 2
DW 1 SHL 2
DW CODE_SEL
DW DATA_SEL
DW DATA_SEL
DW DATA_SEL
DW 0
DQ 0
DQ 0
DQ 0 ; fixed in InitializeMpServiceData()
DQ offset NullSeg
DD GDT_SIZE
DD 0
DB 24 dup (0)
DQ 0
PSD_SIZE = $ - offset gcPsd
;
; CODE & DATA segments for SMM runtime
;
CODE_SEL = offset CodeSeg64 - offset NullSeg
DATA_SEL = offset DataSeg32 - offset NullSeg
CODE32_SEL = offset CodeSeg32 - offset NullSeg
gcSmiGdtr LABEL FWORD
DW GDT_SIZE - 1
DQ offset NullSeg
gcSmiIdtr LABEL FWORD
DW 0
DQ 0
.code
;------------------------------------------------------------------------------
; _SmiExceptionEntryPoints is the collection of exception entry points followed
; by a common exception handler.
;
; Stack frame would be as follows as specified in IA32 manuals:
;
; +---------------------+ <-- 16-byte aligned ensured by processor
; + Old SS +
; +---------------------+
; + Old RSP +
; +---------------------+
; + RFlags +
; +---------------------+
; + CS +
; +---------------------+
; + RIP +
; +---------------------+
; + Error Code +
; +---------------------+
; + Vector Number +
; +---------------------+
; + RBP +
; +---------------------+ <-- RBP, 16-byte aligned
;
; RSP set to odd multiple of 8 at @CommonEntryPoint means ErrCode PRESENT
;------------------------------------------------------------------------------
PageFaultIdtHandlerSmmProfile PROC
push 0eh ; Page Fault
test spl, 8 ; odd multiple of 8 => ErrCode present
jnz @F
push [rsp] ; duplicate INT# if no ErrCode
mov qword ptr [rsp + 8], 0
@@:
push rbp
mov rbp, rsp
;
; Since here the stack pointer is 16-byte aligned, so
; EFI_FX_SAVE_STATE_X64 of EFI_SYSTEM_CONTEXT_x64
; is 16-byte aligned
;
;; UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
;; UINT64 R8, R9, R10, R11, R12, R13, R14, R15;
push r15
push r14
push r13
push r12
push r11
push r10
push r9
push r8
push rax
push rcx
push rdx
push rbx
push qword ptr [rbp + 48] ; RSP
push qword ptr [rbp] ; RBP
push rsi
push rdi
;; UINT64 Gs, Fs, Es, Ds, Cs, Ss; insure high 16 bits of each is zero
movzx rax, word ptr [rbp + 56]
push rax ; for ss
movzx rax, word ptr [rbp + 32]
push rax ; for cs
mov rax, ds
push rax
mov rax, es
push rax
mov rax, fs
push rax
mov rax, gs
push rax
;; UINT64 Rip;
push qword ptr [rbp + 24]
;; UINT64 Gdtr[2], Idtr[2];
sub rsp, 16
sidt fword ptr [rsp]
sub rsp, 16
sgdt fword ptr [rsp]
;; UINT64 Ldtr, Tr;
xor rax, rax
str ax
push rax
sldt ax
push rax
;; UINT64 RFlags;
push qword ptr [rbp + 40]
;; UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
mov rax, cr8
push rax
mov rax, cr4
or rax, 208h
mov cr4, rax
push rax
mov rax, cr3
push rax
mov rax, cr2
push rax
xor rax, rax
push rax
mov rax, cr0
push rax
;; UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
mov rax, dr7
push rax
mov rax, dr6
push rax
mov rax, dr3
push rax
mov rax, dr2
push rax
mov rax, dr1
push rax
mov rax, dr0
push rax
;; FX_SAVE_STATE_X64 FxSaveState;
sub rsp, 512
mov rdi, rsp
db 0fh, 0aeh, 00000111y ;fxsave [rdi]
; UEFI calling convention for x64 requires that Direction flag in EFLAGs is clear
cld
;; UINT32 ExceptionData;
push qword ptr [rbp + 16]
;; call into exception handler
mov rcx, [rbp + 8]
mov rax, SmiPFHandler
;; Prepare parameter and call
mov rdx, rsp
;
; Per X64 calling convention, allocate maximum parameter stack space
; and make sure RSP is 16-byte aligned
;
sub rsp, 4 * 8 + 8
call rax
add rsp, 4 * 8 + 8
jmp @F
@@:
;; UINT64 ExceptionData;
add rsp, 8
;; FX_SAVE_STATE_X64 FxSaveState;
mov rsi, rsp
db 0fh, 0aeh, 00001110y ; fxrstor [rsi]
add rsp, 512
;; UINT64 Dr0, Dr1, Dr2, Dr3, Dr6, Dr7;
;; Skip restoration of DRx registers to support debuggers
;; that set breakpoints in interrupt/exception context
add rsp, 8 * 6
;; UINT64 Cr0, Cr1, Cr2, Cr3, Cr4, Cr8;
pop rax
mov cr0, rax
add rsp, 8 ; not for Cr1
pop rax
mov cr2, rax
pop rax
mov cr3, rax
pop rax
mov cr4, rax
pop rax
mov cr8, rax
;; UINT64 RFlags;
pop qword ptr [rbp + 40]
;; UINT64 Ldtr, Tr;
;; UINT64 Gdtr[2], Idtr[2];
;; Best not let anyone mess with these particular registers...
add rsp, 48
;; UINT64 Rip;
pop qword ptr [rbp + 24]
;; UINT64 Gs, Fs, Es, Ds, Cs, Ss;
pop rax
; mov gs, rax ; not for gs
pop rax
; mov fs, rax ; not for fs
; (X64 will not use fs and gs, so we do not restore it)
pop rax
mov es, rax
pop rax
mov ds, rax
pop qword ptr [rbp + 32] ; for cs
pop qword ptr [rbp + 56] ; for ss
;; UINT64 Rdi, Rsi, Rbp, Rsp, Rbx, Rdx, Rcx, Rax;
;; UINT64 R8, R9, R10, R11, R12, R13, R14, R15;
pop rdi
pop rsi
add rsp, 8 ; not for rbp
pop qword ptr [rbp + 48] ; for rsp
pop rbx
pop rdx
pop rcx
pop rax
pop r8
pop r9
pop r10
pop r11
pop r12
pop r13
pop r14
pop r15
mov rsp, rbp
; Enable TF bit after page fault handler runs
bts dword ptr [rsp + 40], 8 ;RFLAGS
pop rbp
add rsp, 16 ; skip INT# & ErrCode
iretq
PageFaultIdtHandlerSmmProfile ENDP
END
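
The descriptor table in the .const section above is what fixes the selector values that SmiEntry hard-codes (PROTECT_MODE_CS, PROTECT_MODE_DS, LONG_MODE_CS, TSS_SEGMENT, GDT_SIZE). A small C restatement of those selectors as they fall out of the descriptor order, eight bytes per legacy entry and sixteen for the 64-bit TSS descriptor; the names are illustrative.

#include <Base.h>

enum {
  NULL_SEL             = 0x00,  // NullSeg
  PROTECT_MODE_CS_SEL  = 0x08,  // CodeSeg32
  PROT_CODE32_SEL      = 0x10,  // ProtModeCodeSeg32
  PROT_SS32_SEL        = 0x18,  // ProtModeSsSeg32
  PROTECT_MODE_DS_SEL  = 0x20,  // DataSeg32
  CODE16_SEL           = 0x28,  // CodeSeg16
  DATA16_SEL           = 0x30,  // DataSeg16
  LONG_MODE_CS_SEL     = 0x38,  // CodeSeg64
  TSS_SEGMENT_SEL      = 0x40,  // TssSeg (16-byte system descriptor)
  GDT_TOTAL_SIZE       = 0x50   // matches the GDT_SIZE equate
};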

@@ -1,141 +0,0 @@
#------------------------------------------------------------------------------
#
# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php.
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
# Module Name:
#
# SmmInit.S
#
# Abstract:
#
# Functions for relocating SMBASE's for all processors
#
#------------------------------------------------------------------------------
ASM_GLOBAL ASM_PFX(gSmmCr0)
ASM_GLOBAL ASM_PFX(gSmmCr3)
ASM_GLOBAL ASM_PFX(gSmmCr4)
ASM_GLOBAL ASM_PFX(gSmmJmpAddr)
ASM_GLOBAL ASM_PFX(gcSmmInitTemplate)
ASM_GLOBAL ASM_PFX(gcSmmInitSize)
ASM_GLOBAL ASM_PFX(mRebasedFlagAddr32)
ASM_GLOBAL ASM_PFX(SmmRelocationSemaphoreComplete)
ASM_GLOBAL ASM_PFX(SmmRelocationSemaphoreComplete32)
ASM_GLOBAL ASM_PFX(mSmmRelocationOriginalAddressPtr32)
ASM_GLOBAL ASM_PFX(gSmmInitStack)
ASM_GLOBAL ASM_PFX(gcSmiInitGdtr)
.text
ASM_PFX(gcSmiInitGdtr):
.word 0
.quad 0
SmmStartup:
.byte 0x66,0xb8 # mov eax, imm32
ASM_PFX(gSmmCr3): .space 4
movq %rax, %cr3
.byte 0x66,0x2e
lgdt (ASM_PFX(gcSmiInitGdtr) - SmmStartup)(%ebp)
.byte 0x66,0xb8 # mov eax, imm32
ASM_PFX(gSmmCr4): .space 4
orb $2, %ah # enable XMM registers access
movq %rax, %cr4
.byte 0x66
movl $0xc0000080,%ecx # IA32_EFER MSR
rdmsr
orb $1,%ah # set LME bit
wrmsr
.byte 0x66,0xb8 # mov eax, imm32
ASM_PFX(gSmmCr0): .space 4
movq %rax, %cr0
.byte 0x66,0xea # far jmp to long mode
ASM_PFX(gSmmJmpAddr): .quad LongMode
LongMode: # long-mode starts here
.byte 0x48,0xbc # mov rsp, imm64
ASM_PFX(gSmmInitStack): .space 8
andw $0xfff0, %sp # make sure RSP is 16-byte aligned
#
# According to X64 calling convention, XMM0~5 are volatile, we need to save
# them before calling C-function.
#
subq $0x60, %rsp
movdqa %xmm0, 0x0(%rsp)
movdqa %xmm1, 0x10(%rsp)
movdqa %xmm2, 0x20(%rsp)
movdqa %xmm3, 0x30(%rsp)
movdqa %xmm4, 0x40(%rsp)
movdqa %xmm5, 0x50(%rsp)
addq $-0x20, %rsp
call ASM_PFX(SmmInitHandler)
addq $0x20, %rsp
#
# Restore XMM0~5 after calling C-function.
#
movdqa 0x0(%rsp), %xmm0
movdqa 0x10(%rsp), %xmm1
movdqa 0x20(%rsp), %xmm2
movdqa 0x30(%rsp), %xmm3
movdqa 0x40(%rsp), %xmm4
movdqa 0x50(%rsp), %xmm5
rsm
ASM_PFX(gcSmmInitTemplate):
_SmmInitTemplate:
.byte 0x66,0x2e,0x8b,0x2e # mov ebp, cs:[@F]
.word L1 - _SmmInitTemplate + 0x8000
.byte 0x66, 0x81, 0xed, 0, 0, 3, 0 # sub ebp, 0x30000
jmp *%bp # jmp ebp actually
L1:
.quad SmmStartup
ASM_PFX(gcSmmInitSize): .word . - ASM_PFX(gcSmmInitTemplate)
ASM_PFX(SmmRelocationSemaphoreComplete):
# Create a simple stack frame to store RAX and the original RSM location
pushq %rax # Used to store return address
pushq %rax
# Load the original RSM location onto stack
movabsq $ASM_PFX(mSmmRelocationOriginalAddress), %rax
movq (%rax), %rax
movq %rax, 0x08(%rsp)
# Update rebase flag
movabsq $ASM_PFX(mRebasedFlag), %rax
movq (%rax), %rax
movb $1, (%rax)
#restore RAX and return to original RSM location
popq %rax
retq
#
# Semaphore code running in 32-bit mode
#
ASM_PFX(SmmRelocationSemaphoreComplete32):
#
# movb $1, ()
#
.byte 0xc6, 0x05
ASM_PFX(mRebasedFlagAddr32):
.long 0
.byte 1
#
# jmpd ()
#
.byte 0xff, 0x25
ASM_PFX(mSmmRelocationOriginalAddressPtr32):
.long 0
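
SmmRelocationSemaphoreComplete above reports a finished SMBASE relocation by writing 1 through mRebasedFlag and then returning to the original RSM location; the 32-bit variant is patched via mRebasedFlagAddr32 and mSmmRelocationOriginalAddressPtr32. A hedged sketch of the C-side view of those symbols and of a consumer that waits for the flag; whether the module polls exactly like this is an assumption.

#include <Base.h>
#include <Library/BaseLib.h>   // CpuPause

// Declarations mirroring how the stubs use the symbols (the companion
// .asm below also declares them as PTR BYTE and QWORD respectively).
extern volatile BOOLEAN  *mRebasedFlag;
extern UINT64            mSmmRelocationOriginalAddress;

VOID
WaitForRebaseSketch (
  VOID
  )
{
  // The BSP learns that the AP took its first SMI out of the new SMBASE
  // once the stub has flipped the flag.
  while (!*mRebasedFlag) {
    CpuPause ();
  }
}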

@@ -1,132 +0,0 @@
;------------------------------------------------------------------------------ ;
; Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at
; http://opensource.org/licenses/bsd-license.php.
;
; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
;
; Module Name:
;
; SmmInit.Asm
;
; Abstract:
;
; Functions for relocating SMBASE's for all processors
;
;-------------------------------------------------------------------------------
EXTERNDEF SmmInitHandler:PROC
EXTERNDEF gSmmCr0:DWORD
EXTERNDEF gSmmCr3:DWORD
EXTERNDEF gSmmCr4:DWORD
EXTERNDEF gSmmJmpAddr:QWORD
EXTERNDEF gcSmmInitTemplate:BYTE
EXTERNDEF gcSmmInitSize:WORD
EXTERNDEF mRebasedFlag:PTR BYTE
EXTERNDEF mSmmRelocationOriginalAddress:QWORD
EXTERNDEF mRebasedFlagAddr32:DWORD
EXTERNDEF mSmmRelocationOriginalAddressPtr32:DWORD
EXTERNDEF gSmmInitStack:QWORD
EXTERNDEF gcSmiInitGdtr:FWORD
.code
gcSmiInitGdtr LABEL FWORD
DW 0
DQ 0
SmmStartup PROC
DB 66h, 0b8h ; mov eax, imm32
gSmmCr3 DD ?
mov cr3, rax
DB 66h, 2eh
lgdt fword ptr [ebp + (offset gcSmiInitGdtr - SmmStartup)]
DB 66h, 0b8h ; mov eax, imm32
gSmmCr4 DD ?
or ah, 2 ; enable XMM registers access
mov cr4, rax
DB 66h
mov ecx, 0c0000080h ; IA32_EFER MSR
rdmsr
or ah, 1 ; set LME bit
wrmsr
DB 66h, 0b8h ; mov eax, imm32
gSmmCr0 DD ?
mov cr0, rax ; enable protected mode & paging
DB 66h, 0eah ; far jmp to long mode
gSmmJmpAddr DQ @LongMode
@LongMode: ; long-mode starts here
DB 48h, 0bch ; mov rsp, imm64
gSmmInitStack DQ ?
and sp, 0fff0h ; make sure RSP is 16-byte aligned
;
; According to X64 calling convention, XMM0~5 are volatile, we need to save
; them before calling C-function.
;
sub rsp, 60h
movdqa [rsp], xmm0
movdqa [rsp + 10h], xmm1
movdqa [rsp + 20h], xmm2
movdqa [rsp + 30h], xmm3
movdqa [rsp + 40h], xmm4
movdqa [rsp + 50h], xmm5
add rsp, -20h
call SmmInitHandler
add rsp, 20h
;
; Restore XMM0~5 after calling C-function.
;
movdqa xmm0, [rsp]
movdqa xmm1, [rsp + 10h]
movdqa xmm2, [rsp + 20h]
movdqa xmm3, [rsp + 30h]
movdqa xmm4, [rsp + 40h]
movdqa xmm5, [rsp + 50h]
rsm
SmmStartup ENDP
gcSmmInitTemplate LABEL BYTE
_SmmInitTemplate PROC
DB 66h, 2eh, 8bh, 2eh ; mov ebp, cs:[@F]
DW @L1 - _SmmInitTemplate + 8000h
DB 66h, 81h, 0edh, 00h, 00h, 03h, 00 ; sub ebp, 30000h
jmp bp ; jmp ebp actually
@L1:
DQ SmmStartup
_SmmInitTemplate ENDP
gcSmmInitSize DW $ - gcSmmInitTemplate
SmmRelocationSemaphoreComplete PROC
push rax
mov rax, mRebasedFlag
mov byte ptr [rax], 1
pop rax
jmp [mSmmRelocationOriginalAddress]
SmmRelocationSemaphoreComplete ENDP
;
; Semaphore code running in 32-bit mode
;
SmmRelocationSemaphoreComplete32 PROC
;
; mov byte ptr [], 1
;
db 0c6h, 05h
mRebasedFlagAddr32 dd 0
db 1
;
; jmp dword ptr []
;
db 0ffh, 25h
mSmmRelocationOriginalAddressPtr32 dd 0
SmmRelocationSemaphoreComplete32 ENDP
END
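
gcSmmInitTemplate and gcSmmInitSize above exist so that C code can stamp the 16-bit entry stub at each processor's SMI entry point (SMBASE + 8000h) after the gSmmCr0/gSmmCr3/gSmmCr4/gSmmJmpAddr/gSmmInitStack values have been filled in. A hedged sketch of that installation step; the SMM_HANDLER_OFFSET name and the exact order of operations are assumptions, only the exported symbols and their widths come from the EXTERNDEFs above.

#include <Base.h>
#include <Library/BaseLib.h>         // AsmReadCr0/Cr3/Cr4
#include <Library/BaseMemoryLib.h>   // CopyMem

#define SMM_HANDLER_OFFSET  0x8000   // assumed: CS:8000h is the SMI entry point

extern CONST UINT8   gcSmmInitTemplate[];
extern CONST UINT16  gcSmmInitSize;
extern UINT32        gSmmCr0;
extern UINT32        gSmmCr3;
extern UINT32        gSmmCr4;

VOID
InstallSmmInitStubSketch (
  UINTN  SmBase
  )
{
  // Let SmmStartup reproduce the current control-register environment.
  gSmmCr0 = (UINT32)AsmReadCr0 ();
  gSmmCr3 = (UINT32)AsmReadCr3 ();
  gSmmCr4 = (UINT32)AsmReadCr4 ();

  // Copy the patched template to the CPU's SMI entry point.
  CopyMem ((VOID *)(SmBase + SMM_HANDLER_OFFSET),
           gcSmmInitTemplate, gcSmmInitSize);
}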