system76-edk2/UefiCpuPkg/PiSmmCpuDxeSmm/X64/SmmInit.nasm
commit 02f7fd158e (Hao Wu)
UefiCpuPkg/PiSmmCpuDxeSmm: [CVE-2017-5715] Stuff RSB before RSM
REF: https://bugzilla.tianocore.org/show_bug.cgi?id=1093

The Return Stack Buffer (RSB) is used to predict the targets of RET
instructions. When the RSB underflows, some processors may fall back to
other branch predictors, which can undermine software that relies on the
retpoline mitigation strategy on those processors.

This commit adds RSB stuffing logic before returning from SMM (the RSM
instruction) to avoid interfering with non-SMM usage of the retpoline
technique.

After the stuffing, RSB entries will contain a trap like:

@SpecTrap:
    pause
    lfence
    jmp     @SpecTrap
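
For illustration only, a stuffing macro along these lines could produce
that layout: issue a chain of CALLs whose fall-through paths are such
traps, then reclaim the return addresses pushed onto the stack. This is
a hedged sketch, not the actual contents of 'StuffRsb.inc'; the entry
count of 32 and the macro/label names here are assumptions.

%define RSB_STUFF_ENTRIES 0x20          ; assumed RSB depth

%macro StuffRsbSketch 0                 ; hypothetical 64-bit macro
%assign i 0
%rep RSB_STUFF_ENTRIES
    call    %%Over %+ i                 ; each CALL adds one RSB entry
%%Trap %+ i:                            ; speculative RETs land here
    pause
    lfence
    jmp     %%Trap %+ i
%%Over %+ i:
%assign i i+1
%endrep
    add     rsp, RSB_STUFF_ENTRIES * 8  ; drop the pushed return addresses
%endmacro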

A more detailed explanation of the purpose of this commit can be found
in the 'Branch target injection mitigation' section of the link below:
https://software.intel.com/security-software-guidance/insights/host-firmware-speculative-execution-side-channel-mitigation

Please note that follow-up work (BZ 1091) is needed to remove the
duplicated 'StuffRsb.inc' files and merge them into one under a
UefiCpuPkg package-level directory (such as UefiCpuPkg/Include/).

REF: https://bugzilla.tianocore.org/show_bug.cgi?id=1091

Cc: Jiewen Yao <jiewen.yao@intel.com>
Cc: Michael D Kinney <michael.d.kinney@intel.com>
Contributed-under: TianoCore Contribution Agreement 1.1
Signed-off-by: Hao Wu <hao.a.wu@intel.com>
Acked-by: Laszlo Ersek <lersek@redhat.com>
Regression-tested-by: Laszlo Ersek <lersek@redhat.com>
Reviewed-by: Eric Dong <eric.dong@intel.com>
2018-08-21 16:10:42 +08:00


;------------------------------------------------------------------------------ ;
; Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.<BR>
; This program and the accompanying materials
; are licensed and made available under the terms and conditions of the BSD License
; which accompanies this distribution. The full text of the license may be found at
; http://opensource.org/licenses/bsd-license.php.
;
; THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
; WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
;
; Module Name:
;
; SmmInit.nasm
;
; Abstract:
;
; Functions for relocating SMBASE's for all processors
;
;-------------------------------------------------------------------------------
%include "StuffRsb.inc"
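;
; StuffRsb.inc provides the StuffRsb64 macro invoked just before RSM below;
; see the commit message above for the stuffing rationale.
;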
extern ASM_PFX(SmmInitHandler)
extern ASM_PFX(mRebasedFlag)
extern ASM_PFX(mSmmRelocationOriginalAddress)
global ASM_PFX(gPatchSmmCr3)
global ASM_PFX(gPatchSmmCr4)
global ASM_PFX(gPatchSmmCr0)
global ASM_PFX(gPatchSmmInitStack)
global ASM_PFX(gcSmiInitGdtr)
global ASM_PFX(gcSmmInitSize)
global ASM_PFX(gcSmmInitTemplate)
global ASM_PFX(gPatchRebasedFlagAddr32)
global ASM_PFX(gPatchSmmRelocationOriginalAddressPtr32)
%define LONG_MODE_CS 0x38
    DEFAULT REL
    SECTION .text

ASM_PFX(gcSmiInitGdtr):
    DW      0
    DQ      0

global ASM_PFX(SmmStartup)

    BITS 16
ASM_PFX(SmmStartup):
    mov     eax, 0x80000001             ; read capability
    cpuid
    mov     ebx, edx                    ; rdmsr will change edx. keep it in ebx.
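;
; Each gPatchXxx label below marks the end of a 'mov reg, strict dword 0'
; (or 'strict qword 0') instruction whose placeholder immediate is patched
; at runtime by C code (presumably via BaseLib's PatchInstructionX86())
; before this entry code executes.
;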
    mov     eax, strict dword 0         ; source operand will be patched
ASM_PFX(gPatchSmmCr3):
    mov     cr3, eax
o32 lgdt    [cs:ebp + (ASM_PFX(gcSmiInitGdtr) - ASM_PFX(SmmStartup))]
    mov     eax, strict dword 0         ; source operand will be patched
ASM_PFX(gPatchSmmCr4):
    or      ah, 2                       ; enable XMM registers access
    mov     cr4, eax
    mov     ecx, 0xc0000080             ; IA32_EFER MSR
    rdmsr
    or      ah, BIT0                    ; set LME bit
    test    ebx, BIT20                  ; check NXE capability
    jz      .1
    or      ah, BIT3                    ; set NXE bit
.1:
    wrmsr
    mov     eax, strict dword 0         ; source operand will be patched
ASM_PFX(gPatchSmmCr0):
    mov     cr0, eax                    ; enable protected mode & paging
    jmp     LONG_MODE_CS : dword 0      ; offset will be patched to @LongMode
@PatchLongModeOffset:

    BITS 64
@LongMode:                              ; long-mode starts here
    mov     rsp, strict qword 0         ; source operand will be patched
ASM_PFX(gPatchSmmInitStack):
    and     sp, 0xfff0                  ; make sure RSP is 16-byte aligned
    ;
    ; According to the X64 calling convention, XMM0~5 are volatile; we need
    ; to save them before calling the C function.
    ;
    sub     rsp, 0x60
    movdqa  [rsp], xmm0
    movdqa  [rsp + 0x10], xmm1
    movdqa  [rsp + 0x20], xmm2
    movdqa  [rsp + 0x30], xmm3
    movdqa  [rsp + 0x40], xmm4
    movdqa  [rsp + 0x50], xmm5
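    ;
    ; Reserve the 32-byte shadow space required by the Microsoft x64 calling
    ; convention before calling into C (the add of -0x20 below).
    ;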
    add     rsp, -0x20
    call    ASM_PFX(SmmInitHandler)
    add     rsp, 0x20
    ;
    ; Restore XMM0~5 after calling the C function.
    ;
    movdqa  xmm0, [rsp]
    movdqa  xmm1, [rsp + 0x10]
    movdqa  xmm2, [rsp + 0x20]
    movdqa  xmm3, [rsp + 0x30]
    movdqa  xmm4, [rsp + 0x40]
    movdqa  xmm5, [rsp + 0x50]
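    ;
    ; Stuff the RSB (macro from StuffRsb.inc) before RSM so that SMM
    ; execution does not leave RSB state that undermines non-SMM retpoline
    ; usage (see the commit message above).
    ;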
    StuffRsb64
    rsm
    BITS 16
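;
; This 16-bit template is copied to SMBASE + 0x8000 (the SMI entry point).
; @L1 below is patched with the absolute address of SmmStartup; subtracting
; 0x30000 (the power-on default SMBASE, which is also the CS base when this
; code runs) converts it into a CS-relative offset for the JMP.
;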
ASM_PFX(gcSmmInitTemplate):
    mov     ebp, [cs:@L1 - ASM_PFX(gcSmmInitTemplate) + 0x8000]
    sub     ebp, 0x30000
    jmp     ebp
@L1:
    DQ      0                           ; patched to ASM_PFX(SmmStartup)

ASM_PFX(gcSmmInitSize): DW $ - ASM_PFX(gcSmmInitTemplate)
    BITS 64
global ASM_PFX(SmmRelocationSemaphoreComplete)
ASM_PFX(SmmRelocationSemaphoreComplete):
    push    rax
    mov     rax, [ASM_PFX(mRebasedFlag)]
    mov     byte [rax], 1
    pop     rax
    jmp     [ASM_PFX(mSmmRelocationOriginalAddress)]
;
; Semaphore code running in 32-bit mode
;
    BITS 32
global ASM_PFX(SmmRelocationSemaphoreComplete32)
ASM_PFX(SmmRelocationSemaphoreComplete32):
    push    eax
    mov     eax, strict dword 0         ; source operand will be patched
ASM_PFX(gPatchRebasedFlagAddr32):
    mov     byte [eax], 1
    pop     eax
    jmp     dword [dword 0]             ; destination will be patched
ASM_PFX(gPatchSmmRelocationOriginalAddressPtr32):
    BITS 64
global ASM_PFX(PiSmmCpuSmmInitFixupAddress)
ASM_PFX(PiSmmCpuSmmInitFixupAddress):
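    ;
    ; Patch the 4-byte offset field of the far JMP in SmmStartup (the field
    ; starts at @PatchLongModeOffset - 6) with the address of @LongMode, and
    ; store the absolute address of SmmStartup at @L1 for the 16-bit template.
    ;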
    lea     rax, [@LongMode]
    lea     rcx, [@PatchLongModeOffset - 6]
    mov     dword [rcx], eax

    lea     rax, [ASM_PFX(SmmStartup)]
    lea     rcx, [@L1]
    mov     qword [rcx], rax
    ret