UefiCpuPkg: Apply uncrustify changes

REF: https://bugzilla.tianocore.org/show_bug.cgi?id=3737

Apply uncrustify changes to .c/.h files in the UefiCpuPkg package

Cc: Andrew Fish <afish@apple.com>
Cc: Leif Lindholm <leif@nuviainc.com>
Cc: Michael D Kinney <michael.d.kinney@intel.com>
Signed-off-by: Michael Kubacki <michael.kubacki@microsoft.com>
Reviewed-by: Ray Ni <ray.ni@intel.com>
Commit 053e878bfb (parent 91415a36ae)
Author: Michael Kubacki
Date: 2021-12-05 14:54:17 -08:00
Committed by: mergify[bot]

143 changed files with 14130 additions and 13035 deletions
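Note: the change is mechanical reformatting; no functional code is touched. The conventions the edk2 uncrustify configuration enforces are visible throughout the hunks below. A before/after sketch assembled from constructs in this diff (all identifiers come from the hunks; nothing new is introduced):

  //
  // Before uncrustify:
  //
  UINT64 *PTEntry;
  PTEntry = (UINT64*)AllocatePageTableMemory (1);
  mCetPl0Ssp = (UINT32)((UINTN)ShadowStack + SmmShadowStackSize - sizeof(UINT64));
  if (GdtEntry->Bits.Type > 8 && GdtEntry->Bits.DB == 1) {
    return ;
  }

  //
  // After uncrustify: column-aligned declarations and assignments, a space in
  // pointer casts and after sizeof, parentheses around each side of && / ||,
  // and "return;" with no space before the semicolon.
  //
  UINT64  *PTEntry;

  PTEntry    = (UINT64 *)AllocatePageTableMemory (1);
  mCetPl0Ssp = (UINT32)((UINTN)ShadowStack + SmmShadowStackSize - sizeof (UINT64));
  if ((GdtEntry->Bits.Type > 8) && (GdtEntry->Bits.DB == 1)) {
    return;
  }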

(One file's diff is suppressed because it is too large.)


@@ -9,8 +9,8 @@ SPDX-License-Identifier: BSD-2-Clause-Patent
 #include "PiSmmCpuDxeSmm.h"

-X86_ASSEMBLY_PATCH_LABEL gPatchSmmRelocationOriginalAddressPtr32;
-X86_ASSEMBLY_PATCH_LABEL gPatchRebasedFlagAddr32;
+X86_ASSEMBLY_PATCH_LABEL  gPatchSmmRelocationOriginalAddressPtr32;
+X86_ASSEMBLY_PATCH_LABEL  gPatchRebasedFlagAddr32;

 UINTN             mSmmRelocationOriginalAddress;
 volatile BOOLEAN  *mRebasedFlag;
@@ -42,14 +42,14 @@ SemaphoreHook (
   SMRAM_SAVE_STATE_MAP  *CpuState;
   UINTN                 TempValue;

-  mRebasedFlag = RebasedFlag;
+  mRebasedFlag = RebasedFlag;
   PatchInstructionX86 (
     gPatchRebasedFlagAddr32,
     (UINT32)(UINTN)mRebasedFlag,
     4
     );

-  CpuState = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);
+  CpuState                      = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET);
   mSmmRelocationOriginalAddress = HookReturnFromSmm (
                                     CpuIndex,
                                     CpuState,
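For readers unfamiliar with the patch labels above: PatchInstructionX86 is the BaseLib service that rewrites an instruction's immediate operand at boot time. A minimal conceptual sketch, not the BaseLib implementation (the helper name is made up): an X86_ASSEMBLY_PATCH_LABEL marks the first byte after the instruction to patch, so the value is stored into the ValueSize bytes that precede the label.

  #include <Base.h>
  #include <Library/BaseMemoryLib.h>

  VOID
  PatchImmediateSketch (
    IN VOID    *InstructionEnd,   // address of the assembly patch label
    IN UINT64  PatchValue,        // e.g. (UINT32)(UINTN)mRebasedFlag above
    IN UINTN   ValueSize          // 4 in the SemaphoreHook calls above
    )
  {
    //
    // Overwrite the immediate operand that ends at InstructionEnd.
    //
    CopyMem ((UINT8 *)InstructionEnd - ValueSize, &PatchValue, ValueSize);
  }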


@@ -8,18 +8,18 @@ SPDX-License-Identifier: BSD-2-Clause-Patent
 #include "PiSmmCpuDxeSmm.h"

-EFI_PHYSICAL_ADDRESS mGdtBuffer;
-UINTN mGdtBufferSize;
+EFI_PHYSICAL_ADDRESS  mGdtBuffer;
+UINTN                 mGdtBufferSize;

-extern BOOLEAN mCetSupported;
-extern UINTN mSmmShadowStackSize;
+extern BOOLEAN  mCetSupported;
+extern UINTN    mSmmShadowStackSize;

-X86_ASSEMBLY_PATCH_LABEL mPatchCetPl0Ssp;
-X86_ASSEMBLY_PATCH_LABEL mPatchCetInterruptSsp;
-X86_ASSEMBLY_PATCH_LABEL mPatchCetInterruptSspTable;
-UINT32 mCetPl0Ssp;
-UINT32 mCetInterruptSsp;
-UINT32 mCetInterruptSspTable;
+X86_ASSEMBLY_PATCH_LABEL  mPatchCetPl0Ssp;
+X86_ASSEMBLY_PATCH_LABEL  mPatchCetInterruptSsp;
+X86_ASSEMBLY_PATCH_LABEL  mPatchCetInterruptSspTable;
+UINT32                    mCetPl0Ssp;
+UINT32                    mCetInterruptSsp;
+UINT32                    mCetInterruptSspTable;

 UINTN  mSmmInterruptSspTables;
@@ -33,14 +33,14 @@ UINTN mSmmInterruptSspTables;
 VOID
 EFIAPI
 InitializeIdtIst (
-  IN EFI_EXCEPTION_TYPE ExceptionType,
-  IN UINT8 Ist
+  IN EFI_EXCEPTION_TYPE  ExceptionType,
+  IN UINT8               Ist
   )
 {
   IA32_IDT_GATE_DESCRIPTOR  *IdtGate;

-  IdtGate = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
-  IdtGate += ExceptionType;
+  IdtGate                  = (IA32_IDT_GATE_DESCRIPTOR *)gcSmiIdtr.Base;
+  IdtGate                 += ExceptionType;
   IdtGate->Bits.Reserved_0 = Ist;
 }
@@ -59,34 +59,34 @@ InitGdt (
   OUT UINTN  *GdtStepSize
   )
 {
-  UINTN Index;
-  IA32_SEGMENT_DESCRIPTOR *GdtDescriptor;
-  UINTN TssBase;
-  UINTN GdtTssTableSize;
-  UINT8 *GdtTssTables;
-  UINTN GdtTableStepSize;
+  UINTN                    Index;
+  IA32_SEGMENT_DESCRIPTOR  *GdtDescriptor;
+  UINTN                    TssBase;
+  UINTN                    GdtTssTableSize;
+  UINT8                    *GdtTssTables;
+  UINTN                    GdtTableStepSize;

   //
   // For X64 SMM, we allocate separate GDT/TSS for each CPUs to avoid TSS load contention
   // on each SMI entry.
   //
   GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE + 7) & ~7; // 8 bytes aligned
-  mGdtBufferSize = GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
-  GdtTssTables = (UINT8*)AllocateCodePages (EFI_SIZE_TO_PAGES (mGdtBufferSize));
+  mGdtBufferSize  = GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus;
+  GdtTssTables    = (UINT8 *)AllocateCodePages (EFI_SIZE_TO_PAGES (mGdtBufferSize));
   ASSERT (GdtTssTables != NULL);
-  mGdtBuffer = (UINTN)GdtTssTables;
+  mGdtBuffer       = (UINTN)GdtTssTables;
   GdtTableStepSize = GdtTssTableSize;

   for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) {
-    CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE);
+    CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID *)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE);
     //
     // Fixup TSS descriptors
     //
-    TssBase = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1);
-    GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;
-    GdtDescriptor->Bits.BaseLow = (UINT16)(UINTN)TssBase;
-    GdtDescriptor->Bits.BaseMid = (UINT8)((UINTN)TssBase >> 16);
+    TssBase                      = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1);
+    GdtDescriptor                = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2;
+    GdtDescriptor->Bits.BaseLow  = (UINT16)(UINTN)TssBase;
+    GdtDescriptor->Bits.BaseMid  = (UINT8)((UINTN)TssBase >> 16);
     GdtDescriptor->Bits.BaseHigh = (UINT8)((UINTN)TssBase >> 24);

     if ((FeaturePcdGet (PcdCpuSmmStackGuard)) || ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported)) {
@@ -118,15 +118,17 @@ GetProtectedModeCS (
   AsmReadGdtr (&GdtrDesc);
   GdtEntryCount = (GdtrDesc.Limit + 1) / sizeof (IA32_SEGMENT_DESCRIPTOR);
-  GdtEntry = (IA32_SEGMENT_DESCRIPTOR *) GdtrDesc.Base;
+  GdtEntry      = (IA32_SEGMENT_DESCRIPTOR *)GdtrDesc.Base;
   for (Index = 0; Index < GdtEntryCount; Index++) {
     if (GdtEntry->Bits.L == 0) {
-      if (GdtEntry->Bits.Type > 8 && GdtEntry->Bits.DB == 1) {
+      if ((GdtEntry->Bits.Type > 8) && (GdtEntry->Bits.DB == 1)) {
         break;
       }
     }
+
     GdtEntry++;
   }
+
   ASSERT (Index != GdtEntryCount);
   return Index * 8;
 }
@@ -171,9 +173,9 @@ InitShadowStack (
   IN VOID   *ShadowStack
   )
 {
-  UINTN SmmShadowStackSize;
-  UINT64 *InterruptSspTable;
-  UINT32 InterruptSsp;
+  UINTN   SmmShadowStackSize;
+  UINT64  *InterruptSspTable;
+  UINT32  InterruptSsp;

   if ((PcdGet32 (PcdControlFlowEnforcementPropertyMask) != 0) && mCetSupported) {
     SmmShadowStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmShadowStackSize)));
@@ -188,14 +190,15 @@ InitShadowStack (
       //
       SmmShadowStackSize += EFI_PAGES_TO_SIZE (1);
     }
-    mCetPl0Ssp = (UINT32)((UINTN)ShadowStack + SmmShadowStackSize - sizeof(UINT64));
+
+    mCetPl0Ssp = (UINT32)((UINTN)ShadowStack + SmmShadowStackSize - sizeof (UINT64));
     PatchInstructionX86 (mPatchCetPl0Ssp, mCetPl0Ssp, 4);
     DEBUG ((DEBUG_INFO, "mCetPl0Ssp - 0x%x\n", mCetPl0Ssp));
     DEBUG ((DEBUG_INFO, "ShadowStack - 0x%x\n", ShadowStack));
     DEBUG ((DEBUG_INFO, "  SmmShadowStackSize - 0x%x\n", SmmShadowStackSize));

     if (mSmmInterruptSspTables == 0) {
-      mSmmInterruptSspTables = (UINTN)AllocateZeroPool(sizeof(UINT64) * 8 * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
+      mSmmInterruptSspTables = (UINTN)AllocateZeroPool (sizeof (UINT64) * 8 * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus);
       ASSERT (mSmmInterruptSspTables != 0);
       DEBUG ((DEBUG_INFO, "mSmmInterruptSspTables - 0x%x\n", mSmmInterruptSspTables));
     }
@@ -209,17 +212,16 @@ InitShadowStack (
     // Please refer to UefiCpuPkg/Library/CpuExceptionHandlerLib/X64 for the full stack frame at runtime.
     // According to SDM (ver. 075 June 2021), shadow stack should be 32 bytes aligned.
     //
-    InterruptSsp = (UINT32)(((UINTN)ShadowStack + EFI_PAGES_TO_SIZE(1) - (sizeof(UINT64) * 4)) & ~0x1f);
-    *(UINT64 *)(UINTN)InterruptSsp = (InterruptSsp - sizeof(UINT64) * 4) | 0x2;
-    mCetInterruptSsp = InterruptSsp - sizeof(UINT64);
+    InterruptSsp                   = (UINT32)(((UINTN)ShadowStack + EFI_PAGES_TO_SIZE (1) - (sizeof (UINT64) * 4)) & ~0x1f);
+    *(UINT64 *)(UINTN)InterruptSsp = (InterruptSsp - sizeof (UINT64) * 4) | 0x2;
+    mCetInterruptSsp               = InterruptSsp - sizeof (UINT64);

-    mCetInterruptSspTable = (UINT32)(UINTN)(mSmmInterruptSspTables + sizeof(UINT64) * 8 * CpuIndex);
-    InterruptSspTable = (UINT64 *)(UINTN)mCetInterruptSspTable;
-    InterruptSspTable[1] = mCetInterruptSsp;
+    mCetInterruptSspTable = (UINT32)(UINTN)(mSmmInterruptSspTables + sizeof (UINT64) * 8 * CpuIndex);
+    InterruptSspTable     = (UINT64 *)(UINTN)mCetInterruptSspTable;
+    InterruptSspTable[1]  = mCetInterruptSsp;
     PatchInstructionX86 (mPatchCetInterruptSsp, mCetInterruptSsp, 4);
     PatchInstructionX86 (mPatchCetInterruptSspTable, mCetInterruptSspTable, 4);
     DEBUG ((DEBUG_INFO, "mCetInterruptSsp - 0x%x\n", mCetInterruptSsp));
     DEBUG ((DEBUG_INFO, "mCetInterruptSspTable - 0x%x\n", mCetInterruptSspTable));
   }
 }
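Two open-coded alignment idioms recur in this file and survive the reformat unchanged: GdtTssTableSize rounds up to an 8-byte boundary with "(x + 7) & ~7", and InterruptSsp rounds down to the SDM-required 32-byte boundary with "x & ~0x1f". As standalone helpers (illustrative names only; the diff open-codes these expressions):

  #include <Base.h>

  #define ALIGN_UP_8(Size)     ((((UINTN)(Size)) + 7) & ~(UINTN)7)
  #define ALIGN_DOWN_32(Addr)  (((UINTN)(Addr)) & ~(UINTN)0x1f)

  //
  // Worked example: ALIGN_UP_8 (0x1A29) == 0x1A30 and
  // ALIGN_DOWN_32 (0x1A29) == 0x1A20.
  //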


@@ -14,17 +14,17 @@ SPDX-License-Identifier: BSD-2-Clause-Patent
 //
 // Current page index.
 //
-UINTN mPFPageIndex;
+UINTN  mPFPageIndex;

 //
 // Pool for dynamically creating page table in page fault handler.
 //
-UINT64 mPFPageBuffer;
+UINT64  mPFPageBuffer;

 //
 // Store the uplink information for each page being used.
 //
-UINT64 *mPFPageUplink[MAX_PF_PAGE_COUNT];
+UINT64  *mPFPageUplink[MAX_PF_PAGE_COUNT];

 /**
   Create SMM page table for S3 path.
@@ -35,8 +35,8 @@ InitSmmS3Cr3 (
   VOID
   )
 {
-  EFI_PHYSICAL_ADDRESS Pages;
-  UINT64 *PTEntry;
+  EFI_PHYSICAL_ADDRESS  Pages;
+  UINT64                *PTEntry;

   //
   // Generate PAE page table for the first 4GB memory space
@@ -46,7 +46,7 @@ InitSmmS3Cr3 (
   //
   // Fill Page-Table-Level4 (PML4) entry
   //
-  PTEntry = (UINT64*)AllocatePageTableMemory (1);
+  PTEntry = (UINT64 *)AllocatePageTableMemory (1);
   ASSERT (PTEntry != NULL);
   *PTEntry = Pages | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
   ZeroMem (PTEntry + 1, EFI_PAGE_SIZE - sizeof (*PTEntry));
@@ -56,7 +56,7 @@ InitSmmS3Cr3 (
   //
   mSmmS3ResumeState->SmmS3Cr3 = (UINT32)(UINTN)PTEntry;

-  return ;
+  return;
 }

 /**
@@ -68,7 +68,7 @@ InitPagesForPFHandler (
   VOID
   )
 {
-  VOID *Address;
+  VOID  *Address;

   //
   // Pre-Allocate memory for page fault handler
@@ -77,9 +77,9 @@ InitPagesForPFHandler (
   Address = AllocatePages (MAX_PF_PAGE_COUNT);
   ASSERT (Address != NULL);

-  mPFPageBuffer = (UINT64)(UINTN) Address;
-  mPFPageIndex = 0;
-  ZeroMem ((VOID *) (UINTN) mPFPageBuffer, EFI_PAGE_SIZE * MAX_PF_PAGE_COUNT);
+  mPFPageBuffer = (UINT64)(UINTN)Address;
+  mPFPageIndex  = 0;
+  ZeroMem ((VOID *)(UINTN)mPFPageBuffer, EFI_PAGE_SIZE * MAX_PF_PAGE_COUNT);
   ZeroMem (mPFPageUplink, sizeof (mPFPageUplink));

   return;
@@ -93,16 +93,16 @@ InitPagesForPFHandler (
 **/
 VOID
 AcquirePage (
-  UINT64 *Uplink
+  UINT64  *Uplink
   )
 {
-  UINT64 Address;
+  UINT64  Address;

   //
   // Get the buffer
   //
   Address = mPFPageBuffer + EFI_PAGES_TO_SIZE (mPFPageIndex);
-  ZeroMem ((VOID *) (UINTN) Address, EFI_PAGE_SIZE);
+  ZeroMem ((VOID *)(UINTN)Address, EFI_PAGE_SIZE);

   //
   // Cut the previous uplink if it exists and wasn't overwritten
@@ -114,7 +114,7 @@ AcquirePage (
   //
   // Link & Record the current uplink
   //
-  *Uplink = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
+  *Uplink                     = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
   mPFPageUplink[mPFPageIndex] = Uplink;

   mPFPageIndex = (mPFPageIndex + 1) % MAX_PF_PAGE_COUNT;
@@ -134,26 +134,26 @@ AcquirePage (
 **/
 VOID
 RestorePageTableAbove4G (
-  UINT64 *PageTable,
-  UINT64 PFAddress,
-  UINTN CpuIndex,
-  UINTN ErrorCode,
-  BOOLEAN *IsValidPFAddress
+  UINT64   *PageTable,
+  UINT64   PFAddress,
+  UINTN    CpuIndex,
+  UINTN    ErrorCode,
+  BOOLEAN  *IsValidPFAddress
   )
 {
-  UINTN PTIndex;
-  UINT64 Address;
-  BOOLEAN Nx;
-  BOOLEAN Existed;
-  UINTN Index;
-  UINTN PFIndex;
-  IA32_CR4 Cr4;
-  BOOLEAN Enable5LevelPaging;
+  UINTN     PTIndex;
+  UINT64    Address;
+  BOOLEAN   Nx;
+  BOOLEAN   Existed;
+  UINTN     Index;
+  UINTN     PFIndex;
+  IA32_CR4  Cr4;
+  BOOLEAN   Enable5LevelPaging;

   ASSERT ((PageTable != NULL) && (IsValidPFAddress != NULL));

-  Cr4.UintN = AsmReadCr4 ();
-  Enable5LevelPaging = (BOOLEAN) (Cr4.Bits.LA57 == 1);
+  Cr4.UintN          = AsmReadCr4 ();
+  Enable5LevelPaging = (BOOLEAN)(Cr4.Bits.LA57 == 1);

   //
   // If page fault address is 4GB above.
@@ -164,26 +164,28 @@ RestorePageTableAbove4G (
   // If it exists in page table but page fault is generated,
   // there are 2 possible reasons: 1. present flag is set to 0; 2. instruction fetch in protected memory range.
   //
-  Existed = FALSE;
-  PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
-  PTIndex = 0;
+  Existed   = FALSE;
+  PageTable = (UINT64 *)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
+  PTIndex   = 0;
   if (Enable5LevelPaging) {
     PTIndex = BitFieldRead64 (PFAddress, 48, 56);
   }
+
   if ((!Enable5LevelPaging) || ((PageTable[PTIndex] & IA32_PG_P) != 0)) {
     // PML5E
     if (Enable5LevelPaging) {
-      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
     }
+
     PTIndex = BitFieldRead64 (PFAddress, 39, 47);
     if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
       // PML4E
-      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
-      PTIndex = BitFieldRead64 (PFAddress, 30, 38);
+      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+      PTIndex   = BitFieldRead64 (PFAddress, 30, 38);
       if ((PageTable[PTIndex] & IA32_PG_P) != 0) {
         // PDPTE
-        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
-        PTIndex = BitFieldRead64 (PFAddress, 21, 29);
+        PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+        PTIndex   = BitFieldRead64 (PFAddress, 21, 29);
         // PD
         if ((PageTable[PTIndex] & IA32_PG_PS) != 0) {
           //
@@ -197,7 +199,7 @@ RestorePageTableAbove4G (
           //
           // 4KB page
           //
-          PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask& PHYSICAL_ADDRESS_MASK);
+          PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask& PHYSICAL_ADDRESS_MASK);
           if (PageTable != 0) {
             //
             // When there is a valid entry to map to 4KB page, need not create a new entry to map 2MB.
@@ -217,7 +219,6 @@ RestorePageTableAbove4G (
   // If page entry does not existed in page table at all, create a new entry.
   //
   if (!Existed) {
-
     if (IsAddressValid (PFAddress, &Nx)) {
       //
       // If page fault address above 4GB is in protected range but it causes a page fault exception,
@@ -234,19 +235,20 @@ RestorePageTableAbove4G (
       //
       // Find the page table entry created just now.
       //
-      PageTable = (UINT64*)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
+      PageTable = (UINT64 *)(AsmReadCr3 () & PHYSICAL_ADDRESS_MASK);
       PFAddress = AsmReadCr2 ();
       // PML5E
       if (Enable5LevelPaging) {
-        PTIndex = BitFieldRead64 (PFAddress, 48, 56);
-        PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+        PTIndex   = BitFieldRead64 (PFAddress, 48, 56);
+        PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
       }
+
       // PML4E
-      PTIndex = BitFieldRead64 (PFAddress, 39, 47);
-      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+      PTIndex   = BitFieldRead64 (PFAddress, 39, 47);
+      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
       // PDPTE
-      PTIndex = BitFieldRead64 (PFAddress, 30, 38);
-      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+      PTIndex   = BitFieldRead64 (PFAddress, 30, 38);
+      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
       // PD
       PTIndex = BitFieldRead64 (PFAddress, 21, 29);
       Address = PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK;
@@ -257,18 +259,21 @@ RestorePageTableAbove4G (
       AcquirePage (&PageTable[PTIndex]);

       // PTE
-      PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
+      PageTable = (UINT64 *)(UINTN)(PageTable[PTIndex] & ~mAddressEncMask & PHYSICAL_ADDRESS_MASK);
       for (Index = 0; Index < 512; Index++) {
         PageTable[Index] = Address | mAddressEncMask | PAGE_ATTRIBUTE_BITS;
         if (!IsAddressValid (Address, &Nx)) {
           PageTable[Index] = PageTable[Index] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
         }
+
         if (Nx && mXdSupported) {
           PageTable[Index] = PageTable[Index] | IA32_PG_NX;
         }
+
         if (Address == (PFAddress & PHYSICAL_ADDRESS_MASK & ~((1ull << 12) - 1))) {
           PTIndex = Index;
         }
+
         Address += SIZE_4KB;
       } // end for PT
     } else {
@@ -281,6 +286,7 @@ RestorePageTableAbove4G (
       //
       PageTable[PTIndex] = PageTable[PTIndex] & (INTN)(INT32)(~PAGE_ATTRIBUTE_BITS);
     }
+
     //
     // Set XD bit to 1
     //
@@ -297,7 +303,7 @@ RestorePageTableAbove4G (
   //
   ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT);
   if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) {
-    PFIndex = mPFEntryCount[CpuIndex];
+    PFIndex                                = mPFEntryCount[CpuIndex];
     mLastPFEntryValue[CpuIndex][PFIndex]   = PageTable[PTIndex];
     mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex];
     mPFEntryCount[CpuIndex]++;
@@ -326,7 +332,7 @@ RestorePageTableAbove4G (
 **/
 VOID
 ClearTrapFlag (
-  IN OUT EFI_SYSTEM_CONTEXT SystemContext
+  IN OUT EFI_SYSTEM_CONTEXT  SystemContext
   )
 {
   SystemContext.SystemContextX64->Rflags &= (UINTN) ~BIT8;
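A note on the page-table walk above: each BitFieldRead64 call extracts one 9-bit table index from the faulting linear address (512 entries per table), and PHYSICAL_ADDRESS_MASK, defined in the header below as ((1ull << 52) - SIZE_4KB) == 0x000FFFFFFFFFF000, keeps address bits 51:12 of CR3 or a table entry while stripping the low flag bits. An equivalent plain-C sketch (illustrative helper name; the code above calls BaseLib's BitFieldRead64 directly):

  #include <Base.h>

  UINTN
  PageTableIndex (
    IN UINT64  LinearAddress,
    IN UINTN   Shift   // 48 = PML5E, 39 = PML4E, 30 = PDPTE, 21 = PDE, 12 = PTE
    )
  {
    //
    // Same result as BitFieldRead64 (LinearAddress, Shift, Shift + 8).
    //
    return (UINTN)((LinearAddress >> Shift) & 0x1FF);
  }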


@@ -12,50 +12,50 @@ SPDX-License-Identifier: BSD-2-Clause-Patent
 #pragma pack (1)

 typedef struct _MSR_DS_AREA_STRUCT {
-  UINT64 BTSBufferBase;
-  UINT64 BTSIndex;
-  UINT64 BTSAbsoluteMaximum;
-  UINT64 BTSInterruptThreshold;
-  UINT64 PEBSBufferBase;
-  UINT64 PEBSIndex;
-  UINT64 PEBSAbsoluteMaximum;
-  UINT64 PEBSInterruptThreshold;
-  UINT64 PEBSCounterReset[2];
-  UINT64 Reserved;
+  UINT64    BTSBufferBase;
+  UINT64    BTSIndex;
+  UINT64    BTSAbsoluteMaximum;
+  UINT64    BTSInterruptThreshold;
+  UINT64    PEBSBufferBase;
+  UINT64    PEBSIndex;
+  UINT64    PEBSAbsoluteMaximum;
+  UINT64    PEBSInterruptThreshold;
+  UINT64    PEBSCounterReset[2];
+  UINT64    Reserved;
 } MSR_DS_AREA_STRUCT;

 typedef struct _BRANCH_TRACE_RECORD {
-  UINT64 LastBranchFrom;
-  UINT64 LastBranchTo;
-  UINT64 Rsvd0 : 4;
-  UINT64 BranchPredicted : 1;
-  UINT64 Rsvd1 : 59;
+  UINT64    LastBranchFrom;
+  UINT64    LastBranchTo;
+  UINT64    Rsvd0           : 4;
+  UINT64    BranchPredicted : 1;
+  UINT64    Rsvd1           : 59;
 } BRANCH_TRACE_RECORD;

 typedef struct _PEBS_RECORD {
-  UINT64 Rflags;
-  UINT64 LinearIP;
-  UINT64 Rax;
-  UINT64 Rbx;
-  UINT64 Rcx;
-  UINT64 Rdx;
-  UINT64 Rsi;
-  UINT64 Rdi;
-  UINT64 Rbp;
-  UINT64 Rsp;
-  UINT64 R8;
-  UINT64 R9;
-  UINT64 R10;
-  UINT64 R11;
-  UINT64 R12;
-  UINT64 R13;
-  UINT64 R14;
-  UINT64 R15;
+  UINT64    Rflags;
+  UINT64    LinearIP;
+  UINT64    Rax;
+  UINT64    Rbx;
+  UINT64    Rcx;
+  UINT64    Rdx;
+  UINT64    Rsi;
+  UINT64    Rdi;
+  UINT64    Rbp;
+  UINT64    Rsp;
+  UINT64    R8;
+  UINT64    R9;
+  UINT64    R10;
+  UINT64    R11;
+  UINT64    R12;
+  UINT64    R13;
+  UINT64    R14;
+  UINT64    R15;
 } PEBS_RECORD;

 #pragma pack ()

-#define PHYSICAL_ADDRESS_MASK ((1ull << 52) - SIZE_4KB)
+#define PHYSICAL_ADDRESS_MASK  ((1ull << 52) - SIZE_4KB)

 /**
   Update page table to map the memory correctly in order to make the instruction
@@ -71,11 +71,11 @@ typedef struct _PEBS_RECORD {
 **/
 VOID
 RestorePageTableAbove4G (
-  UINT64 *PageTable,
-  UINT64 PFAddress,
-  UINTN CpuIndex,
-  UINTN ErrorCode,
-  BOOLEAN *IsValidPFAddress
+  UINT64   *PageTable,
+  UINT64   PFAddress,
+  UINTN    CpuIndex,
+  UINTN    ErrorCode,
+  BOOLEAN  *IsValidPFAddress
   );

 /**