diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c b/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c new file mode 100644 index 0000000000..bbff6e18b4 --- /dev/null +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/CpuS3.c @@ -0,0 +1,491 @@ +/** @file +Code for Processor S3 restoration + +Copyright (c) 2006 - 2015, Intel Corporation. All rights reserved.
+This program and the accompanying materials +are licensed and made available under the terms and conditions of the BSD License +which accompanies this distribution. The full text of the license may be found at +http://opensource.org/licenses/bsd-license.php + +THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#include "PiSmmCpuDxeSmm.h" + +typedef struct { + UINTN Lock; + VOID *StackStart; + UINTN StackSize; + VOID *ApFunction; + IA32_DESCRIPTOR GdtrProfile; + IA32_DESCRIPTOR IdtrProfile; + UINT32 BufferStart; + UINT32 Cr3; +} MP_CPU_EXCHANGE_INFO; + +typedef struct { + UINT8 *RendezvousFunnelAddress; + UINTN PModeEntryOffset; + UINTN FlatJumpOffset; + UINTN Size; + UINTN LModeEntryOffset; + UINTN LongJumpOffset; +} MP_ASSEMBLY_ADDRESS_MAP; + +/** + Get starting address and size of the rendezvous entry for APs. + Information for fixing a jump instruction in the code is also returned. + + @param AddressMap Output buffer for address map information. +**/ +VOID * +EFIAPI +AsmGetAddressMap ( + MP_ASSEMBLY_ADDRESS_MAP *AddressMap + ); + +#define LEGACY_REGION_SIZE (2 * 0x1000) +#define LEGACY_REGION_BASE (0xA0000 - LEGACY_REGION_SIZE) +#define MSR_SPIN_LOCK_INIT_NUM 15 + +ACPI_CPU_DATA mAcpiCpuData; +UINT32 mNumberToFinish; +MP_CPU_EXCHANGE_INFO *mExchangeInfo; +BOOLEAN mRestoreSmmConfigurationInS3 = FALSE; +VOID *mGdtForAp = NULL; +VOID *mIdtForAp = NULL; +VOID *mMachineCheckHandlerForAp = NULL; +MP_MSR_LOCK *mMsrSpinLocks = NULL; +UINTN mMsrSpinLockCount = MSR_SPIN_LOCK_INIT_NUM; +UINTN mMsrCount = 0; + +/** + Get MSR spin lock by MSR index. + + @param MsrIndex MSR index value. + + @return Pointer to MSR spin lock. + +**/ +SPIN_LOCK * +GetMsrSpinLockByIndex ( + IN UINT32 MsrIndex + ) +{ + UINTN Index; + for (Index = 0; Index < mMsrCount; Index++) { + if (MsrIndex == mMsrSpinLocks[Index].MsrIndex) { + return &mMsrSpinLocks[Index].SpinLock; + } + } + return NULL; +} + +/** + Initialize MSR spin lock by MSR index. + + @param MsrIndex MSR index value. + +**/ +VOID +InitMsrSpinLockByIndex ( + IN UINT32 MsrIndex + ) +{ + UINTN NewMsrSpinLockCount; + + if (mMsrSpinLocks == NULL) { + mMsrSpinLocks = (MP_MSR_LOCK *) AllocatePool (sizeof (MP_MSR_LOCK) * mMsrSpinLockCount); + ASSERT (mMsrSpinLocks != NULL); + } + if (GetMsrSpinLockByIndex (MsrIndex) == NULL) { + // + // Initialize spin lock for MSR programming + // + mMsrSpinLocks[mMsrCount].MsrIndex = MsrIndex; + InitializeSpinLock (&mMsrSpinLocks[mMsrCount].SpinLock); + mMsrCount ++; + if (mMsrCount == mMsrSpinLockCount) { + // + // If MSR spin lock buffer is full, enlarge it + // + NewMsrSpinLockCount = mMsrSpinLockCount + MSR_SPIN_LOCK_INIT_NUM; + mMsrSpinLocks = ReallocatePool ( + sizeof (MP_MSR_LOCK) * mMsrSpinLockCount, + sizeof (MP_MSR_LOCK) * NewMsrSpinLockCount, + mMsrSpinLocks + ); + mMsrSpinLockCount = NewMsrSpinLockCount; + } + } +} + +/** + Sync up the MTRR values for all processors. + + @param MtrrTable Table holding fixed/variable MTRR values to be loaded. +**/ +VOID +EFIAPI +LoadMtrrData ( + EFI_PHYSICAL_ADDRESS MtrrTable + ) +/*++ + +Routine Description: + + Sync up the MTRR values for all processors. + +Arguments: + +Returns: + None + +--*/ +{ + MTRR_SETTINGS *MtrrSettings; + + MtrrSettings = (MTRR_SETTINGS *) (UINTN) MtrrTable; + MtrrSetAllMtrrs (MtrrSettings); +} + +/** + Programs registers for the calling processor. + + This function programs registers for the calling processor. 
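  As an illustrative sketch (assuming only BaseLib semantics, not part of
  this patch), each ControlRegister entry below performs the read-modify-write

    Mask  = LShiftU64 (RShiftU64 (MAX_UINT64, 63 - (End - Start)), Start);
    Value = (Value & ~Mask) | (LShiftU64 (NewBits, Start) & Mask);

  which is the operation BitFieldWrite64 (Value, Start, End, NewBits) computes,
  with Start = ValidBitStart and End = ValidBitStart + ValidBitLength - 1.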
+ + @param RegisterTable Pointer to register table of the running processor. + +**/ +VOID +SetProcessorRegister ( + IN CPU_REGISTER_TABLE *RegisterTable + ) +{ + CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry; + UINTN Index; + UINTN Value; + SPIN_LOCK *MsrSpinLock; + + // + // Traverse Register Table of this logical processor + // + RegisterTableEntry = (CPU_REGISTER_TABLE_ENTRY *) (UINTN) RegisterTable->RegisterTableEntry; + for (Index = 0; Index < RegisterTable->TableLength; Index++, RegisterTableEntry++) { + // + // Check the type of specified register + // + switch (RegisterTableEntry->RegisterType) { + // + // The specified register is Control Register + // + case ControlRegister: + switch (RegisterTableEntry->Index) { + case 0: + Value = AsmReadCr0 (); + Value = (UINTN) BitFieldWrite64 ( + Value, + RegisterTableEntry->ValidBitStart, + RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1, + (UINTN) RegisterTableEntry->Value + ); + AsmWriteCr0 (Value); + break; + case 2: + Value = AsmReadCr2 (); + Value = (UINTN) BitFieldWrite64 ( + Value, + RegisterTableEntry->ValidBitStart, + RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1, + (UINTN) RegisterTableEntry->Value + ); + AsmWriteCr2 (Value); + break; + case 3: + Value = AsmReadCr3 (); + Value = (UINTN) BitFieldWrite64 ( + Value, + RegisterTableEntry->ValidBitStart, + RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1, + (UINTN) RegisterTableEntry->Value + ); + AsmWriteCr3 (Value); + break; + case 4: + Value = AsmReadCr4 (); + Value = (UINTN) BitFieldWrite64 ( + Value, + RegisterTableEntry->ValidBitStart, + RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1, + (UINTN) RegisterTableEntry->Value + ); + AsmWriteCr4 (Value); + break; + default: + break; + } + break; + // + // The specified register is Model Specific Register + // + case Msr: + // + // If this function is called to restore register setting after INIT signal, + // there is no need to restore MSRs in register table. + // + if (RegisterTableEntry->ValidBitLength >= 64) { + // + // If length is not less than 64 bits, then directly write without reading + // + AsmWriteMsr64 ( + RegisterTableEntry->Index, + RegisterTableEntry->Value + ); + } else { + // + // Get lock to avoid Package/Core scope MSRs programming issue in parallel execution mode + // to make sure MSR read/write operation is atomic. + // + MsrSpinLock = GetMsrSpinLockByIndex (RegisterTableEntry->Index); + AcquireSpinLock (MsrSpinLock); + // + // Set the bit section according to bit start and length + // + AsmMsrBitFieldWrite64 ( + RegisterTableEntry->Index, + RegisterTableEntry->ValidBitStart, + RegisterTableEntry->ValidBitStart + RegisterTableEntry->ValidBitLength - 1, + RegisterTableEntry->Value + ); + ReleaseSpinLock (MsrSpinLock); + } + break; + // + // Enable or disable cache + // + case CacheControl: + // + // If value of the entry is 0, then disable cache. Otherwise, enable cache. + // + if (RegisterTableEntry->Value == 0) { + AsmDisableCache (); + } else { + AsmEnableCache (); + } + break; + + default: + break; + } + } +} + +/** + AP initialization before SMBASE relocation in the S3 boot path. +**/ +VOID +EarlyMPRendezvousProcedure ( + VOID + ) +{ + CPU_REGISTER_TABLE *RegisterTableList; + UINT32 InitApicId; + UINTN Index; + + LoadMtrrData (mAcpiCpuData.MtrrTable); + + // + // Find processor number for this CPU. 
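The same initial-APIC-ID lookup recurs in EarlyMPRendezvousProcedure, MPRendezvousProcedure, EarlyInitializeCpu and InitializeCpu; a hypothetical helper (FindRegisterTable is not part of this patch) could express the pattern once:

  CPU_REGISTER_TABLE *
  FindRegisterTable (
    IN CPU_REGISTER_TABLE  *RegisterTableList,
    IN UINTN               NumberOfCpus
    )
  {
    UINT32  InitApicId;
    UINTN   Index;

    InitApicId = GetInitialApicId ();
    for (Index = 0; Index < NumberOfCpus; Index++) {
      if (RegisterTableList[Index].InitialApicId == InitApicId) {
        return &RegisterTableList[Index];
      }
    }
    return NULL;    // no table matches this processor
  }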
+ // + RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable; + InitApicId = GetInitialApicId (); + for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) { + if (RegisterTableList[Index].InitialApicId == InitApicId) { + SetProcessorRegister (&RegisterTableList[Index]); + break; + } + } + + // + // Count down the number with lock mechanism. + // + InterlockedDecrement (&mNumberToFinish); +} + +/** + AP initialization after SMBASE relocation in the S3 boot path. +**/ +VOID +MPRendezvousProcedure ( + VOID + ) +{ + CPU_REGISTER_TABLE *RegisterTableList; + UINT32 InitApicId; + UINTN Index; + + ProgramVirtualWireMode (); + DisableLvtInterrupts (); + + RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable; + InitApicId = GetInitialApicId (); + for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) { + if (RegisterTableList[Index].InitialApicId == InitApicId) { + SetProcessorRegister (&RegisterTableList[Index]); + break; + } + } + + // + // Count down the number with lock mechanism. + // + InterlockedDecrement (&mNumberToFinish); +} + +/** + Prepares startup vector for APs. + + This function prepares startup vector for APs. + + @param WorkingBuffer The address of the work buffer. +**/ +VOID +PrepareApStartupVector ( + EFI_PHYSICAL_ADDRESS WorkingBuffer + ) +{ + EFI_PHYSICAL_ADDRESS StartupVector; + MP_ASSEMBLY_ADDRESS_MAP AddressMap; + + // + // Get the address map of startup code for AP, + // including code size, and offset of long jump instructions to redirect. + // + ZeroMem (&AddressMap, sizeof (AddressMap)); + AsmGetAddressMap (&AddressMap); + + StartupVector = WorkingBuffer; + + // + // Copy AP startup code to startup vector, and then redirect the long jump + // instructions for mode switching. + // + CopyMem ((VOID *) (UINTN) StartupVector, AddressMap.RendezvousFunnelAddress, AddressMap.Size); + *(UINT32 *) (UINTN) (StartupVector + AddressMap.FlatJumpOffset + 3) = (UINT32) (StartupVector + AddressMap.PModeEntryOffset); + if (AddressMap.LongJumpOffset != 0) { + *(UINT32 *) (UINTN) (StartupVector + AddressMap.LongJumpOffset + 2) = (UINT32) (StartupVector + AddressMap.LModeEntryOffset); + } + + // + // Get the start address of exchange data between BSP and AP. + // + mExchangeInfo = (MP_CPU_EXCHANGE_INFO *) (UINTN) (StartupVector + AddressMap.Size); + ZeroMem ((VOID *) mExchangeInfo, sizeof (MP_CPU_EXCHANGE_INFO)); + + CopyMem ((VOID *) (UINTN) &mExchangeInfo->GdtrProfile, (VOID *) (UINTN) mAcpiCpuData.GdtrProfile, sizeof (IA32_DESCRIPTOR)); + CopyMem ((VOID *) (UINTN) &mExchangeInfo->IdtrProfile, (VOID *) (UINTN) mAcpiCpuData.IdtrProfile, sizeof (IA32_DESCRIPTOR)); + + // + // Copy AP's GDT, IDT and Machine Check handler from SMRAM to ACPI NVS memory + // + CopyMem ((VOID *) mExchangeInfo->GdtrProfile.Base, mGdtForAp, mExchangeInfo->GdtrProfile.Limit + 1); + CopyMem ((VOID *) mExchangeInfo->IdtrProfile.Base, mIdtForAp, mExchangeInfo->IdtrProfile.Limit + 1); + CopyMem ((VOID *)(UINTN) mAcpiCpuData.ApMachineCheckHandlerBase, mMachineCheckHandlerForAp, mAcpiCpuData.ApMachineCheckHandlerSize); + + mExchangeInfo->StackStart = (VOID *) (UINTN) mAcpiCpuData.StackAddress; + mExchangeInfo->StackSize = mAcpiCpuData.StackSize; + mExchangeInfo->BufferStart = (UINT32) StartupVector; + mExchangeInfo->Cr3 = (UINT32) (AsmReadCr3 ()); +} + +/** + The function is invoked before SMBASE relocation in S3 path to restores CPU status. + + The function is invoked before SMBASE relocation in S3 path. 
It does first time microcode load + and restores MTRRs for both BSP and APs. + +**/ +VOID +EarlyInitializeCpu ( + VOID + ) +{ + CPU_REGISTER_TABLE *RegisterTableList; + UINT32 InitApicId; + UINTN Index; + + LoadMtrrData (mAcpiCpuData.MtrrTable); + + // + // Find processor number for this CPU. + // + RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.PreSmmInitRegisterTable; + InitApicId = GetInitialApicId (); + for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) { + if (RegisterTableList[Index].InitialApicId == InitApicId) { + SetProcessorRegister (&RegisterTableList[Index]); + break; + } + } + + ProgramVirtualWireMode (); + + PrepareApStartupVector (mAcpiCpuData.StartupVector); + + mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1; + mExchangeInfo->ApFunction = (VOID *) (UINTN) EarlyMPRendezvousProcedure; + + // + // Send INIT IPI - SIPI to all APs + // + SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector); + + while (mNumberToFinish > 0) { + CpuPause (); + } +} + +/** + The function is invoked after SMBASE relocation in S3 path to restores CPU status. + + The function is invoked after SMBASE relocation in S3 path. It restores configuration according to + data saved by normal boot path for both BSP and APs. + +**/ +VOID +InitializeCpu ( + VOID + ) +{ + CPU_REGISTER_TABLE *RegisterTableList; + UINT32 InitApicId; + UINTN Index; + + RegisterTableList = (CPU_REGISTER_TABLE *) (UINTN) mAcpiCpuData.RegisterTable; + InitApicId = GetInitialApicId (); + for (Index = 0; Index < mAcpiCpuData.NumberOfCpus; Index++) { + if (RegisterTableList[Index].InitialApicId == InitApicId) { + SetProcessorRegister (&RegisterTableList[Index]); + break; + } + } + + mNumberToFinish = mAcpiCpuData.NumberOfCpus - 1; + // + // StackStart was updated when APs were waken up in EarlyInitializeCpu. + // Re-initialize StackAddress to original beginning address. + // + mExchangeInfo->StackStart = (VOID *) (UINTN) mAcpiCpuData.StackAddress; + mExchangeInfo->ApFunction = (VOID *) (UINTN) MPRendezvousProcedure; + + // + // Send INIT IPI - SIPI to all APs + // + SendInitSipiSipiAllExcludingSelf ((UINT32)mAcpiCpuData.StartupVector); + + while (mNumberToFinish > 0) { + CpuPause (); + } +} diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/CpuService.c b/UefiCpuPkg/PiSmmCpuDxeSmm/CpuService.c new file mode 100644 index 0000000000..40f2a1719d --- /dev/null +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/CpuService.c @@ -0,0 +1,486 @@ +/** @file +Implementation of SMM CPU Services Protocol. + +Copyright (c) 2011 - 2015, Intel Corporation. All rights reserved.
+This program and the accompanying materials +are licensed and made available under the terms and conditions of the BSD License +which accompanies this distribution. The full text of the license may be found at +http://opensource.org/licenses/bsd-license.php + +THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#include "PiSmmCpuDxeSmm.h" + +// +// SMM CPU Service Protocol instance +// +EFI_SMM_CPU_SERVICE_PROTOCOL mSmmCpuService = { + SmmGetProcessorInfo, + SmmSwitchBsp, + SmmAddProcessor, + SmmRemoveProcessor, + SmmWhoAmI, + SmmRegisterExceptionHandler +}; + +/** + Get Package ID/Core ID/Thread ID of a processor. + + APIC ID must be an initial APIC ID. + + The algorithm below assumes the target system has symmetry across physical package boundaries + with respect to the number of logical processors per package, number of cores per package. + + @param ApicId APIC ID of the target logical processor. + @param Location Returns the processor location information. +**/ +VOID +SmmGetProcessorLocation ( + IN UINT32 ApicId, + OUT EFI_CPU_PHYSICAL_LOCATION *Location + ) +{ + UINTN ThreadBits; + UINTN CoreBits; + UINT32 RegEax; + UINT32 RegEbx; + UINT32 RegEcx; + UINT32 RegEdx; + UINT32 MaxCpuIdIndex; + UINT32 SubIndex; + UINTN LevelType; + UINT32 MaxLogicProcessorsPerPackage; + UINT32 MaxCoresPerPackage; + BOOLEAN TopologyLeafSupported; + + ASSERT (Location != NULL); + + ThreadBits = 0; + CoreBits = 0; + TopologyLeafSupported = FALSE; + + // + // Check if the processor is capable of supporting more than one logical processor. + // + AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx); + ASSERT ((RegEdx & BIT28) != 0); + + // + // Assume three-level mapping of APIC ID: Package:Core:SMT. + // + + // + // Get the max index of basic CPUID + // + AsmCpuid (CPUID_SIGNATURE, &MaxCpuIdIndex, NULL, NULL, NULL); + + // + // If the extended topology enumeration leaf is available, it + // is the preferred mechanism for enumerating topology. + // + if (MaxCpuIdIndex >= CPUID_EXTENDED_TOPOLOGY) { + AsmCpuidEx (CPUID_EXTENDED_TOPOLOGY, 0, &RegEax, &RegEbx, &RegEcx, NULL); + // + // If CPUID.(EAX=0BH, ECX=0H):EBX returns zero and maximum input value for + // basic CPUID information is greater than 0BH, then CPUID.0BH leaf is not + // supported on that processor. + // + if ((RegEbx & 0xffff) != 0) { + TopologyLeafSupported = TRUE; + + // + // Sub-leaf index 0 (ECX= 0 as input) provides enumeration parameters to extract + // the SMT sub-field of x2APIC ID. + // + LevelType = (RegEcx >> 8) & 0xff; + ASSERT (LevelType == CPUID_EXTENDED_TOPOLOGY_LEVEL_TYPE_SMT); + if ((RegEbx & 0xffff) > 1 ) { + ThreadBits = RegEax & 0x1f; + } else { + // + // HT is not supported + // + ThreadBits = 0; + } + + // + // Software must not assume any "level type" encoding + // value to be related to any sub-leaf index, except sub-leaf 0. 
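  //
  // Worked example (hypothetical 8-thread/4-core package):
  //   sub-leaf 0 (SMT level):  EAX[4:0] = 1  ->  ThreadBits = 1
  //   sub-leaf 1 (Core level): EAX[4:0] = 3  ->  CoreBits   = 3 - 1 = 2
  // With ThreadBits = 1 and CoreBits = 2, an initial APIC ID of 0x1D
  // (binary 11101) decomposes at the end of this function as:
  //   Thread  =  0x1D       & 0x1  = 1
  //   Core    = (0x1D >> 1) & 0x3  = 2
  //   Package =  0x1D >> 3         = 3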
+ // + SubIndex = 1; + do { + AsmCpuidEx (CPUID_EXTENDED_TOPOLOGY, SubIndex, &RegEax, NULL, &RegEcx, NULL); + LevelType = (RegEcx >> 8) & 0xff; + if (LevelType == CPUID_EXTENDED_TOPOLOGY_LEVEL_TYPE_CORE) { + CoreBits = (RegEax & 0x1f) - ThreadBits; + break; + } + SubIndex++; + } while (LevelType != CPUID_EXTENDED_TOPOLOGY_LEVEL_TYPE_INVALID); + } + } + + if (!TopologyLeafSupported) { + AsmCpuid (CPUID_VERSION_INFO, NULL, &RegEbx, NULL, NULL); + MaxLogicProcessorsPerPackage = (RegEbx >> 16) & 0xff; + if (MaxCpuIdIndex >= CPUID_CACHE_PARAMS) { + AsmCpuidEx (CPUID_CACHE_PARAMS, 0, &RegEax, NULL, NULL, NULL); + MaxCoresPerPackage = (RegEax >> 26) + 1; + } else { + // + // Must be a single-core processor. + // + MaxCoresPerPackage = 1; + } + + ThreadBits = (UINTN) (HighBitSet32 (MaxLogicProcessorsPerPackage / MaxCoresPerPackage - 1) + 1); + CoreBits = (UINTN) (HighBitSet32 (MaxCoresPerPackage - 1) + 1); + } + + Location->Thread = ApicId & ~((-1) << ThreadBits); + Location->Core = (ApicId >> ThreadBits) & ~((-1) << CoreBits); + Location->Package = (ApicId >> (ThreadBits+ CoreBits)); +} + +/** + Gets processor information on the requested processor at the instant this call is made. + + @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance. + @param[in] ProcessorNumber The handle number of processor. + @param[out] ProcessorInfoBuffer A pointer to the buffer where information for + the requested processor is deposited. + + @retval EFI_SUCCESS Processor information was returned. + @retval EFI_INVALID_PARAMETER ProcessorInfoBuffer is NULL. + @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid. + @retval EFI_NOT_FOUND The processor with the handle specified by + ProcessorNumber does not exist in the platform. + +**/ +EFI_STATUS +EFIAPI +SmmGetProcessorInfo ( + IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This, + IN UINTN ProcessorNumber, + OUT EFI_PROCESSOR_INFORMATION *ProcessorInfoBuffer + ) +{ + // + // Check parameter + // + if (ProcessorNumber >= mMaxNumberOfCpus || ProcessorInfoBuffer == NULL) { + return EFI_INVALID_PARAMETER; + } + + if (gSmmCpuPrivate->ProcessorInfo[ProcessorNumber].ProcessorId == INVALID_APIC_ID) { + return EFI_NOT_FOUND; + } + + // + // Fill in processor information + // + CopyMem (ProcessorInfoBuffer, &gSmmCpuPrivate->ProcessorInfo[ProcessorNumber], sizeof (EFI_PROCESSOR_INFORMATION)); + return EFI_SUCCESS; +} + +/** + This service switches the requested AP to be the BSP since the next SMI. + + @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance. + @param[in] ProcessorNumber The handle number of AP that is to become the new BSP. + + @retval EFI_SUCCESS BSP will be switched in next SMI. + @retval EFI_UNSUPPORTED Switching the BSP or a processor to be hot-removed is not supported. + @retval EFI_NOT_FOUND The processor with the handle specified by ProcessorNumber does not exist. + @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid. 
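  Note: the switch is deferred. SmmCpuUpdate() records this processor as a
  BSP candidate after all SMI handlers in the current SMI finish, and the
  election in SmiRendezvous() honors the candidate on the next SMI.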
+**/ +EFI_STATUS +EFIAPI +SmmSwitchBsp ( + IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This, + IN UINTN ProcessorNumber + ) +{ + // + // Check parameter + // + if (ProcessorNumber >= mMaxNumberOfCpus) { + return EFI_INVALID_PARAMETER; + } + + if (gSmmCpuPrivate->ProcessorInfo[ProcessorNumber].ProcessorId == INVALID_APIC_ID) { + return EFI_NOT_FOUND; + } + + if (gSmmCpuPrivate->Operation[ProcessorNumber] != SmmCpuNone || + gSmst->CurrentlyExecutingCpu == ProcessorNumber) { + return EFI_UNSUPPORTED; + } + + // + // Setting of the BSP for next SMI is pending until all SMI handlers are finished + // + gSmmCpuPrivate->Operation[ProcessorNumber] = SmmCpuSwitchBsp; + return EFI_SUCCESS; +} + +/** + Notify that a processor was hot-added. + + @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance. + @param[in] ProcessorId Local APIC ID of the hot-added processor. + @param[out] ProcessorNumber The handle number of the hot-added processor. + + @retval EFI_SUCCESS The hot-addition of the specified processors was successfully notified. + @retval EFI_UNSUPPORTED Hot addition of processor is not supported. + @retval EFI_NOT_FOUND The processor with the handle specified by ProcessorNumber does not exist. + @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid. + @retval EFI_ALREADY_STARTED The processor is already online in the system. +**/ +EFI_STATUS +EFIAPI +SmmAddProcessor ( + IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This, + IN UINT64 ProcessorId, + OUT UINTN *ProcessorNumber + ) +{ + UINTN Index; + + if (!FeaturePcdGet (PcdCpuHotPlugSupport)) { + return EFI_UNSUPPORTED; + } + + // + // Check parameter + // + if (ProcessorNumber == NULL || ProcessorId == INVALID_APIC_ID) { + return EFI_INVALID_PARAMETER; + } + + // + // Check if the processor already exists + // + + for (Index = 0; Index < mMaxNumberOfCpus; Index++) { + if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == ProcessorId) { + return EFI_ALREADY_STARTED; + } + } + + // + // Check CPU hot plug data. The CPU RAS handler should have created the mapping + // of the APIC ID to SMBASE. + // + for (Index = 0; Index < mMaxNumberOfCpus; Index++) { + if (mCpuHotPlugData.ApicId[Index] == ProcessorId && + gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == INVALID_APIC_ID) { + gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = ProcessorId; + gSmmCpuPrivate->ProcessorInfo[Index].StatusFlag = 0; + SmmGetProcessorLocation ((UINT32)ProcessorId, &gSmmCpuPrivate->ProcessorInfo[Index].Location); + + *ProcessorNumber = Index; + gSmmCpuPrivate->Operation[Index] = SmmCpuAdd; + return EFI_SUCCESS; + } + } + + return EFI_INVALID_PARAMETER; +} + +/** + Notify that a processor was hot-removed. + + @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance. + @param[in] ProcessorNumber The handle number of the hot-added processor. + + @retval EFI_SUCCESS The hot-removal of the specified processors was successfully notified. + @retval EFI_UNSUPPORTED Hot removal of processor is not supported. + @retval EFI_UNSUPPORTED Hot removal of BSP is not supported. + @retval EFI_UNSUPPORTED Hot removal of a processor with pending hot-plug operation is not supported. + @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid. 
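A hedged usage sketch for the hot-add path above (the 0x10 APIC ID is hypothetical, the platform RAS handler must already have mapped it to an SMBASE slot in mCpuHotPlugData, and the member names are assumed from SmmCpuService.h):

  EFI_SMM_CPU_SERVICE_PROTOCOL  *SmmCpuService;
  UINTN                         ProcessorNumber;
  EFI_STATUS                    Status;

  Status = gSmst->SmmLocateProtocol (&gEfiSmmCpuServiceProtocolGuid, NULL, (VOID **)&SmmCpuService);
  ASSERT_EFI_ERROR (Status);
  Status = SmmCpuService->AddProcessor (SmmCpuService, 0x10, &ProcessorNumber);
  //
  // On EFI_SUCCESS the CPU joins the SMM CPU list only after all SMI
  // handlers return, when SmmCpuUpdate () applies the pending SmmCpuAdd.
  //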
+**/ +EFI_STATUS +EFIAPI +SmmRemoveProcessor ( + IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This, + IN UINTN ProcessorNumber + ) +{ + if (!FeaturePcdGet (PcdCpuHotPlugSupport)) { + return EFI_UNSUPPORTED; + } + + // + // Check parameter + // + if (ProcessorNumber >= mMaxNumberOfCpus || + gSmmCpuPrivate->ProcessorInfo[ProcessorNumber].ProcessorId == INVALID_APIC_ID) { + return EFI_INVALID_PARAMETER; + } + + // + // Can't remove BSP + // + if (ProcessorNumber == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu) { + return EFI_UNSUPPORTED; + } + + if (gSmmCpuPrivate->Operation[ProcessorNumber] != SmmCpuNone) { + return EFI_UNSUPPORTED; + } + + gSmmCpuPrivate->ProcessorInfo[ProcessorNumber].ProcessorId = INVALID_APIC_ID; + mCpuHotPlugData.ApicId[ProcessorNumber] = INVALID_APIC_ID; + + // + // Removal of the processor from the CPU list is pending until all SMI handlers are finished + // + gSmmCpuPrivate->Operation[ProcessorNumber] = SmmCpuRemove; + return EFI_SUCCESS; +} + +/** + This return the handle number for the calling processor. + + @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance. + @param[out] ProcessorNumber The handle number of currently executing processor. + + @retval EFI_SUCCESS The current processor handle number was returned + in ProcessorNumber. + @retval EFI_INVALID_PARAMETER ProcessorNumber is NULL. + +**/ +EFI_STATUS +EFIAPI +SmmWhoAmI ( + IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This, + OUT UINTN *ProcessorNumber + ) +{ + UINTN Index; + UINT64 ApicId; + + // + // Check parameter + // + if (ProcessorNumber == NULL) { + return EFI_INVALID_PARAMETER; + } + + ApicId = GetApicId (); + + for (Index = 0; Index < mMaxNumberOfCpus; Index++) { + if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == ApicId) { + *ProcessorNumber = Index; + return EFI_SUCCESS; + } + } + // + // This should not happen + // + ASSERT (FALSE); + return EFI_NOT_FOUND; +} + +/** + Update the SMM CPU list per the pending operation. + + This function is called after return from SMI handlers. +**/ +VOID +SmmCpuUpdate ( + VOID + ) +{ + UINTN Index; + + // + // Handle pending BSP switch operations + // + for (Index = 0; Index < mMaxNumberOfCpus; Index++) { + if (gSmmCpuPrivate->Operation[Index] == SmmCpuSwitchBsp) { + gSmmCpuPrivate->Operation[Index] = SmmCpuNone; + mSmmMpSyncData->SwitchBsp = TRUE; + mSmmMpSyncData->CandidateBsp[Index] = TRUE; + } + } + + // + // Handle pending hot-add operations + // + for (Index = 0; Index < mMaxNumberOfCpus; Index++) { + if (gSmmCpuPrivate->Operation[Index] == SmmCpuAdd) { + gSmmCpuPrivate->Operation[Index] = SmmCpuNone; + mNumberOfCpus++; + } + } + + // + // Handle pending hot-remove operations + // + for (Index = 0; Index < mMaxNumberOfCpus; Index++) { + if (gSmmCpuPrivate->Operation[Index] == SmmCpuRemove) { + gSmmCpuPrivate->Operation[Index] = SmmCpuNone; + mNumberOfCpus--; + } + } +} + +/** + Register exception handler. + + @param This A pointer to the SMM_CPU_SERVICE_PROTOCOL instance. + @param ExceptionType Defines which interrupt or exception to hook. Type EFI_EXCEPTION_TYPE and + the valid values for this parameter are defined in EFI_DEBUG_SUPPORT_PROTOCOL + of the UEFI 2.0 specification. + @param InterruptHandler A pointer to a function of type EFI_CPU_INTERRUPT_HANDLER + that is called when a processor interrupt occurs. + If this parameter is NULL, then the handler will be uninstalled. + + @retval EFI_SUCCESS The handler for the processor interrupt was successfully installed or uninstalled. 
+ @retval EFI_ALREADY_STARTED  InterruptHandler is not NULL, and a handler for InterruptType was previously installed.
+ @retval EFI_INVALID_PARAMETER InterruptHandler is NULL, and a handler for InterruptType was not previously installed.
+ @retval EFI_UNSUPPORTED      The interrupt specified by InterruptType is not supported.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmRegisterExceptionHandler (
+    IN EFI_SMM_CPU_SERVICE_PROTOCOL  *This,
+    IN EFI_EXCEPTION_TYPE            ExceptionType,
+    IN EFI_CPU_INTERRUPT_HANDLER     InterruptHandler
+  )
+{
+  return RegisterCpuInterruptHandler (ExceptionType, InterruptHandler);
+}
+
+/**
+  Initialize SMM CPU Services.
+
+  It installs EFI SMM CPU Services Protocol.
+
+  @param Handle The firmware allocated handle for the EFI image.
+
+  @retval EFI_SUCCESS EFI SMM CPU Services Protocol was installed successfully.
+**/
+EFI_STATUS
+InitializeSmmCpuServices (
+  IN EFI_HANDLE Handle
+  )
+{
+  EFI_STATUS Status;
+
+  Status = gSmst->SmmInstallProtocolInterface (
+             &Handle,
+             &gEfiSmmCpuServiceProtocolGuid,
+             EFI_NATIVE_INTERFACE,
+             &mSmmCpuService
+             );
+  ASSERT_EFI_ERROR (Status);
+  return Status;
+}
+
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/CpuService.h b/UefiCpuPkg/PiSmmCpuDxeSmm/CpuService.h
new file mode 100644
index 0000000000..9fe3f45b02
--- /dev/null
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/CpuService.h
@@ -0,0 +1,181 @@
+/** @file
+Include file for SMM CPU Services protocol implementation.
+
+Copyright (c) 2011 - 2015, Intel Corporation. All rights reserved.
+This program and the accompanying materials +are licensed and made available under the terms and conditions of the BSD License +which accompanies this distribution. The full text of the license may be found at +http://opensource.org/licenses/bsd-license.php + +THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef _CPU_SERVICE_H_ +#define _CPU_SERVICE_H_ + +typedef enum { + SmmCpuNone, + SmmCpuAdd, + SmmCpuRemove, + SmmCpuSwitchBsp +} SMM_CPU_OPERATION; + +// +// SMM CPU Service Protocol function prototypes. +// + +/** + Gets processor information on the requested processor at the instant this call is made. + + @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance. + @param[in] ProcessorNumber The handle number of processor. + @param[out] ProcessorInfoBuffer A pointer to the buffer where information for + the requested processor is deposited. + + @retval EFI_SUCCESS Processor information was returned. + @retval EFI_INVALID_PARAMETER ProcessorInfoBuffer is NULL. + @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid. + @retval EFI_NOT_FOUND The processor with the handle specified by + ProcessorNumber does not exist in the platform. + +**/ +EFI_STATUS +EFIAPI +SmmGetProcessorInfo ( + IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This, + IN UINTN ProcessorNumber, + OUT EFI_PROCESSOR_INFORMATION *ProcessorInfoBuffer + ); + +/** + This service switches the requested AP to be the BSP since the next SMI. + + @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance. + @param[in] ProcessorNumber The handle number of AP that is to become the new BSP. + + @retval EFI_SUCCESS BSP will be switched in next SMI. + @retval EFI_UNSUPPORTED Switching the BSP or a processor to be hot-removed is not supported. + @retval EFI_NOT_FOUND The processor with the handle specified by ProcessorNumber does not exist. + @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid. +**/ +EFI_STATUS +EFIAPI +SmmSwitchBsp ( + IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This, + IN UINTN ProcessorNumber + ); + +/** + Notify that a processor was hot-added. + + @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance. + @param[in] ProcessorId Local APIC ID of the hot-added processor. + @param[out] ProcessorNumber The handle number of the hot-added processor. + + @retval EFI_SUCCESS The hot-addition of the specified processors was successfully notified. + @retval EFI_UNSUPPORTED Hot addition of processor is not supported. + @retval EFI_NOT_FOUND The processor with the handle specified by ProcessorNumber does not exist. + @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid. + @retval EFI_ALREADY_STARTED The processor is already online in the system. +**/ +EFI_STATUS +EFIAPI +SmmAddProcessor ( + IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This, + IN UINT64 ProcessorId, + OUT UINTN *ProcessorNumber + ); + +/** + Notify that a processor was hot-removed. + + @param[in] This A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance. + @param[in] ProcessorNumber The handle number of the hot-added processor. + + @retval EFI_SUCCESS The hot-removal of the specified processors was successfully notified. + @retval EFI_UNSUPPORTED Hot removal of processor is not supported. + @retval EFI_UNSUPPORTED Hot removal of BSP is not supported. + @retval EFI_UNSUPPORTED Hot removal of a processor with pending hot-plug operation is not supported. + @retval EFI_INVALID_PARAMETER ProcessorNumber is invalid. 
+**/
+EFI_STATUS
+EFIAPI
+SmmRemoveProcessor (
+  IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+  IN UINTN                              ProcessorNumber
+  );
+
+/**
+  This returns the handle number for the calling processor.
+
+  @param[in] This                A pointer to the EFI_SMM_CPU_SERVICE_PROTOCOL instance.
+  @param[out] ProcessorNumber    The handle number of currently executing processor.
+
+  @retval EFI_SUCCESS            The current processor handle number was returned
+                                 in ProcessorNumber.
+  @retval EFI_INVALID_PARAMETER  ProcessorNumber is NULL.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmWhoAmI (
+  IN CONST EFI_SMM_CPU_SERVICE_PROTOCOL *This,
+  OUT UINTN                             *ProcessorNumber
+  );
+
+/**
+  Register exception handler.
+
+  @param  This                  A pointer to the SMM_CPU_SERVICE_PROTOCOL instance.
+  @param  ExceptionType         Defines which interrupt or exception to hook. Type EFI_EXCEPTION_TYPE and
+                                the valid values for this parameter are defined in EFI_DEBUG_SUPPORT_PROTOCOL
+                                of the UEFI 2.0 specification.
+  @param  InterruptHandler      A pointer to a function of type EFI_CPU_INTERRUPT_HANDLER
+                                that is called when a processor interrupt occurs.
+                                If this parameter is NULL, then the handler will be uninstalled.
+
+  @retval EFI_SUCCESS           The handler for the processor interrupt was successfully installed or uninstalled.
+  @retval EFI_ALREADY_STARTED   InterruptHandler is not NULL, and a handler for InterruptType was previously installed.
+  @retval EFI_INVALID_PARAMETER InterruptHandler is NULL, and a handler for InterruptType was not previously installed.
+  @retval EFI_UNSUPPORTED       The interrupt specified by InterruptType is not supported.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmRegisterExceptionHandler (
+    IN EFI_SMM_CPU_SERVICE_PROTOCOL  *This,
+    IN EFI_EXCEPTION_TYPE            ExceptionType,
+    IN EFI_CPU_INTERRUPT_HANDLER     InterruptHandler
+  );
+
+//
+// Internal function prototypes
+//
+
+/**
+  Update the SMM CPU list per the pending operation.
+
+  This function is called after return from SMI handlers.
+**/
+VOID
+SmmCpuUpdate (
+  VOID
+  );
+
+/**
+  Initialize SMM CPU Services.
+
+  It installs EFI SMM CPU Services Protocol.
+
+  @param Handle The firmware allocated handle for the EFI image.
+
+  @retval EFI_SUCCESS EFI SMM CPU Services Protocol was installed successfully.
+**/
+EFI_STATUS
+InitializeSmmCpuServices (
+  IN EFI_HANDLE Handle
+  );
+
+#endif
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
new file mode 100644
index 0000000000..730c32df0a
--- /dev/null
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/MpService.c
@@ -0,0 +1,1327 @@
+/** @file
+SMM MP service implementation
+
+Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.
+This program and the accompanying materials +are licensed and made available under the terms and conditions of the BSD License +which accompanies this distribution. The full text of the license may be found at +http://opensource.org/licenses/bsd-license.php + +THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#include "PiSmmCpuDxeSmm.h" + +// +// Slots for all MTRR( FIXED MTRR + VARIABLE MTRR + MTRR_LIB_IA32_MTRR_DEF_TYPE) +// +UINT64 gSmiMtrrs[MTRR_NUMBER_OF_FIXED_MTRR + 2 * MTRR_NUMBER_OF_VARIABLE_MTRR + 1]; +UINT64 gPhyMask; +SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData = NULL; +UINTN mSmmMpSyncDataSize; + +/** + Performs an atomic compare exchange operation to get semaphore. + The compare exchange operation must be performed using + MP safe mechanisms. + + @param Sem IN: 32-bit unsigned integer + OUT: original integer - 1 + @return Original integer - 1 + +**/ +UINT32 +WaitForSemaphore ( + IN OUT volatile UINT32 *Sem + ) +{ + UINT32 Value; + + do { + Value = *Sem; + } while (Value == 0 || + InterlockedCompareExchange32 ( + (UINT32*)Sem, + Value, + Value - 1 + ) != Value); + return Value - 1; +} + + +/** + Performs an atomic compare exchange operation to release semaphore. + The compare exchange operation must be performed using + MP safe mechanisms. + + @param Sem IN: 32-bit unsigned integer + OUT: original integer + 1 + @return Original integer + 1 + +**/ +UINT32 +ReleaseSemaphore ( + IN OUT volatile UINT32 *Sem + ) +{ + UINT32 Value; + + do { + Value = *Sem; + } while (Value + 1 != 0 && + InterlockedCompareExchange32 ( + (UINT32*)Sem, + Value, + Value + 1 + ) != Value); + return Value + 1; +} + +/** + Performs an atomic compare exchange operation to lock semaphore. + The compare exchange operation must be performed using + MP safe mechanisms. + + @param Sem IN: 32-bit unsigned integer + OUT: -1 + @return Original integer + +**/ +UINT32 +LockdownSemaphore ( + IN OUT volatile UINT32 *Sem + ) +{ + UINT32 Value; + + do { + Value = *Sem; + } while (InterlockedCompareExchange32 ( + (UINT32*)Sem, + Value, (UINT32)-1 + ) != Value); + return Value; +} + +/** + Wait all APs to performs an atomic compare exchange operation to release semaphore. + + @param NumberOfAPs AP number + +**/ +VOID +WaitForAllAPs ( + IN UINTN NumberOfAPs + ) +{ + UINTN BspIndex; + + BspIndex = mSmmMpSyncData->BspIndex; + while (NumberOfAPs-- > 0) { + WaitForSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run); + } +} + +/** + Performs an atomic compare exchange operation to release semaphore + for each AP. + +**/ +VOID +ReleaseAllAPs ( + VOID + ) +{ + UINTN Index; + UINTN BspIndex; + + BspIndex = mSmmMpSyncData->BspIndex; + for (Index = mMaxNumberOfCpus; Index-- > 0;) { + if (Index != BspIndex && mSmmMpSyncData->CpuData[Index].Present) { + ReleaseSemaphore (&mSmmMpSyncData->CpuData[Index].Run); + } + } +} + +/** + Checks if all CPUs (with certain exceptions) have checked in for this SMI run + + @param Exceptions CPU Arrival exception flags. + + @retval TRUE if all CPUs the have checked in. + @retval FALSE if at least one Normal AP hasn't checked in. 
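WaitForSemaphore and ReleaseSemaphore above form a CAS-based counting semaphore; a minimal illustration (not part of this patch) of the per-CPU Run counters built on them:

  volatile UINT32  Run;

  Run = 0;                    // empty
  ReleaseSemaphore (&Run);    // post: 0 -> 1
  WaitForSemaphore (&Run);    // take: 1 -> 0 (spins while the count is 0)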
+ +**/ +BOOLEAN +AllCpusInSmmWithExceptions ( + SMM_CPU_ARRIVAL_EXCEPTIONS Exceptions + ) +{ + UINTN Index; + SMM_CPU_DATA_BLOCK *CpuData; + EFI_PROCESSOR_INFORMATION *ProcessorInfo; + + ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus); + + if (mSmmMpSyncData->Counter == mNumberOfCpus) { + return TRUE; + } + + CpuData = mSmmMpSyncData->CpuData; + ProcessorInfo = gSmmCpuPrivate->ProcessorInfo; + for (Index = mMaxNumberOfCpus; Index-- > 0;) { + if (!CpuData[Index].Present && ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) { + if (((Exceptions & ARRIVAL_EXCEPTION_DELAYED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmDelayed) != 0) { + continue; + } + if (((Exceptions & ARRIVAL_EXCEPTION_BLOCKED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmBlocked) != 0) { + continue; + } + if (((Exceptions & ARRIVAL_EXCEPTION_SMI_DISABLED) != 0) && SmmCpuFeaturesGetSmmRegister (Index, SmmRegSmmEnable) != 0) { + continue; + } + return FALSE; + } + } + + + return TRUE; +} + + +/** + Given timeout constraint, wait for all APs to arrive, and insure when this function returns, no AP will execute normal mode code before + entering SMM, except SMI disabled APs. + +**/ +VOID +SmmWaitForApArrival ( + VOID + ) +{ + UINT64 Timer; + UINTN Index; + + ASSERT (mSmmMpSyncData->Counter <= mNumberOfCpus); + + // + // Platform implementor should choose a timeout value appropriately: + // - The timeout value should balance the SMM time constrains and the likelihood that delayed CPUs are excluded in the SMM run. Note + // the SMI Handlers must ALWAYS take into account the cases that not all APs are available in an SMI run. + // - The timeout value must, in the case of 2nd timeout, be at least long enough to give time for all APs to receive the SMI IPI + // and either enter SMM or buffer the SMI, to insure there is no CPU running normal mode code when SMI handling starts. This will + // be TRUE even if a blocked CPU is brought out of the blocked state by a normal mode CPU (before the normal mode CPU received the + // SMI IPI), because with a buffered SMI, and CPU will enter SMM immediately after it is brought out of the blocked state. + // - The timeout value must be longer than longest possible IO operation in the system + // + + // + // Sync with APs 1st timeout + // + for (Timer = StartSyncTimer (); + !IsSyncTimerTimeout (Timer) && + !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED ); + ) { + CpuPause (); + } + + // + // Not all APs have arrived, so we need 2nd round of timeout. IPIs should be sent to ALL none present APs, + // because: + // a) Delayed AP may have just come out of the delayed state. Blocked AP may have just been brought out of blocked state by some AP running + // normal mode code. These APs need to be guaranteed to have an SMI pending to insure that once they are out of delayed / blocked state, they + // enter SMI immediately without executing instructions in normal mode. Note traditional flow requires there are no APs doing normal mode + // work while SMI handling is on-going. + // b) As a consequence of SMI IPI sending, (spurious) SMI may occur after this SMM run. + // c) ** NOTE **: Use SMI disabling feature VERY CAREFULLY (if at all) for traditional flow, because a processor in SMI-disabled state + // will execute normal mode code, which breaks the traditional SMI handlers' assumption that no APs are doing normal + // mode work while SMI handling is on-going. 
+ // d) We don't add code to check SMI disabling status to skip sending IPI to SMI disabled APs, because: + // - In traditional flow, SMI disabling is discouraged. + // - In relaxed flow, CheckApArrival() will check SMI disabling status before calling this function. + // In both cases, adding SMI-disabling checking code increases overhead. + // + if (mSmmMpSyncData->Counter < mNumberOfCpus) { + // + // Send SMI IPIs to bring outside processors in + // + for (Index = mMaxNumberOfCpus; Index-- > 0;) { + if (!mSmmMpSyncData->CpuData[Index].Present && gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId != INVALID_APIC_ID) { + SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId); + } + } + + // + // Sync with APs 2nd timeout. + // + for (Timer = StartSyncTimer (); + !IsSyncTimerTimeout (Timer) && + !AllCpusInSmmWithExceptions (ARRIVAL_EXCEPTION_BLOCKED | ARRIVAL_EXCEPTION_SMI_DISABLED ); + ) { + CpuPause (); + } + } + + return; +} + + +/** + Replace OS MTRR's with SMI MTRR's. + + @param CpuIndex Processor Index + +**/ +VOID +ReplaceOSMtrrs ( + IN UINTN CpuIndex + ) +{ + PROCESSOR_SMM_DESCRIPTOR *Psd; + UINT64 *SmiMtrrs; + MTRR_SETTINGS *BiosMtrr; + + Psd = (PROCESSOR_SMM_DESCRIPTOR*)(mCpuHotPlugData.SmBase[CpuIndex] + SMM_PSD_OFFSET); + SmiMtrrs = (UINT64*)(UINTN)Psd->MtrrBaseMaskPtr; + + SmmCpuFeaturesDisableSmrr (); + + // + // Replace all MTRRs registers + // + BiosMtrr = (MTRR_SETTINGS*)SmiMtrrs; + MtrrSetAllMtrrs(BiosMtrr); +} + +/** + SMI handler for BSP. + + @param CpuIndex BSP processor Index + @param SyncMode SMM MP sync mode + +**/ +VOID +BSPHandler ( + IN UINTN CpuIndex, + IN SMM_CPU_SYNC_MODE SyncMode + ) +{ + UINTN Index; + MTRR_SETTINGS Mtrrs; + UINTN ApCount; + BOOLEAN ClearTopLevelSmiResult; + UINTN PresentCount; + + ASSERT (CpuIndex == mSmmMpSyncData->BspIndex); + ApCount = 0; + + // + // Flag BSP's presence + // + mSmmMpSyncData->InsideSmm = TRUE; + + // + // Initialize Debug Agent to start source level debug in BSP handler + // + InitializeDebugAgent (DEBUG_AGENT_INIT_ENTER_SMI, NULL, NULL); + + // + // Mark this processor's presence + // + mSmmMpSyncData->CpuData[CpuIndex].Present = TRUE; + + // + // Clear platform top level SMI status bit before calling SMI handlers. If + // we cleared it after SMI handlers are run, we would miss the SMI that + // occurs after SMI handlers are done and before SMI status bit is cleared. + // + ClearTopLevelSmiResult = ClearTopLevelSmiStatus(); + ASSERT (ClearTopLevelSmiResult == TRUE); + + // + // Set running processor index + // + gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu = CpuIndex; + + // + // If Traditional Sync Mode or need to configure MTRRs: gather all available APs. + // + if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) { + + // + // Wait for APs to arrive + // + SmmWaitForApArrival(); + + // + // Lock the counter down and retrieve the number of APs + // + mSmmMpSyncData->AllCpusInSync = TRUE; + ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1; + + // + // Wait for all APs to get ready for programming MTRRs + // + WaitForAllAPs (ApCount); + + if (SmmCpuFeaturesNeedConfigureMtrrs()) { + // + // Signal all APs it's time for backup MTRRs + // + ReleaseAllAPs (); + + // + // WaitForSemaphore() may wait for ever if an AP happens to enter SMM at + // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set + // to a large enough value to avoid this situation. + // Note: For HT capable CPUs, threads within a core share the same set of MTRRs. 
+ // We do the backup first and then set MTRR to avoid race condition for threads + // in the same core. + // + MtrrGetAllMtrrs(&Mtrrs); + + // + // Wait for all APs to complete their MTRR saving + // + WaitForAllAPs (ApCount); + + // + // Let all processors program SMM MTRRs together + // + ReleaseAllAPs (); + + // + // WaitForSemaphore() may wait for ever if an AP happens to enter SMM at + // exactly this point. Please make sure PcdCpuSmmMaxSyncLoops has been set + // to a large enough value to avoid this situation. + // + ReplaceOSMtrrs (CpuIndex); + + // + // Wait for all APs to complete their MTRR programming + // + WaitForAllAPs (ApCount); + } + } + + // + // The BUSY lock is initialized to Acquired state + // + AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy); + + // + // Restore SMM Configuration in S3 boot path. + // + if (mRestoreSmmConfigurationInS3) { + // + // Configure SMM Code Access Check feature if available. + // + ConfigSmmCodeAccessCheck (); + mRestoreSmmConfigurationInS3 = FALSE; + } + + // + // Invoke SMM Foundation EntryPoint with the processor information context. + // + gSmmCpuPrivate->SmmCoreEntry (&gSmmCpuPrivate->SmmCoreEntryContext); + + // + // Make sure all APs have completed their pending none-block tasks + // + for (Index = mMaxNumberOfCpus; Index-- > 0;) { + if (Index != CpuIndex && mSmmMpSyncData->CpuData[Index].Present) { + AcquireSpinLock (&mSmmMpSyncData->CpuData[Index].Busy); + ReleaseSpinLock (&mSmmMpSyncData->CpuData[Index].Busy);; + } + } + + // + // Perform the remaining tasks + // + PerformRemainingTasks (); + + // + // If Relaxed-AP Sync Mode: gather all available APs after BSP SMM handlers are done, and + // make those APs to exit SMI synchronously. APs which arrive later will be excluded and + // will run through freely. + // + if (SyncMode != SmmCpuSyncModeTradition && !SmmCpuFeaturesNeedConfigureMtrrs()) { + + // + // Lock the counter down and retrieve the number of APs + // + mSmmMpSyncData->AllCpusInSync = TRUE; + ApCount = LockdownSemaphore (&mSmmMpSyncData->Counter) - 1; + // + // Make sure all APs have their Present flag set + // + while (TRUE) { + PresentCount = 0; + for (Index = mMaxNumberOfCpus; Index-- > 0;) { + if (mSmmMpSyncData->CpuData[Index].Present) { + PresentCount ++; + } + } + if (PresentCount > ApCount) { + break; + } + } + } + + // + // Notify all APs to exit + // + mSmmMpSyncData->InsideSmm = FALSE; + ReleaseAllAPs (); + + // + // Wait for all APs to complete their pending tasks + // + WaitForAllAPs (ApCount); + + if (SmmCpuFeaturesNeedConfigureMtrrs()) { + // + // Signal APs to restore MTRRs + // + ReleaseAllAPs (); + + // + // Restore OS MTRRs + // + SmmCpuFeaturesReenableSmrr (); + MtrrSetAllMtrrs(&Mtrrs); + + // + // Wait for all APs to complete MTRR programming + // + WaitForAllAPs (ApCount); + } + + // + // Stop source level debug in BSP handler, the code below will not be + // debugged. + // + InitializeDebugAgent (DEBUG_AGENT_INIT_EXIT_SMI, NULL, NULL); + + // + // Signal APs to Reset states/semaphore for this processor + // + ReleaseAllAPs (); + + // + // Perform pending operations for hot-plug + // + SmmCpuUpdate (); + + // + // Clear the Present flag of BSP + // + mSmmMpSyncData->CpuData[CpuIndex].Present = FALSE; + + // + // Gather APs to exit SMM synchronously. Note the Present flag is cleared by now but + // WaitForAllAps does not depend on the Present flag. + // + WaitForAllAPs (ApCount); + + // + // Reset BspIndex to -1, meaning BSP has not been elected. 
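  // (When PcdCpuSmmEnableBspElection is FALSE, BspIndex keeps the value it
  // received at initialization, 0 after InitializeMpSyncData's ZeroMem, so
  // the same CPU serves as SMM BSP on every SMI.)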
+ // + if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) { + mSmmMpSyncData->BspIndex = (UINT32)-1; + } + + // + // Allow APs to check in from this point on + // + mSmmMpSyncData->Counter = 0; + mSmmMpSyncData->AllCpusInSync = FALSE; +} + +/** + SMI handler for AP. + + @param CpuIndex AP processor Index. + @param ValidSmi Indicates that current SMI is a valid SMI or not. + @param SyncMode SMM MP sync mode. + +**/ +VOID +APHandler ( + IN UINTN CpuIndex, + IN BOOLEAN ValidSmi, + IN SMM_CPU_SYNC_MODE SyncMode + ) +{ + UINT64 Timer; + UINTN BspIndex; + MTRR_SETTINGS Mtrrs; + + // + // Timeout BSP + // + for (Timer = StartSyncTimer (); + !IsSyncTimerTimeout (Timer) && + !mSmmMpSyncData->InsideSmm; + ) { + CpuPause (); + } + + if (!mSmmMpSyncData->InsideSmm) { + // + // BSP timeout in the first round + // + if (mSmmMpSyncData->BspIndex != -1) { + // + // BSP Index is known + // + BspIndex = mSmmMpSyncData->BspIndex; + ASSERT (CpuIndex != BspIndex); + + // + // Send SMI IPI to bring BSP in + // + SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[BspIndex].ProcessorId); + + // + // Now clock BSP for the 2nd time + // + for (Timer = StartSyncTimer (); + !IsSyncTimerTimeout (Timer) && + !mSmmMpSyncData->InsideSmm; + ) { + CpuPause (); + } + + if (!mSmmMpSyncData->InsideSmm) { + // + // Give up since BSP is unable to enter SMM + // and signal the completion of this AP + WaitForSemaphore (&mSmmMpSyncData->Counter); + return; + } + } else { + // + // Don't know BSP index. Give up without sending IPI to BSP. + // + WaitForSemaphore (&mSmmMpSyncData->Counter); + return; + } + } + + // + // BSP is available + // + BspIndex = mSmmMpSyncData->BspIndex; + ASSERT (CpuIndex != BspIndex); + + // + // Mark this processor's presence + // + mSmmMpSyncData->CpuData[CpuIndex].Present = TRUE; + + if (SyncMode == SmmCpuSyncModeTradition || SmmCpuFeaturesNeedConfigureMtrrs()) { + // + // Notify BSP of arrival at this point + // + ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run); + } + + if (SmmCpuFeaturesNeedConfigureMtrrs()) { + // + // Wait for the signal from BSP to backup MTRRs + // + WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run); + + // + // Backup OS MTRRs + // + MtrrGetAllMtrrs(&Mtrrs); + + // + // Signal BSP the completion of this AP + // + ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run); + + // + // Wait for BSP's signal to program MTRRs + // + WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run); + + // + // Replace OS MTRRs with SMI MTRRs + // + ReplaceOSMtrrs (CpuIndex); + + // + // Signal BSP the completion of this AP + // + ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run); + } + + while (TRUE) { + // + // Wait for something to happen + // + WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run); + + // + // Check if BSP wants to exit SMM + // + if (!mSmmMpSyncData->InsideSmm) { + break; + } + + // + // BUSY should be acquired by SmmStartupThisAp() + // + ASSERT ( + !AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy) + ); + + // + // Invoke the scheduled procedure + // + (*mSmmMpSyncData->CpuData[CpuIndex].Procedure) ( + (VOID*)mSmmMpSyncData->CpuData[CpuIndex].Parameter + ); + + // + // Release BUSY + // + ReleaseSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy); + } + + if (SmmCpuFeaturesNeedConfigureMtrrs()) { + // + // Notify BSP the readiness of this AP to program MTRRs + // + ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run); + + // + // Wait for the signal from BSP to program MTRRs + // + WaitForSemaphore 
(&mSmmMpSyncData->CpuData[CpuIndex].Run); + + // + // Restore OS MTRRs + // + SmmCpuFeaturesReenableSmrr (); + MtrrSetAllMtrrs(&Mtrrs); + } + + // + // Notify BSP the readiness of this AP to Reset states/semaphore for this processor + // + ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run); + + // + // Wait for the signal from BSP to Reset states/semaphore for this processor + // + WaitForSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run); + + // + // Reset states/semaphore for this processor + // + mSmmMpSyncData->CpuData[CpuIndex].Present = FALSE; + + // + // Notify BSP the readiness of this AP to exit SMM + // + ReleaseSemaphore (&mSmmMpSyncData->CpuData[BspIndex].Run); + +} + +/** + Create 4G PageTable in SMRAM. + + @param ExtraPages Additional page numbers besides for 4G memory + @return PageTable Address + +**/ +UINT32 +Gen4GPageTable ( + IN UINTN ExtraPages + ) +{ + VOID *PageTable; + UINTN Index; + UINT64 *Pte; + UINTN PagesNeeded; + UINTN Low2MBoundary; + UINTN High2MBoundary; + UINTN Pages; + UINTN GuardPage; + UINT64 *Pdpte; + UINTN PageIndex; + UINTN PageAddress; + + Low2MBoundary = 0; + High2MBoundary = 0; + PagesNeeded = 0; + if (FeaturePcdGet (PcdCpuSmmStackGuard)) { + // + // Add one more page for known good stack, then find the lower 2MB aligned address. + // + Low2MBoundary = (mSmmStackArrayBase + EFI_PAGE_SIZE) & ~(SIZE_2MB-1); + // + // Add two more pages for known good stack and stack guard page, + // then find the lower 2MB aligned address. + // + High2MBoundary = (mSmmStackArrayEnd - mSmmStackSize + EFI_PAGE_SIZE * 2) & ~(SIZE_2MB-1); + PagesNeeded = ((High2MBoundary - Low2MBoundary) / SIZE_2MB) + 1; + } + // + // Allocate the page table + // + PageTable = AllocatePages (ExtraPages + 5 + PagesNeeded); + ASSERT (PageTable != NULL); + + PageTable = (VOID *)((UINTN)PageTable + EFI_PAGES_TO_SIZE (ExtraPages)); + Pte = (UINT64*)PageTable; + + // + // Zero out all page table entries first + // + ZeroMem (Pte, EFI_PAGES_TO_SIZE (1)); + + // + // Set Page Directory Pointers + // + for (Index = 0; Index < 4; Index++) { + Pte[Index] = (UINTN)PageTable + EFI_PAGE_SIZE * (Index + 1) + IA32_PG_P; + } + Pte += EFI_PAGE_SIZE / sizeof (*Pte); + + // + // Fill in Page Directory Entries + // + for (Index = 0; Index < EFI_PAGE_SIZE * 4 / sizeof (*Pte); Index++) { + Pte[Index] = (Index << 21) + IA32_PG_PS + IA32_PG_RW + IA32_PG_P; + } + + if (FeaturePcdGet (PcdCpuSmmStackGuard)) { + Pages = (UINTN)PageTable + EFI_PAGES_TO_SIZE (5); + GuardPage = mSmmStackArrayBase + EFI_PAGE_SIZE; + Pdpte = (UINT64*)PageTable; + for (PageIndex = Low2MBoundary; PageIndex <= High2MBoundary; PageIndex += SIZE_2MB) { + Pte = (UINT64*)(UINTN)(Pdpte[BitFieldRead32 ((UINT32)PageIndex, 30, 31)] & ~(EFI_PAGE_SIZE - 1)); + Pte[BitFieldRead32 ((UINT32)PageIndex, 21, 29)] = (UINT64)Pages + IA32_PG_RW + IA32_PG_P; + // + // Fill in Page Table Entries + // + Pte = (UINT64*)Pages; + PageAddress = PageIndex; + for (Index = 0; Index < EFI_PAGE_SIZE / sizeof (*Pte); Index++) { + if (PageAddress == GuardPage) { + // + // Mark the guard page as non-present + // + Pte[Index] = PageAddress; + GuardPage += mSmmStackSize; + if (GuardPage > mSmmStackArrayEnd) { + GuardPage = 0; + } + } else { + Pte[Index] = PageAddress + IA32_PG_RW + IA32_PG_P; + } + PageAddress+= EFI_PAGE_SIZE; + } + Pages += EFI_PAGE_SIZE; + } + } + + return (UINT32)(UINTN)PageTable; +} + +/** + Set memory cache ability. 
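  A worked example of the four-level walk below, for Address = 0xABCDE000:
    PML4 index: (Address >> 39) & 0x1FF = 0      (64-bit paging only)
    PDPT index: (Address >> 30) & 0x1FF = 2
    PD   index: (Address >> 21) & 0x1FF = 0x15E
    PT   index: (Address >> 12) & 0x1FF = 0xDE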
+ + @param PageTable PageTable Address + @param Address Memory Address to change cache ability + @param Cacheability Cache ability to set + +**/ +VOID +SetCacheability ( + IN UINT64 *PageTable, + IN UINTN Address, + IN UINT8 Cacheability + ) +{ + UINTN PTIndex; + VOID *NewPageTableAddress; + UINT64 *NewPageTable; + UINTN Index; + + ASSERT ((Address & EFI_PAGE_MASK) == 0); + + if (sizeof (UINTN) == sizeof (UINT64)) { + PTIndex = (UINTN)RShiftU64 (Address, 39) & 0x1ff; + ASSERT (PageTable[PTIndex] & IA32_PG_P); + PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask); + } + + PTIndex = (UINTN)RShiftU64 (Address, 30) & 0x1ff; + ASSERT (PageTable[PTIndex] & IA32_PG_P); + PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask); + + // + // A perfect implementation should check the original cacheability with the + // one being set, and break a 2M page entry into pieces only when they + // disagreed. + // + PTIndex = (UINTN)RShiftU64 (Address, 21) & 0x1ff; + if ((PageTable[PTIndex] & IA32_PG_PS) != 0) { + // + // Allocate a page from SMRAM + // + NewPageTableAddress = AllocatePages (1); + ASSERT (NewPageTableAddress != NULL); + + NewPageTable = (UINT64 *)NewPageTableAddress; + + for (Index = 0; Index < 0x200; Index++) { + NewPageTable[Index] = PageTable[PTIndex]; + if ((NewPageTable[Index] & IA32_PG_PAT_2M) != 0) { + NewPageTable[Index] &= ~((UINT64)IA32_PG_PAT_2M); + NewPageTable[Index] |= (UINT64)IA32_PG_PAT_4K; + } + NewPageTable[Index] |= (UINT64)(Index << EFI_PAGE_SHIFT); + } + + PageTable[PTIndex] = ((UINTN)NewPageTableAddress & gPhyMask) | IA32_PG_P; + } + + ASSERT (PageTable[PTIndex] & IA32_PG_P); + PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & gPhyMask); + + PTIndex = (UINTN)RShiftU64 (Address, 12) & 0x1ff; + ASSERT (PageTable[PTIndex] & IA32_PG_P); + PageTable[PTIndex] &= ~((UINT64)((IA32_PG_PAT_4K | IA32_PG_CD | IA32_PG_WT))); + PageTable[PTIndex] |= (UINT64)Cacheability; +} + + +/** + Schedule a procedure to run on the specified CPU. + + @param Procedure The address of the procedure to run + @param CpuIndex Target CPU Index + @param ProcArguments The parameter to pass to the procedure + + @retval EFI_INVALID_PARAMETER CpuNumber not valid + @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP + @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM + @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy + @retval EFI_SUCCESS The procedure has been successfully scheduled + +**/ +EFI_STATUS +EFIAPI +SmmStartupThisAp ( + IN EFI_AP_PROCEDURE Procedure, + IN UINTN CpuIndex, + IN OUT VOID *ProcArguments OPTIONAL + ) +{ + if (CpuIndex >= gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus || + CpuIndex == gSmmCpuPrivate->SmmCoreEntryContext.CurrentlyExecutingCpu || + !mSmmMpSyncData->CpuData[CpuIndex].Present || + gSmmCpuPrivate->Operation[CpuIndex] == SmmCpuRemove || + !AcquireSpinLockOrFail (&mSmmMpSyncData->CpuData[CpuIndex].Busy)) { + return EFI_INVALID_PARAMETER; + } + + mSmmMpSyncData->CpuData[CpuIndex].Procedure = Procedure; + mSmmMpSyncData->CpuData[CpuIndex].Parameter = ProcArguments; + ReleaseSemaphore (&mSmmMpSyncData->CpuData[CpuIndex].Run); + + if (FeaturePcdGet (PcdCpuSmmBlockStartupThisAp)) { + AcquireSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy); + ReleaseSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy); + } + return EFI_SUCCESS; +} + +/** + C function for SMI entry, each processor comes here upon SMI trigger. 
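A hedged caller sketch for SmmStartupThisAp above (MyApTask and the CPU index are hypothetical; the caller must be the SMM BSP inside an SMI handler, and the target AP must be present in this SMI run):

  VOID
  EFIAPI
  MyApTask (                    // hypothetical EFI_AP_PROCEDURE
    IN OUT VOID  *Buffer
    )
  {
    (*(UINTN *)Buffer)++;       // trivial per-AP work
  }

  EFI_STATUS
  RunOnAp1 (
    VOID
    )
  {
    UINTN  Payload;

    Payload = 0;
    //
    // With PcdCpuSmmBlockStartupThisAp set, the call returns only after
    // AP 1 has run MyApTask; otherwise completion is signaled when the
    // AP releases its Busy lock.
    //
    return SmmStartupThisAp (MyApTask, 1, &Payload);
  }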
+ + @param CpuIndex CPU Index + +**/ +VOID +EFIAPI +SmiRendezvous ( + IN UINTN CpuIndex + ) +{ + EFI_STATUS Status; + BOOLEAN ValidSmi; + BOOLEAN IsBsp; + BOOLEAN BspInProgress; + UINTN Index; + UINTN Cr2; + + // + // Save Cr2 because Page Fault exception in SMM may override its value + // + Cr2 = AsmReadCr2 (); + + // + // Perform CPU specific entry hooks + // + SmmCpuFeaturesRendezvousEntry (CpuIndex); + + // + // Determine if this is a valid SMI + // + ValidSmi = PlatformValidSmi (); + + // + // Determine if the BSP is already in progress. Note this must be checked after + // ValidSmi because BSP may clear a valid SMI source after checking in. + // + BspInProgress = mSmmMpSyncData->InsideSmm; + + if (!BspInProgress && !ValidSmi) { + // + // If we reach here, it means that when we sampled the ValidSmi flag, either SMI status had not + // been cleared by BSP in a new SMI run (so we have a truly invalid SMI), or SMI + // status had been cleared by BSP and an existing SMI run has almost ended. (Note + // we sampled ValidSmi flag BEFORE judging BSP-in-progress status.) In both cases, there + // is nothing we need to do. + // + goto Exit; + } else { + // + // Signal presence of this processor + // + if (ReleaseSemaphore (&mSmmMpSyncData->Counter) == 0) { + // + // BSP has already ended the synchronization, so QUIT!!! + // + + // + // Wait for BSP's signal to finish SMI + // + while (mSmmMpSyncData->AllCpusInSync) { + CpuPause (); + } + goto Exit; + } else { + + // + // The BUSY lock is initialized to Released state. + // This needs to be done early enough to be ready for BSP's SmmStartupThisAp() call. + // E.g., with Relaxed AP flow, SmmStartupThisAp() may be called immediately + // after AP's present flag is detected. + // + InitializeSpinLock (&mSmmMpSyncData->CpuData[CpuIndex].Busy); + } + + // + // Try to enable NX + // + if (mXdSupported) { + ActivateXd (); + } + + if (FeaturePcdGet (PcdCpuSmmProfileEnable)) { + ActivateSmmProfile (CpuIndex); + } + + if (BspInProgress) { + // + // BSP has been elected. Follow AP path, regardless of ValidSmi flag + // as BSP may have cleared the SMI status + // + APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode); + } else { + // + // We have a valid SMI + // + + // + // Elect BSP + // + IsBsp = FALSE; + if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) { + if (!mSmmMpSyncData->SwitchBsp || mSmmMpSyncData->CandidateBsp[CpuIndex]) { + // + // Call platform hook to do BSP election + // + Status = PlatformSmmBspElection (&IsBsp); + if (EFI_SUCCESS == Status) { + // + // The platform hook made the determination successfully + // + if (IsBsp) { + mSmmMpSyncData->BspIndex = (UINT32)CpuIndex; + } + } else { + // + // The platform hook failed to make a determination; use the default BSP election method + // + InterlockedCompareExchange32 ( + (UINT32*)&mSmmMpSyncData->BspIndex, + (UINT32)-1, + (UINT32)CpuIndex + ); + } + } + } + + // + // "mSmmMpSyncData->BspIndex == CpuIndex" means this is the BSP + // + if (mSmmMpSyncData->BspIndex == CpuIndex) { + + // + // Clear last request for SwitchBsp.
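+ // (Annotation added for clarity: the default election just above is a single atomic operation - InterlockedCompareExchange32() stores CpuIndex into BspIndex only if BspIndex still holds (UINT32)-1, so exactly one CPU can win the BSP role per SMI; every other CPU fails the exchange and takes the AP path.)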
+ // + if (mSmmMpSyncData->SwitchBsp) { + mSmmMpSyncData->SwitchBsp = FALSE; + for (Index = 0; Index < mMaxNumberOfCpus; Index++) { + mSmmMpSyncData->CandidateBsp[Index] = FALSE; + } + } + + if (FeaturePcdGet (PcdCpuSmmProfileEnable)) { + SmmProfileRecordSmiNum (); + } + + // + // BSP Handler is always called with a ValidSmi == TRUE + // + BSPHandler (CpuIndex, mSmmMpSyncData->EffectiveSyncMode); + + } else { + APHandler (CpuIndex, ValidSmi, mSmmMpSyncData->EffectiveSyncMode); + } + } + + ASSERT (mSmmMpSyncData->CpuData[CpuIndex].Run == 0); + + // + // Wait for BSP's signal to exit SMI + // + while (mSmmMpSyncData->AllCpusInSync) { + CpuPause (); + } + } + +Exit: + SmmCpuFeaturesRendezvousExit (CpuIndex); + // + // Restore Cr2 + // + AsmWriteCr2 (Cr2); +} + + +/** + Initialize uncacheable data. + +**/ +VOID +EFIAPI +InitializeMpSyncData ( + VOID + ) +{ + if (mSmmMpSyncData != NULL) { + ZeroMem (mSmmMpSyncData, mSmmMpSyncDataSize); + mSmmMpSyncData->CpuData = (SMM_CPU_DATA_BLOCK *)((UINT8 *)mSmmMpSyncData + sizeof (SMM_DISPATCHER_MP_SYNC_DATA)); + mSmmMpSyncData->CandidateBsp = (BOOLEAN *)(mSmmMpSyncData->CpuData + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus); + if (FeaturePcdGet (PcdCpuSmmEnableBspElection)) { + // + // Enable BSP election by setting BspIndex to -1 + // + mSmmMpSyncData->BspIndex = (UINT32)-1; + } + mSmmMpSyncData->EffectiveSyncMode = (SMM_CPU_SYNC_MODE) PcdGet8 (PcdCpuSmmSyncMode); + } +} + +/** + Initialize global data for MP synchronization. + + @param Stacks Base address of SMI stack buffer for all processors. + @param StackSize Stack size for each processor in SMM. + +**/ +UINT32 +InitializeMpServiceData ( + IN VOID *Stacks, + IN UINTN StackSize + ) +{ + UINT32 Cr3; + UINTN Index; + MTRR_SETTINGS *Mtrr; + PROCESSOR_SMM_DESCRIPTOR *Psd; + UINTN GdtTssTableSize; + UINT8 *GdtTssTables; + IA32_SEGMENT_DESCRIPTOR *GdtDescriptor; + UINTN TssBase; + UINTN GdtTableStepSize; + + // + // Initialize physical address mask + // NOTE: Physical memory above virtual address limit is not supported !!! + // + AsmCpuid (0x80000008, (UINT32*)&Index, NULL, NULL, NULL); + gPhyMask = LShiftU64 (1, (UINT8)Index) - 1; + gPhyMask &= (1ull << 48) - EFI_PAGE_SIZE; + + // + // Create page tables + // + Cr3 = SmmInitPageTable (); + + GdtTssTables = NULL; + GdtTssTableSize = 0; + GdtTableStepSize = 0; + // + // For X64 SMM, we allocate a separate GDT/TSS for each CPU to avoid TSS load contention + // on each SMI entry. + // + if (EFI_IMAGE_MACHINE_TYPE_SUPPORTED(EFI_IMAGE_MACHINE_X64)) { + GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE + 7) & ~7; // 8 bytes aligned + GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus)); + ASSERT (GdtTssTables != NULL); + GdtTableStepSize = GdtTssTableSize; + + for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) { + CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE); + if (FeaturePcdGet (PcdCpuSmmStackGuard)) { + // + // Set up the top of the known good stack as IST1 for each processor. + // + *(UINTN *)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1 + TSS_X64_IST1_OFFSET) = (mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize); + } + } + } else if (FeaturePcdGet (PcdCpuSmmStackGuard)) { + + // + // For IA32 SMM, if the SMM Stack Guard feature is enabled, we use two TSSes. + // In this case, we allocate a separate GDT/TSS for each CPU to avoid TSS load contention + // on each SMI entry.
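+ // A worked example of the descriptor fixup below (TssBase value is hypothetical): if a TSS copy lands at TssBase = 0x0ABC1234, the patched fields become BaseLow = 0x1234, BaseMid = 0xBC and BaseHigh = 0x0A - the 32-bit base is simply scattered across the three base fields of the IA32_SEGMENT_DESCRIPTOR.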
+ // + + // + // Enlarge GDT to contain 2 TSS descriptors + // + gcSmiGdtr.Limit += (UINT16)(2 * sizeof (IA32_SEGMENT_DESCRIPTOR)); + + GdtTssTableSize = (gcSmiGdtr.Limit + 1 + TSS_SIZE * 2 + 7) & ~7; // 8 bytes aligned + GdtTssTables = (UINT8*)AllocatePages (EFI_SIZE_TO_PAGES (GdtTssTableSize * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus)); + ASSERT (GdtTssTables != NULL); + GdtTableStepSize = GdtTssTableSize; + + for (Index = 0; Index < gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; Index++) { + CopyMem (GdtTssTables + GdtTableStepSize * Index, (VOID*)(UINTN)gcSmiGdtr.Base, gcSmiGdtr.Limit + 1 + TSS_SIZE * 2); + // + // Fix up TSS descriptors + // + TssBase = (UINTN)(GdtTssTables + GdtTableStepSize * Index + gcSmiGdtr.Limit + 1); + GdtDescriptor = (IA32_SEGMENT_DESCRIPTOR *)(TssBase) - 2; + GdtDescriptor->Bits.BaseLow = (UINT16)TssBase; + GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16); + GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24); + + TssBase += TSS_SIZE; + GdtDescriptor++; + GdtDescriptor->Bits.BaseLow = (UINT16)TssBase; + GdtDescriptor->Bits.BaseMid = (UINT8)(TssBase >> 16); + GdtDescriptor->Bits.BaseHigh = (UINT8)(TssBase >> 24); + // + // Fix up TSS segments + // + // ESP as known good stack + // + *(UINTN *)(TssBase + TSS_IA32_ESP_OFFSET) = mSmmStackArrayBase + EFI_PAGE_SIZE + Index * mSmmStackSize; + *(UINT32 *)(TssBase + TSS_IA32_CR3_OFFSET) = Cr3; + } + } + + // + // Initialize PROCESSOR_SMM_DESCRIPTOR for each CPU + // + for (Index = 0; Index < mMaxNumberOfCpus; Index++) { + Psd = (PROCESSOR_SMM_DESCRIPTOR *)(VOID *)(UINTN)(mCpuHotPlugData.SmBase[Index] + SMM_PSD_OFFSET); + CopyMem (Psd, &gcPsd, sizeof (gcPsd)); + if (EFI_IMAGE_MACHINE_TYPE_SUPPORTED (EFI_IMAGE_MACHINE_X64)) { + // + // For X64 SMM, set GDT to the copy allocated above. + // + Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index); + } else if (FeaturePcdGet (PcdCpuSmmStackGuard)) { + // + // For IA32 SMM, if SMM Stack Guard feature is enabled, set GDT to the copy allocated above. + // + Psd->SmmGdtPtr = (UINT64)(UINTN)(GdtTssTables + GdtTableStepSize * Index); + Psd->SmmGdtSize = gcSmiGdtr.Limit + 1; + } + + // + // Install SMI handler + // + InstallSmiHandler ( + Index, + (UINT32)mCpuHotPlugData.SmBase[Index], + (VOID*)((UINTN)Stacks + (StackSize * Index)), + StackSize, + (UINTN)Psd->SmmGdtPtr, + Psd->SmmGdtSize, + gcSmiIdtr.Base, + gcSmiIdtr.Limit + 1, + Cr3 + ); + } + + // + // Initialize mSmmMpSyncData + // + mSmmMpSyncDataSize = sizeof (SMM_DISPATCHER_MP_SYNC_DATA) + + (sizeof (SMM_CPU_DATA_BLOCK) + sizeof (BOOLEAN)) * gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; + mSmmMpSyncData = (SMM_DISPATCHER_MP_SYNC_DATA*) AllocatePages (EFI_SIZE_TO_PAGES (mSmmMpSyncDataSize)); + ASSERT (mSmmMpSyncData != NULL); + InitializeMpSyncData (); + + // + // Record current MTRR settings + // + ZeroMem (gSmiMtrrs, sizeof (gSmiMtrrs)); + Mtrr = (MTRR_SETTINGS*)gSmiMtrrs; + MtrrGetAllMtrrs (Mtrr); + + return Cr3; +} + +/** + + Register the SMM Foundation entry point. + + @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance + @param SmmEntryPoint SMM Foundation EntryPoint + + @retval EFI_SUCCESS The SMM Foundation entry point was registered successfully + +**/ +EFI_STATUS +EFIAPI +RegisterSmmEntry ( + IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This, + IN EFI_SMM_ENTRY_POINT SmmEntryPoint + ) +{ + // + // Record the SMM Foundation EntryPoint; it will be invoked later from the SMI entry vector.
+ // + gSmmCpuPrivate->SmmCoreEntry = SmmEntryPoint; + return EFI_SUCCESS; +} diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c new file mode 100644 index 0000000000..0e39173cbb --- /dev/null +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.c @@ -0,0 +1,1489 @@ +/** @file +Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU. + +Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.
+This program and the accompanying materials +are licensed and made available under the terms and conditions of the BSD License +which accompanies this distribution. The full text of the license may be found at +http://opensource.org/licenses/bsd-license.php + +THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#include "PiSmmCpuDxeSmm.h" + +// +// SMM CPU Private Data structure that contains SMM Configuration Protocol +// along with its supporting fields. +// +SMM_CPU_PRIVATE_DATA mSmmCpuPrivateData = { + SMM_CPU_PRIVATE_DATA_SIGNATURE, // Signature + NULL, // SmmCpuHandle + NULL, // Pointer to ProcessorInfo array + NULL, // Pointer to Operation array + NULL, // Pointer to CpuSaveStateSize array + NULL, // Pointer to CpuSaveState array + { {0} }, // SmmReservedSmramRegion + { + SmmStartupThisAp, // SmmCoreEntryContext.SmmStartupThisAp + 0, // SmmCoreEntryContext.CurrentlyExecutingCpu + 0, // SmmCoreEntryContext.NumberOfCpus + NULL, // SmmCoreEntryContext.CpuSaveStateSize + NULL // SmmCoreEntryContext.CpuSaveState + }, + NULL, // SmmCoreEntry + { + mSmmCpuPrivateData.SmmReservedSmramRegion, // SmmConfiguration.SmramReservedRegions + RegisterSmmEntry // SmmConfiguration.RegisterSmmEntry + }, +}; + +CPU_HOT_PLUG_DATA mCpuHotPlugData = { + CPU_HOT_PLUG_DATA_REVISION_1, // Revision + 0, // Array Length of SmBase and APIC ID + NULL, // Pointer to APIC ID array + NULL, // Pointer to SMBASE array + 0, // Reserved + 0, // SmrrBase + 0 // SmrrSize +}; + +// +// Global pointer used to access mSmmCpuPrivateData from outside and inside SMM +// +SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate = &mSmmCpuPrivateData; + +// +// SMM Relocation variables +// +volatile BOOLEAN *mRebased; +volatile BOOLEAN mIsBsp; + +/// +/// Handle for the SMM CPU Protocol +/// +EFI_HANDLE mSmmCpuHandle = NULL; + +/// +/// SMM CPU Protocol instance +/// +EFI_SMM_CPU_PROTOCOL mSmmCpu = { + SmmReadSaveState, + SmmWriteSaveState +}; + +EFI_CPU_INTERRUPT_HANDLER mExternalVectorTable[EXCEPTION_VECTOR_NUMBER]; + +/// +/// SMM CPU Save State Protocol instance +/// +EFI_SMM_CPU_SAVE_STATE_PROTOCOL mSmmCpuSaveState = { + NULL +}; + +// +// SMM stack information +// +UINTN mSmmStackArrayBase; +UINTN mSmmStackArrayEnd; +UINTN mSmmStackSize; + +// +// Pointer to structure used during S3 Resume +// +SMM_S3_RESUME_STATE *mSmmS3ResumeState = NULL; + +UINTN mMaxNumberOfCpus = 1; +UINTN mNumberOfCpus = 1; + +// +// SMM ready to lock flag +// +BOOLEAN mSmmReadyToLock = FALSE; + +// +// Global used to cache PCD for SMM Code Access Check enable +// +BOOLEAN mSmmCodeAccessCheckEnable = FALSE; + +// +// Spin lock used to serialize setting of SMM Code Access Check feature +// +SPIN_LOCK mConfigSmmCodeAccessCheckLock; + +/** + Initialize the IDT to set up exception handlers for SMM.
+ +**/ +VOID +InitializeSmmIdt ( + VOID + ) +{ + EFI_STATUS Status; + BOOLEAN InterruptState; + IA32_DESCRIPTOR DxeIdtr; + // + // Disable interrupts and save the DXE IDT table + // + InterruptState = SaveAndDisableInterrupts (); + AsmReadIdtr (&DxeIdtr); + // + // Load SMM temporary IDT table + // + AsmWriteIdtr (&gcSmiIdtr); + // + // Set up SMM default exception handlers; the SMM IDT table + // will be updated and saved in gcSmiIdtr + // + Status = InitializeCpuExceptionHandlers (NULL); + ASSERT_EFI_ERROR (Status); + // + // Restore the DXE IDT table and CPU interrupt state + // + AsmWriteIdtr ((IA32_DESCRIPTOR *) &DxeIdtr); + SetInterruptState (InterruptState); +} + +/** + Search for the module name by the input IP address and output it. + + @param CallerIpAddress Caller instruction pointer. + +**/ +VOID +DumpModuleInfoByIp ( + IN UINTN CallerIpAddress + ) +{ + UINTN Pe32Data; + EFI_IMAGE_DOS_HEADER *DosHdr; + EFI_IMAGE_OPTIONAL_HEADER_PTR_UNION Hdr; + VOID *PdbPointer; + UINT64 DumpIpAddress; + + // + // Find Image Base + // + Pe32Data = CallerIpAddress & ~(SIZE_4KB - 1); + while (Pe32Data != 0) { + DosHdr = (EFI_IMAGE_DOS_HEADER *) Pe32Data; + if (DosHdr->e_magic == EFI_IMAGE_DOS_SIGNATURE) { + // + // DOS image header is present, so read the PE header after the DOS image header. + // + Hdr.Pe32 = (EFI_IMAGE_NT_HEADERS32 *)(Pe32Data + (UINTN) ((DosHdr->e_lfanew) & 0x0ffff)); + // + // Make sure PE header address does not overflow and is less than the initial address. + // + if (((UINTN)Hdr.Pe32 > Pe32Data) && ((UINTN)Hdr.Pe32 < CallerIpAddress)) { + if (Hdr.Pe32->Signature == EFI_IMAGE_NT_SIGNATURE) { + // + // It is a PE image. + // + break; + } + } + } + + // + // Image base not found; check the previous aligned address + // + Pe32Data -= SIZE_4KB; + } + + DumpIpAddress = CallerIpAddress; + DEBUG ((EFI_D_ERROR, "It is invoked from the instruction before IP(0x%lx)", DumpIpAddress)); + + if (Pe32Data != 0) { + PdbPointer = PeCoffLoaderGetPdbPointer ((VOID *) Pe32Data); + if (PdbPointer != NULL) { + DEBUG ((EFI_D_ERROR, " in module (%a)", PdbPointer)); + } + } +} + +/** + Read information from the CPU save state. + + @param This EFI_SMM_CPU_PROTOCOL instance + @param Width The number of bytes to read from the CPU save state. + @param Register Specifies the CPU register to read from the save state. + @param CpuIndex Specifies the zero-based index of the CPU save state. + @param Buffer Upon return, this holds the CPU register value read from the save state. + + @retval EFI_SUCCESS The register was read from Save State + @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor + @retval EFI_INVALID_PARAMETER This or Buffer is NULL. + +**/ +EFI_STATUS +EFIAPI +SmmReadSaveState ( + IN CONST EFI_SMM_CPU_PROTOCOL *This, + IN UINTN Width, + IN EFI_SMM_SAVE_STATE_REGISTER Register, + IN UINTN CpuIndex, + OUT VOID *Buffer + ) +{ + EFI_STATUS Status; + + // + // Retrieve pointer to the specified CPU's SMM Save State buffer + // + if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) { + return EFI_INVALID_PARAMETER; + } + + // + // Check for special EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID + // + if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) { + // + // The pseudo-register only supports the 64-bit size specified by Width. + // + if (Width != sizeof (UINT64)) { + return EFI_INVALID_PARAMETER; + } + // + // If the processor is in SMM at the time the SMI occurred, + // the pseudo register value for EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID is returned in Buffer.
+ // Otherwise, EFI_NOT_FOUND is returned. + // + if (mSmmMpSyncData->CpuData[CpuIndex].Present) { + *(UINT64 *)Buffer = gSmmCpuPrivate->ProcessorInfo[CpuIndex].ProcessorId; + return EFI_SUCCESS; + } else { + return EFI_NOT_FOUND; + } + } + + if (!mSmmMpSyncData->CpuData[CpuIndex].Present) { + return EFI_INVALID_PARAMETER; + } + + Status = SmmCpuFeaturesReadSaveStateRegister (CpuIndex, Register, Width, Buffer); + if (Status == EFI_UNSUPPORTED) { + Status = ReadSaveStateRegister (CpuIndex, Register, Width, Buffer); + } + return Status; +} + +/** + Write data to the CPU save state. + + @param This EFI_SMM_CPU_PROTOCOL instance + @param Width The number of bytes to write to the CPU save state. + @param Register Specifies the CPU register to write to the save state. + @param CpuIndex Specifies the zero-based index of the CPU save state + @param Buffer Upon entry, this holds the new CPU register value. + + @retval EFI_SUCCESS The register was written to Save State + @retval EFI_NOT_FOUND The register is not defined for the Save State of Processor + @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct + +**/ +EFI_STATUS +EFIAPI +SmmWriteSaveState ( + IN CONST EFI_SMM_CPU_PROTOCOL *This, + IN UINTN Width, + IN EFI_SMM_SAVE_STATE_REGISTER Register, + IN UINTN CpuIndex, + IN CONST VOID *Buffer + ) +{ + EFI_STATUS Status; + + // + // Retrieve pointer to the specified CPU's SMM Save State buffer + // + if ((CpuIndex >= gSmst->NumberOfCpus) || (Buffer == NULL)) { + return EFI_INVALID_PARAMETER; + } + + // + // Writes to EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID are ignored + // + if (Register == EFI_SMM_SAVE_STATE_REGISTER_PROCESSOR_ID) { + return EFI_SUCCESS; + } + + if (!mSmmMpSyncData->CpuData[CpuIndex].Present) { + return EFI_INVALID_PARAMETER; + } + + Status = SmmCpuFeaturesWriteSaveStateRegister (CpuIndex, Register, Width, Buffer); + if (Status == EFI_UNSUPPORTED) { + Status = WriteSaveStateRegister (CpuIndex, Register, Width, Buffer); + } + return Status; +} + + +/** + C function for the SMI handler to change all processors' SMBASE registers. + +**/ +VOID +EFIAPI +SmmInitHandler ( + VOID + ) +{ + UINT32 ApicId; + UINTN Index; + + // + // Update SMM IDT entries' code segment and load IDT + // + AsmWriteIdtr (&gcSmiIdtr); + ApicId = GetApicId (); + + ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber)); + + for (Index = 0; Index < mNumberOfCpus; Index++) { + if (ApicId == (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) { + // + // Initialize SMM specific features on the currently executing CPU + // + SmmCpuFeaturesInitializeProcessor ( + Index, + mIsBsp, + gSmmCpuPrivate->ProcessorInfo, + &mCpuHotPlugData + ); + + if (mIsBsp) { + // + // BSP rebase is already done above. + // Initialize private data during S3 resume + // + InitializeMpSyncData (); + } + + // + // Hook return after RSM to set SMM re-based flag + // + SemaphoreHook (Index, &mRebased[Index]); + + return; + } + } + ASSERT (FALSE); +} + +/** + Relocate SmmBases for each processor. + + Executed on first boot and all S3 resumes + +**/ +VOID +EFIAPI +SmmRelocateBases ( + VOID + ) +{ + UINT8 BakBuf[BACK_BUF_SIZE]; + SMRAM_SAVE_STATE_MAP BakBuf2; + SMRAM_SAVE_STATE_MAP *CpuStatePtr; + UINT8 *U8Ptr; + UINT32 ApicId; + UINTN Index; + UINTN BspIndex; + + // + // Make sure the reserved size is large enough for procedure SmmInitTemplate.
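+ // (For orientation: SMM_DEFAULT_SMBASE is the architectural default SMBASE of 0x30000 and SMM_HANDLER_OFFSET is 0x8000, so the relocation template below is staged at physical address 0x38000 - the same 0x38000 named in the backup/restore comments that follow.)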
+ // + ASSERT (sizeof (BakBuf) >= gcSmmInitSize); + + // + // Patch ASM code template with current CR0, CR3, and CR4 values + // + gSmmCr0 = (UINT32)AsmReadCr0 (); + gSmmCr3 = (UINT32)AsmReadCr3 (); + gSmmCr4 = (UINT32)AsmReadCr4 (); + + // + // Patch GDTR for SMM base relocation + // + gcSmiInitGdtr.Base = gcSmiGdtr.Base; + gcSmiInitGdtr.Limit = gcSmiGdtr.Limit; + + U8Ptr = (UINT8*)(UINTN)(SMM_DEFAULT_SMBASE + SMM_HANDLER_OFFSET); + CpuStatePtr = (SMRAM_SAVE_STATE_MAP *)(UINTN)(SMM_DEFAULT_SMBASE + SMRAM_SAVE_STATE_MAP_OFFSET); + + // + // Back up the original contents at address 0x38000 + // + CopyMem (BakBuf, U8Ptr, sizeof (BakBuf)); + CopyMem (&BakBuf2, CpuStatePtr, sizeof (BakBuf2)); + + // + // Load image for relocation + // + CopyMem (U8Ptr, gcSmmInitTemplate, gcSmmInitSize); + + // + // Retrieve the local APIC ID of the current processor + // + ApicId = GetApicId (); + + // + // Relocate SM bases for all APs + // This is APs' 1st SMI - rebase will be done here, and APs' default SMI handler will be overridden by gcSmmInitTemplate + // + mIsBsp = FALSE; + BspIndex = (UINTN)-1; + for (Index = 0; Index < mNumberOfCpus; Index++) { + mRebased[Index] = FALSE; + if (ApicId != (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId) { + SendSmiIpi ((UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId); + // + // Wait for this AP to finish its 1st SMI + // + while (!mRebased[Index]); + } else { + // + // BSP will be relocated later + // + BspIndex = Index; + } + } + + // + // Relocate BSP's SMM base + // + ASSERT (BspIndex != (UINTN)-1); + mIsBsp = TRUE; + SendSmiIpi (ApicId); + // + // Wait for the BSP to finish its 1st SMI + // + while (!mRebased[BspIndex]); + + // + // Restore contents at address 0x38000 + // + CopyMem (CpuStatePtr, &BakBuf2, sizeof (BakBuf2)); + CopyMem (U8Ptr, BakBuf, sizeof (BakBuf)); +} + +/** + Perform SMM initialization for all processors in the S3 boot path. + + For a native platform, MP initialization in the S3 boot path is also performed in this function.
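+ + In outline (a reading of the code below, not a normative contract): microcode is loaded and MTRRs are restored via EarlyInitializeCpu (), SMBASE is relocated for every processor via SmmRelocateBases (), the remaining MSRs are restored via InitializeCpu (), and control finally returns to the PEI S3 resume code through either SwitchStack () for 32-bit SMM or AsmDisablePaging64 () for 64-bit SMM.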
+**/ +VOID +EFIAPI +SmmRestoreCpu ( + VOID + ) +{ + SMM_S3_RESUME_STATE *SmmS3ResumeState; + IA32_DESCRIPTOR Ia32Idtr; + IA32_DESCRIPTOR X64Idtr; + IA32_IDT_GATE_DESCRIPTOR IdtEntryTable[EXCEPTION_VECTOR_NUMBER]; + EFI_STATUS Status; + + DEBUG ((EFI_D_INFO, "SmmRestoreCpu()\n")); + + // + // See if there is enough context to resume PEI Phase + // + if (mSmmS3ResumeState == NULL) { + DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n")); + CpuDeadLoop (); + } + + SmmS3ResumeState = mSmmS3ResumeState; + ASSERT (SmmS3ResumeState != NULL); + + if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) { + // + // Save the IA32 IDT Descriptor + // + AsmReadIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr); + + // + // Set up X64 IDT table + // + ZeroMem (IdtEntryTable, sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32); + X64Idtr.Base = (UINTN) IdtEntryTable; + X64Idtr.Limit = (UINT16) (sizeof (IA32_IDT_GATE_DESCRIPTOR) * 32 - 1); + AsmWriteIdtr ((IA32_DESCRIPTOR *) &X64Idtr); + + // + // Set up the default exception handler + // + Status = InitializeCpuExceptionHandlers (NULL); + ASSERT_EFI_ERROR (Status); + + // + // Initialize Debug Agent to support source level debug + // + InitializeDebugAgent (DEBUG_AGENT_INIT_THUNK_PEI_IA32TOX64, (VOID *)&Ia32Idtr, NULL); + } + + // + // Perform the following CPU tasks for native platforms only + // + if (!FeaturePcdGet(PcdFrameworkCompatibilitySupport)) { + // + // Skip initialization if mAcpiCpuData is not valid + // + if (mAcpiCpuData.NumberOfCpus > 0) { + // + // First-time microcode load and MTRR restore + // + EarlyInitializeCpu (); + } + } + + // + // Restore SMBASE for BSP and all APs + // + SmmRelocateBases (); + + // + // Perform the following CPU tasks for native platforms only + // + if (!FeaturePcdGet(PcdFrameworkCompatibilitySupport)) { + // + // Skip initialization if mAcpiCpuData is not valid + // + if (mAcpiCpuData.NumberOfCpus > 0) { + // + // Restore MSRs for BSP and all APs + // + InitializeCpu (); + } + } + + // + // Set a flag to restore SMM configuration in S3 path. + // + mRestoreSmmConfigurationInS3 = TRUE; + + DEBUG (( EFI_D_INFO, "SMM S3 Return CS = %x\n", SmmS3ResumeState->ReturnCs)); + DEBUG (( EFI_D_INFO, "SMM S3 Return Entry Point = %x\n", SmmS3ResumeState->ReturnEntryPoint)); + DEBUG (( EFI_D_INFO, "SMM S3 Return Context1 = %x\n", SmmS3ResumeState->ReturnContext1)); + DEBUG (( EFI_D_INFO, "SMM S3 Return Context2 = %x\n", SmmS3ResumeState->ReturnContext2)); + DEBUG (( EFI_D_INFO, "SMM S3 Return Stack Pointer = %x\n", SmmS3ResumeState->ReturnStackPointer)); + + // + // If SMM is in 32-bit mode, then use SwitchStack() to resume PEI Phase + // + if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_32) { + DEBUG ((EFI_D_INFO, "Call SwitchStack() to return to S3 Resume in PEI Phase\n")); + + SwitchStack ( + (SWITCH_STACK_ENTRY_POINT)(UINTN)SmmS3ResumeState->ReturnEntryPoint, + (VOID *)(UINTN)SmmS3ResumeState->ReturnContext1, + (VOID *)(UINTN)SmmS3ResumeState->ReturnContext2, + (VOID *)(UINTN)SmmS3ResumeState->ReturnStackPointer + ); + } + + // + // If SMM is in 64-bit mode, then use AsmDisablePaging64() to resume PEI Phase + // + if (SmmS3ResumeState->Signature == SMM_S3_RESUME_SMM_64) { + DEBUG ((EFI_D_INFO, "Call AsmDisablePaging64() to return to S3 Resume in PEI Phase\n")); + // + // Disable the Debug timer interrupt, since the new IDT table is for IA32 and will not work in long mode.
+ // + SaveAndSetDebugTimerInterrupt (FALSE); + // + // Restore IA32 IDT table + // + AsmWriteIdtr ((IA32_DESCRIPTOR *) &Ia32Idtr); + AsmDisablePaging64 ( + SmmS3ResumeState->ReturnCs, + (UINT32)SmmS3ResumeState->ReturnEntryPoint, + (UINT32)SmmS3ResumeState->ReturnContext1, + (UINT32)SmmS3ResumeState->ReturnContext2, + (UINT32)SmmS3ResumeState->ReturnStackPointer + ); + } + + // + // Cannot resume PEI Phase + // + DEBUG ((EFI_D_ERROR, "No context to return to PEI Phase\n")); + CpuDeadLoop (); +} + +/** + Copy register table from ACPI NVS memory into SMRAM. + + @param[in] DestinationRegisterTableList Points to destination register table. + @param[in] SourceRegisterTableList Points to source register table. + @param[in] NumberOfCpus Number of CPUs. + +**/ +VOID +CopyRegisterTable ( + IN CPU_REGISTER_TABLE *DestinationRegisterTableList, + IN CPU_REGISTER_TABLE *SourceRegisterTableList, + IN UINT32 NumberOfCpus + ) +{ + UINTN Index; + UINTN Index1; + CPU_REGISTER_TABLE_ENTRY *RegisterTableEntry; + + CopyMem (DestinationRegisterTableList, SourceRegisterTableList, NumberOfCpus * sizeof (CPU_REGISTER_TABLE)); + for (Index = 0; Index < NumberOfCpus; Index++) { + DestinationRegisterTableList[Index].RegisterTableEntry = AllocatePool (DestinationRegisterTableList[Index].AllocatedSize); + ASSERT (DestinationRegisterTableList[Index].RegisterTableEntry != NULL); + CopyMem (DestinationRegisterTableList[Index].RegisterTableEntry, SourceRegisterTableList[Index].RegisterTableEntry, DestinationRegisterTableList[Index].AllocatedSize); + // + // Go through all MSRs in the register table to initialize the MSR spin locks + // + RegisterTableEntry = DestinationRegisterTableList[Index].RegisterTableEntry; + for (Index1 = 0; Index1 < DestinationRegisterTableList[Index].TableLength; Index1++, RegisterTableEntry++) { + if ((RegisterTableEntry->RegisterType == Msr) && (RegisterTableEntry->ValidBitLength < 64)) { + // + // Initialize MSR spin locks only for those MSRs that need bit-field writes + // + InitMsrSpinLockByIndex (RegisterTableEntry->Index); + } + } + } +} + +/** + SMM Ready To Lock event notification handler. + + The CPU S3 data is copied to SMRAM for security and mSmmReadyToLock is set to + perform additional lock actions that must be performed from SMM on the next SMI. + + @param[in] Protocol Points to the protocol's unique identifier. + @param[in] Interface Points to the interface instance. + @param[in] Handle The handle on which the interface was installed. + + @retval EFI_SUCCESS Notification handler runs successfully. + **/ +EFI_STATUS +EFIAPI +SmmReadyToLockEventNotify ( + IN CONST EFI_GUID *Protocol, + IN VOID *Interface, + IN EFI_HANDLE Handle + ) +{ + ACPI_CPU_DATA *AcpiCpuData; + IA32_DESCRIPTOR *Gdtr; + IA32_DESCRIPTOR *Idtr; + + // + // Prevent use of mAcpiCpuData by initializing NumberOfCpus to 0 + // + mAcpiCpuData.NumberOfCpus = 0; + + // + // If FrameworkCompatibilitySupport is enabled, then do not copy CPU S3 Data into SMRAM + // + if (FeaturePcdGet (PcdFrameworkCompatibilitySupport)) { + goto Done; + } + + // + // If PcdCpuS3DataAddress was never set, then do not copy CPU S3 Data into SMRAM + // + AcpiCpuData = (ACPI_CPU_DATA *)(UINTN)PcdGet64 (PcdCpuS3DataAddress); + if (AcpiCpuData == NULL) { + goto Done; + } + + // + // For a native platform, copy the CPU S3 data into SMRAM for use on CPU S3 Resume.
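+ // (Rationale, stated here for clarity: AcpiCpuData and everything it points to live in ACPI NVS memory that the OS can modify at runtime, so each table is deep-copied into SMRAM now, while the contents are still trusted, and only the SMRAM copies are consumed on the S3 resume path.)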
+ // + CopyMem (&mAcpiCpuData, AcpiCpuData, sizeof (mAcpiCpuData)); + + mAcpiCpuData.MtrrTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (MTRR_SETTINGS)); + ASSERT (mAcpiCpuData.MtrrTable != 0); + + CopyMem ((VOID *)(UINTN)mAcpiCpuData.MtrrTable, (VOID *)(UINTN)AcpiCpuData->MtrrTable, sizeof (MTRR_SETTINGS)); + + mAcpiCpuData.GdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR)); + ASSERT (mAcpiCpuData.GdtrProfile != 0); + + CopyMem ((VOID *)(UINTN)mAcpiCpuData.GdtrProfile, (VOID *)(UINTN)AcpiCpuData->GdtrProfile, sizeof (IA32_DESCRIPTOR)); + + mAcpiCpuData.IdtrProfile = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (sizeof (IA32_DESCRIPTOR)); + ASSERT (mAcpiCpuData.IdtrProfile != 0); + + CopyMem ((VOID *)(UINTN)mAcpiCpuData.IdtrProfile, (VOID *)(UINTN)AcpiCpuData->IdtrProfile, sizeof (IA32_DESCRIPTOR)); + + mAcpiCpuData.PreSmmInitRegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE)); + ASSERT (mAcpiCpuData.PreSmmInitRegisterTable != 0); + + CopyRegisterTable ( + (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.PreSmmInitRegisterTable, + (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->PreSmmInitRegisterTable, + mAcpiCpuData.NumberOfCpus + ); + + mAcpiCpuData.RegisterTable = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePool (mAcpiCpuData.NumberOfCpus * sizeof (CPU_REGISTER_TABLE)); + ASSERT (mAcpiCpuData.RegisterTable != 0); + + CopyRegisterTable ( + (CPU_REGISTER_TABLE *)(UINTN)mAcpiCpuData.RegisterTable, + (CPU_REGISTER_TABLE *)(UINTN)AcpiCpuData->RegisterTable, + mAcpiCpuData.NumberOfCpus + ); + + // + // Copy APs' GDT, IDT, and Machine Check handler into SMRAM. + // + Gdtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.GdtrProfile; + Idtr = (IA32_DESCRIPTOR *)(UINTN)mAcpiCpuData.IdtrProfile; + + mGdtForAp = AllocatePool ((Gdtr->Limit + 1) + (Idtr->Limit + 1) + mAcpiCpuData.ApMachineCheckHandlerSize); + ASSERT (mGdtForAp != NULL); + mIdtForAp = (VOID *) ((UINTN)mGdtForAp + (Gdtr->Limit + 1)); + mMachineCheckHandlerForAp = (VOID *) ((UINTN)mIdtForAp + (Idtr->Limit + 1)); + + CopyMem (mGdtForAp, (VOID *)Gdtr->Base, Gdtr->Limit + 1); + CopyMem (mIdtForAp, (VOID *)Idtr->Base, Idtr->Limit + 1); + CopyMem (mMachineCheckHandlerForAp, (VOID *)(UINTN)mAcpiCpuData.ApMachineCheckHandlerBase, mAcpiCpuData.ApMachineCheckHandlerSize); + +Done: + // + // Set SMM ready to lock flag and return + // + mSmmReadyToLock = TRUE; + return EFI_SUCCESS; +} + +/** + The module Entry Point of the CPU SMM driver. + + @param ImageHandle The firmware allocated handle for the EFI image. + @param SystemTable A pointer to the EFI System Table. + + @retval EFI_SUCCESS The entry point is executed successfully. + @retval Other Some error occurred when executing this entry point. + +**/ +EFI_STATUS +EFIAPI +PiCpuSmmEntry ( + IN EFI_HANDLE ImageHandle, + IN EFI_SYSTEM_TABLE *SystemTable + ) +{ + EFI_STATUS Status; + EFI_MP_SERVICES_PROTOCOL *MpServices; + UINTN NumberOfEnabledProcessors; + UINTN Index; + VOID *Buffer; + UINTN TileSize; + VOID *GuidHob; + EFI_SMRAM_DESCRIPTOR *SmramDescriptor; + SMM_S3_RESUME_STATE *SmmS3ResumeState; + UINT8 *Stacks; + VOID *Registration; + UINT32 RegEax; + UINT32 RegEdx; + UINTN FamilyId; + UINTN ModelId; + UINT32 Cr3; + + // + // Initialize Debug Agent to support source level debug in SMM code + // + InitializeDebugAgent (DEBUG_AGENT_INIT_SMM, NULL, NULL); + + // + // Report the start of CPU SMM initialization.
+ // + REPORT_STATUS_CODE ( + EFI_PROGRESS_CODE, + EFI_COMPUTING_UNIT_HOST_PROCESSOR | EFI_CU_HP_PC_SMM_INIT + ); + + // + // Fix segment address of the long-mode-switch jump + // + if (sizeof (UINTN) == sizeof (UINT64)) { + gSmmJmpAddr.Segment = LONG_MODE_CODE_SEGMENT; + } + + // + // Find out SMRR Base and SMRR Size + // + FindSmramInfo (&mCpuHotPlugData.SmrrBase, &mCpuHotPlugData.SmrrSize); + + // + // Get MP Services Protocol + // + Status = SystemTable->BootServices->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices); + ASSERT_EFI_ERROR (Status); + + // + // Use MP Services Protocol to retrieve the number of processors and number of enabled processors + // + Status = MpServices->GetNumberOfProcessors (MpServices, &mNumberOfCpus, &NumberOfEnabledProcessors); + ASSERT_EFI_ERROR (Status); + ASSERT (mNumberOfCpus <= PcdGet32 (PcdCpuMaxLogicalProcessorNumber)); + + // + // If CPU hot plug is supported, PcdCpuSmmEnableBspElection should be set to TRUE. + // A constant BSP index makes no sense because the BSP may be hot removed. + // + DEBUG_CODE ( + if (FeaturePcdGet (PcdCpuHotPlugSupport)) { + + ASSERT (FeaturePcdGet (PcdCpuSmmEnableBspElection)); + } + ); + + // + // Save the PcdCpuSmmCodeAccessCheckEnable value into a global variable. + // + mSmmCodeAccessCheckEnable = PcdGetBool (PcdCpuSmmCodeAccessCheckEnable); + DEBUG ((EFI_D_INFO, "PcdCpuSmmCodeAccessCheckEnable = %d\n", mSmmCodeAccessCheckEnable)); + + // + // If CPU hot plug is supported, we need to allocate resources for possibly hot-added processors + // + if (FeaturePcdGet (PcdCpuHotPlugSupport)) { + mMaxNumberOfCpus = PcdGet32 (PcdCpuMaxLogicalProcessorNumber); + } else { + mMaxNumberOfCpus = mNumberOfCpus; + } + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus = mMaxNumberOfCpus; + + // + // The CPU save state and code for the SMI entry point are tiled within an SMRAM + // allocated buffer. The minimum size of this buffer for a uniprocessor system + // is 32 KB, because the entry point is SMBASE + 32KB, and the CPU save state area + // is just below SMBASE + 64KB. If more than one CPU is present in the platform, + // then the SMI entry point and the CPU save state areas can be tiled to minimize + // the total amount of SMRAM required for all the CPUs. The tile size can be computed + // by adding the CPU save state size, any extra CPU specific context, and + // the size of code that must be placed at the SMI entry point to transfer + // control to a C function in the native SMM execution mode. This size is + // rounded up to the nearest power of 2 to give the tile size for each CPU. + // The total amount of memory required is the maximum number of CPUs that the + // platform supports times the tile size. The picture below shows the tiling, + // where m is the number of tiles that fit in 32KB. + // + // +-----------------------------+ <-- 2^n offset from Base of allocated buffer + // | CPU m+1 Save State | + // +-----------------------------+ + // | CPU m+1 Extra Data | + // +-----------------------------+ + // | Padding | + // +-----------------------------+ + // | CPU 2m SMI Entry | + // +#############################+ <-- Base of allocated buffer + 64 KB + // | CPU m-1 Save State | + // +-----------------------------+ + // | CPU m-1 Extra Data | + // +-----------------------------+ + // | Padding | + // +-----------------------------+ + // | CPU 2m-1 SMI Entry | + // +=============================+ <-- 2^n offset from Base of allocated buffer + // | . . . . . . . . . . . .
| + // +=============================+ <-- 2^n offset from Base of allocated buffer + // | CPU 2 Save State | + // +-----------------------------+ + // | CPU 2 Extra Data | + // +-----------------------------+ + // | Padding | + // +-----------------------------+ + // | CPU m+1 SMI Entry | + // +=============================+ <-- Base of allocated buffer + 32 KB + // | CPU 1 Save State | + // +-----------------------------+ + // | CPU 1 Extra Data | + // +-----------------------------+ + // | Padding | + // +-----------------------------+ + // | CPU m SMI Entry | + // +#############################+ <-- Base of allocated buffer + 32 KB == CPU 0 SMBASE + 64 KB + // | CPU 0 Save State | + // +-----------------------------+ + // | CPU 0 Extra Data | + // +-----------------------------+ + // | Padding | + // +-----------------------------+ + // | CPU m-1 SMI Entry | + // +=============================+ <-- 2^n offset from Base of allocated buffer + // | . . . . . . . . . . . . | + // +=============================+ <-- 2^n offset from Base of allocated buffer + // | Padding | + // +-----------------------------+ + // | CPU 1 SMI Entry | + // +=============================+ <-- 2^n offset from Base of allocated buffer + // | Padding | + // +-----------------------------+ + // | CPU 0 SMI Entry | + // +#############################+ <-- Base of allocated buffer == CPU 0 SMBASE + 32 KB + // + + // + // Retrieve CPU Family + // + AsmCpuid (CPUID_VERSION_INFO, &RegEax, NULL, NULL, &RegEdx); + FamilyId = (RegEax >> 8) & 0xf; + ModelId = (RegEax >> 4) & 0xf; + if (FamilyId == 0x06 || FamilyId == 0x0f) { + ModelId = ModelId | ((RegEax >> 12) & 0xf0); + } + + // + // Determine the mode of the CPU at the time an SMI occurs + // Intel(R) 64 and IA-32 Architectures Software Developer's Manual + // Volume 3C, Section 34.4.1.1 + // + mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT; + if ((RegEdx & BIT29) != 0) { + mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT; + } + if (FamilyId == 0x06) { + if (ModelId == 0x17 || ModelId == 0x0f || ModelId == 0x1c) { + mSmmSaveStateRegisterLma = EFI_SMM_SAVE_STATE_REGISTER_LMA_64BIT; + } + } + + // + // Compute tile size of buffer required to hold the CPU SMRAM Save State Map, extra CPU + // specific context in a PROCESSOR_SMM_DESCRIPTOR, and the SMI entry point. This size + // is rounded up to nearest power of 2. + // + TileSize = sizeof (SMRAM_SAVE_STATE_MAP) + sizeof (PROCESSOR_SMM_DESCRIPTOR) + GetSmiHandlerSize () - 1; + TileSize = 2 * GetPowerOfTwo32 ((UINT32)TileSize); + DEBUG ((EFI_D_INFO, "SMRAM TileSize = %08x\n", TileSize)); + + // + // If the TileSize is larger than space available for the SMI Handler of CPU[i], + // the PROCESSOR_SMM_DESCRIPTOR of CPU[i+1] and the SMRAM Save State Map of CPU[i+1], + // the ASSERT(). If this ASSERT() is triggered, then the SMI Handler size must be + // reduced. + // + ASSERT (TileSize <= (SMRAM_SAVE_STATE_MAP_OFFSET + sizeof (SMRAM_SAVE_STATE_MAP) - SMM_HANDLER_OFFSET)); + + // + // Allocate buffer for all of the tiles. + // + // Intel(R) 64 and IA-32 Architectures Software Developer's Manual + // Volume 3C, Section 34.11 SMBASE Relocation + // For Pentium and Intel486 processors, the SMBASE values must be + // aligned on a 32-KByte boundary or the processor will enter shutdown + // state during the execution of a RSM instruction. 
+ // + // Intel486 processors: FamilyId is 4 + // Pentium processors : FamilyId is 5 + // + if ((FamilyId == 4) || (FamilyId == 5)) { + Buffer = AllocateAlignedPages (EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1)), SIZE_32KB); + } else { + Buffer = AllocatePages (EFI_SIZE_TO_PAGES (SIZE_32KB + TileSize * (mMaxNumberOfCpus - 1))); + } + ASSERT (Buffer != NULL); + + // + // Allocate buffers for the pointer arrays in SMM_CPU_PRIVATE_DATA. + // + gSmmCpuPrivate->ProcessorInfo = (EFI_PROCESSOR_INFORMATION *)AllocatePool (sizeof (EFI_PROCESSOR_INFORMATION) * mMaxNumberOfCpus); + ASSERT (gSmmCpuPrivate->ProcessorInfo != NULL); + + gSmmCpuPrivate->Operation = (SMM_CPU_OPERATION *)AllocatePool (sizeof (SMM_CPU_OPERATION) * mMaxNumberOfCpus); + ASSERT (gSmmCpuPrivate->Operation != NULL); + + gSmmCpuPrivate->CpuSaveStateSize = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus); + ASSERT (gSmmCpuPrivate->CpuSaveStateSize != NULL); + + gSmmCpuPrivate->CpuSaveState = (VOID **)AllocatePool (sizeof (VOID *) * mMaxNumberOfCpus); + ASSERT (gSmmCpuPrivate->CpuSaveState != NULL); + + mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveStateSize = gSmmCpuPrivate->CpuSaveStateSize; + mSmmCpuPrivateData.SmmCoreEntryContext.CpuSaveState = gSmmCpuPrivate->CpuSaveState; + mSmmCpuSaveState.CpuSaveState = (EFI_SMM_CPU_STATE **)gSmmCpuPrivate->CpuSaveState; + + // + // Allocate buffers for the pointer arrays in CPU_HOT_PLUG_DATA. + // + mCpuHotPlugData.ApicId = (UINT64 *)AllocatePool (sizeof (UINT64) * mMaxNumberOfCpus); + ASSERT (mCpuHotPlugData.ApicId != NULL); + mCpuHotPlugData.SmBase = (UINTN *)AllocatePool (sizeof (UINTN) * mMaxNumberOfCpus); + ASSERT (mCpuHotPlugData.SmBase != NULL); + mCpuHotPlugData.ArrayLength = (UINT32)mMaxNumberOfCpus; + + // + // Retrieve the APIC ID of each enabled processor from the MP Services protocol. + // Also compute the SMBASE address, CPU Save State address, and CPU Save State + // size for each CPU in the platform + // + for (Index = 0; Index < mMaxNumberOfCpus; Index++) { + mCpuHotPlugData.SmBase[Index] = (UINTN)Buffer + Index * TileSize - SMM_HANDLER_OFFSET; + gSmmCpuPrivate->CpuSaveStateSize[Index] = sizeof(SMRAM_SAVE_STATE_MAP); + gSmmCpuPrivate->CpuSaveState[Index] = (VOID *)(mCpuHotPlugData.SmBase[Index] + SMRAM_SAVE_STATE_MAP_OFFSET); + gSmmCpuPrivate->Operation[Index] = SmmCpuNone; + + if (Index < mNumberOfCpus) { + Status = MpServices->GetProcessorInfo (MpServices, Index, &gSmmCpuPrivate->ProcessorInfo[Index]); + ASSERT_EFI_ERROR (Status); + mCpuHotPlugData.ApicId[Index] = gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId; + + DEBUG ((EFI_D_INFO, "CPU[%03x] APIC ID=%04x SMBASE=%08x SaveState=%08x Size=%08x\n", + Index, + (UINT32)gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId, + mCpuHotPlugData.SmBase[Index], + gSmmCpuPrivate->CpuSaveState[Index], + gSmmCpuPrivate->CpuSaveStateSize[Index] + )); + } else { + gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId = INVALID_APIC_ID; + mCpuHotPlugData.ApicId[Index] = INVALID_APIC_ID; + } + } + + // + // Allocate SMI stacks for all processors. + // + if (FeaturePcdGet (PcdCpuSmmStackGuard)) { + // + // Two more pages are allocated for each processor: + // one is the guard page and the other is the known good stack. + // + // +-------------------------------------------+-----+-------------------------------------------+ + // | Known Good Stack | Guard Page | SMM Stack | ...
| Known Good Stack | Guard Page | SMM Stack | + // +-------------------------------------------+-----+-------------------------------------------+ + // | | | | + // |<-------------- Processor 0 -------------->| |<-------------- Processor n -------------->| + // + mSmmStackSize = EFI_PAGES_TO_SIZE (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2); + Stacks = (UINT8 *) AllocatePages (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * (EFI_SIZE_TO_PAGES (PcdGet32 (PcdCpuSmmStackSize)) + 2)); + ASSERT (Stacks != NULL); + mSmmStackArrayBase = (UINTN)Stacks; + mSmmStackArrayEnd = mSmmStackArrayBase + gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize - 1; + } else { + mSmmStackSize = PcdGet32 (PcdCpuSmmStackSize); + Stacks = (UINT8 *) AllocatePages (EFI_SIZE_TO_PAGES (gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus * mSmmStackSize)); + ASSERT (Stacks != NULL); + } + + // + // Set SMI stack for SMM base relocation + // + gSmmInitStack = (UINTN) (Stacks + mSmmStackSize - sizeof (UINTN)); + + // + // Initialize IDT + // + InitializeSmmIdt (); + + // + // Relocate SMM Base addresses to the ones allocated from SMRAM + // + mRebased = (BOOLEAN *)AllocateZeroPool (sizeof (BOOLEAN) * mMaxNumberOfCpus); + ASSERT (mRebased != NULL); + SmmRelocateBases (); + + // + // Call hook for BSP to perform extra actions in normal mode after all + // SMM base addresses have been relocated on all CPUs + // + SmmCpuFeaturesSmmRelocationComplete (); + + // + // SMM Time initialization + // + InitializeSmmTimer (); + + // + // Initialize MP globals + // + Cr3 = InitializeMpServiceData (Stacks, mSmmStackSize); + + // + // Fill in SMM Reserved Regions + // + gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedStart = 0; + gSmmCpuPrivate->SmmReservedSmramRegion[0].SmramReservedSize = 0; + + // + // Install the SMM Configuration Protocol onto a new handle on the handle database. + // The entire SMM Configuration Protocol is allocated from SMRAM, so only a pointer + // to an SMRAM address will be present in the handle database + // + Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces ( + &gSmmCpuPrivate->SmmCpuHandle, + &gEfiSmmConfigurationProtocolGuid, &gSmmCpuPrivate->SmmConfiguration, + NULL + ); + ASSERT_EFI_ERROR (Status); + + // + // Install the SMM CPU Protocol into SMM protocol database + // + Status = gSmst->SmmInstallProtocolInterface ( + &mSmmCpuHandle, + &gEfiSmmCpuProtocolGuid, + EFI_NATIVE_INTERFACE, + &mSmmCpu + ); + ASSERT_EFI_ERROR (Status); + + // + // Expose address of CPU Hot Plug Data structure if CPU hot plug is supported. + // + if (FeaturePcdGet (PcdCpuHotPlugSupport)) { + PcdSet64 (PcdCpuHotPlugDataAddress, (UINT64)(UINTN)&mCpuHotPlugData); + } + + // + // Initialize SMM CPU Services Support + // + Status = InitializeSmmCpuServices (mSmmCpuHandle); + ASSERT_EFI_ERROR (Status); + + if (FeaturePcdGet (PcdFrameworkCompatibilitySupport)) { + // + // Install Framework SMM Save State Protocol into UEFI protocol database for backward compatibility + // + Status = SystemTable->BootServices->InstallMultipleProtocolInterfaces ( + &gSmmCpuPrivate->SmmCpuHandle, + &gEfiSmmCpuSaveStateProtocolGuid, + &mSmmCpuSaveState, + NULL + ); + ASSERT_EFI_ERROR (Status); + // + // The SmmStartupThisAp service in Framework SMST should always be non-null. + // Update SmmStartupThisAp pointer in PI SMST here so that PI/Framework SMM thunk + // can have it ready when constructing Framework SMST. 
+ // + gSmst->SmmStartupThisAp = SmmStartupThisAp; + } + + // + // register SMM Ready To Lock Protocol notification + // + Status = gSmst->SmmRegisterProtocolNotify ( + &gEfiSmmReadyToLockProtocolGuid, + SmmReadyToLockEventNotify, + &Registration + ); + ASSERT_EFI_ERROR (Status); + + GuidHob = GetFirstGuidHob (&gEfiAcpiVariableGuid); + if (GuidHob != NULL) { + SmramDescriptor = (EFI_SMRAM_DESCRIPTOR *) GET_GUID_HOB_DATA (GuidHob); + + DEBUG ((EFI_D_INFO, "SMM S3 SMRAM Structure = %x\n", SmramDescriptor)); + DEBUG ((EFI_D_INFO, "SMM S3 Structure = %x\n", SmramDescriptor->CpuStart)); + + SmmS3ResumeState = (SMM_S3_RESUME_STATE *)(UINTN)SmramDescriptor->CpuStart; + ZeroMem (SmmS3ResumeState, sizeof (SMM_S3_RESUME_STATE)); + + mSmmS3ResumeState = SmmS3ResumeState; + SmmS3ResumeState->Smst = (EFI_PHYSICAL_ADDRESS)(UINTN)gSmst; + + SmmS3ResumeState->SmmS3ResumeEntryPoint = (EFI_PHYSICAL_ADDRESS)(UINTN)SmmRestoreCpu; + + SmmS3ResumeState->SmmS3StackSize = SIZE_32KB; + SmmS3ResumeState->SmmS3StackBase = (EFI_PHYSICAL_ADDRESS)(UINTN)AllocatePages (EFI_SIZE_TO_PAGES ((UINTN)SmmS3ResumeState->SmmS3StackSize)); + if (SmmS3ResumeState->SmmS3StackBase == 0) { + SmmS3ResumeState->SmmS3StackSize = 0; + } + + SmmS3ResumeState->SmmS3Cr0 = gSmmCr0; + SmmS3ResumeState->SmmS3Cr3 = Cr3; + SmmS3ResumeState->SmmS3Cr4 = gSmmCr4; + + if (sizeof (UINTN) == sizeof (UINT64)) { + SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_64; + } + if (sizeof (UINTN) == sizeof (UINT32)) { + SmmS3ResumeState->Signature = SMM_S3_RESUME_SMM_32; + } + } + + // + // Check XD and BTS features + // + CheckProcessorFeature (); + + // + // Initialize SMM Profile feature + // + InitSmmProfile (Cr3); + + // + // Patch SmmS3ResumeState->SmmS3Cr3 + // + InitSmmS3Cr3 (); + + DEBUG ((EFI_D_INFO, "SMM CPU Module exit from SMRAM with EFI_SUCCESS\n")); + + return EFI_SUCCESS; +} + +/** + + Find out SMRAM information including SMRR base and SMRR size. 
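+ + The do/while loop at the end of this function merges SMRAM descriptors that are physically adjacent to the chosen range; for example (hypothetical addresses), descriptors covering 0x7F000000..0x7F7FFFFF and 0x7F800000..0x7FFFFFFF coalesce into a single 16 MB range reported through SmrrBase/SmrrSize.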
+ + @param SmrrBase SMRR base + @param SmrrSize SMRR size + +**/ +VOID +FindSmramInfo ( + OUT UINT32 *SmrrBase, + OUT UINT32 *SmrrSize + ) +{ + EFI_STATUS Status; + UINTN Size; + EFI_SMM_ACCESS2_PROTOCOL *SmmAccess; + EFI_SMRAM_DESCRIPTOR *CurrentSmramRange; + EFI_SMRAM_DESCRIPTOR *SmramRanges; + UINTN SmramRangeCount; + UINTN Index; + UINT64 MaxSize; + BOOLEAN Found; + + // + // Get SMM Access Protocol + // + Status = gBS->LocateProtocol (&gEfiSmmAccess2ProtocolGuid, NULL, (VOID **)&SmmAccess); + ASSERT_EFI_ERROR (Status); + + // + // Get SMRAM information + // + Size = 0; + Status = SmmAccess->GetCapabilities (SmmAccess, &Size, NULL); + ASSERT (Status == EFI_BUFFER_TOO_SMALL); + + SmramRanges = (EFI_SMRAM_DESCRIPTOR *)AllocatePool (Size); + ASSERT (SmramRanges != NULL); + + Status = SmmAccess->GetCapabilities (SmmAccess, &Size, SmramRanges); + ASSERT_EFI_ERROR (Status); + + SmramRangeCount = Size / sizeof (EFI_SMRAM_DESCRIPTOR); + + // + // Find the largest SMRAM range between 1MB and 4GB that is at least 256K - 4K in size + // + CurrentSmramRange = NULL; + for (Index = 0, MaxSize = SIZE_256KB - EFI_PAGE_SIZE; Index < SmramRangeCount; Index++) { + // + // Skip any SMRAM region that is already allocated, needs testing, or needs ECC initialization + // + if ((SmramRanges[Index].RegionState & (EFI_ALLOCATED | EFI_NEEDS_TESTING | EFI_NEEDS_ECC_INITIALIZATION)) != 0) { + continue; + } + + if (SmramRanges[Index].CpuStart >= BASE_1MB) { + if ((SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize) <= BASE_4GB) { + if (SmramRanges[Index].PhysicalSize >= MaxSize) { + MaxSize = SmramRanges[Index].PhysicalSize; + CurrentSmramRange = &SmramRanges[Index]; + } + } + } + } + + ASSERT (CurrentSmramRange != NULL); + + *SmrrBase = (UINT32)CurrentSmramRange->CpuStart; + *SmrrSize = (UINT32)CurrentSmramRange->PhysicalSize; + + do { + Found = FALSE; + for (Index = 0; Index < SmramRangeCount; Index++) { + if (SmramRanges[Index].CpuStart < *SmrrBase && *SmrrBase == (SmramRanges[Index].CpuStart + SmramRanges[Index].PhysicalSize)) { + *SmrrBase = (UINT32)SmramRanges[Index].CpuStart; + *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize); + Found = TRUE; + } else if ((*SmrrBase + *SmrrSize) == SmramRanges[Index].CpuStart && SmramRanges[Index].PhysicalSize > 0) { + *SmrrSize = (UINT32)(*SmrrSize + SmramRanges[Index].PhysicalSize); + Found = TRUE; + } + } + } while (Found); + + DEBUG ((EFI_D_INFO, "SMRR Base: 0x%x, SMRR Size: 0x%x\n", *SmrrBase, *SmrrSize)); +} + +/** +Configure SMM Code Access Check feature on an AP. +SMM Feature Control MSR will be locked after configuration. + +@param[in,out] Buffer Pointer to private data buffer. 
+**/ +VOID +EFIAPI +ConfigSmmCodeAccessCheckOnCurrentProcessor ( + IN OUT VOID *Buffer + ) +{ + UINTN CpuIndex; + UINT64 SmmFeatureControlMsr; + UINT64 NewSmmFeatureControlMsr; + + // + // Retrieve the CPU Index from the context passed in + // + CpuIndex = *(UINTN *)Buffer; + + // + // Get the current SMM Feature Control MSR value + // + SmmFeatureControlMsr = SmmCpuFeaturesGetSmmRegister (CpuIndex, SmmRegFeatureControl); + + // + // Compute the new SMM Feature Control MSR value + // + NewSmmFeatureControlMsr = SmmFeatureControlMsr; + if (mSmmCodeAccessCheckEnable) { + NewSmmFeatureControlMsr |= SMM_CODE_CHK_EN_BIT; + } + if (FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) { + NewSmmFeatureControlMsr |= SMM_FEATURE_CONTROL_LOCK_BIT; + } + + // + // Only set the SMM Feature Control MSR value if the new value is different from the current value + // + if (NewSmmFeatureControlMsr != SmmFeatureControlMsr) { + SmmCpuFeaturesSetSmmRegister (CpuIndex, SmmRegFeatureControl, NewSmmFeatureControlMsr); + } + + // + // Release the spin lock used to serialize the updates to the SMM Feature Control MSR + // + ReleaseSpinLock (&mConfigSmmCodeAccessCheckLock); +} + +/** +Configure SMM Code Access Check feature for all processors. +SMM Feature Control MSR will be locked after configuration. +**/ +VOID +ConfigSmmCodeAccessCheck ( + VOID + ) +{ + UINTN Index; + EFI_STATUS Status; + + // + // Check to see if the Feature Control MSR is supported on this CPU + // + Index = gSmst->CurrentlyExecutingCpu; + if (!SmmCpuFeaturesIsSmmRegisterSupported (Index, SmmRegFeatureControl)) { + mSmmCodeAccessCheckEnable = FALSE; + return; + } + + // + // Check to see if the CPU supports the SMM Code Access Check feature + // Do not access this MSR unless the CPU supports the SmmRegFeatureControl + // + if ((AsmReadMsr64 (EFI_MSR_SMM_MCA_CAP) & SMM_CODE_ACCESS_CHK_BIT) == 0) { + mSmmCodeAccessCheckEnable = FALSE; + } + + // + // If the SMM Code Access Check feature is disabled and the Feature Control MSR + // is not being locked, then no additional work is required + // + if (!mSmmCodeAccessCheckEnable && !FeaturePcdGet (PcdCpuSmmFeatureControlMsrLock)) { + return; + } + + // + // Initialize the lock used to serialize the MSR programming in BSP and all APs + // + InitializeSpinLock (&mConfigSmmCodeAccessCheckLock); + + // + // Acquire Config SMM Code Access Check spin lock. The BSP will release the + // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor(). + // + AcquireSpinLock (&mConfigSmmCodeAccessCheckLock); + + // + // Enable SMM Code Access Check feature on the BSP. + // + ConfigSmmCodeAccessCheckOnCurrentProcessor (&Index); + + // + // Enable SMM Code Access Check feature for the APs. + // + for (Index = 0; Index < gSmst->NumberOfCpus; Index++) { + if (Index != gSmst->CurrentlyExecutingCpu) { + + // + // Acquire Config SMM Code Access Check spin lock. The AP will release the + // spin lock when it is done executing ConfigSmmCodeAccessCheckOnCurrentProcessor(). + // + AcquireSpinLock (&mConfigSmmCodeAccessCheckLock); + + // + // Call SmmStartupThisAp() to enable SMM Code Access Check on an AP. + // + Status = gSmst->SmmStartupThisAp (ConfigSmmCodeAccessCheckOnCurrentProcessor, Index, &Index); + ASSERT_EFI_ERROR (Status); + + // + // Wait for the AP to release the Config SMM Code Access Check spin lock. + // + while (!AcquireSpinLockOrFail (&mConfigSmmCodeAccessCheckLock)) { + CpuPause (); + } + + // + // Release the Config SMM Code Access Check spin lock.
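+ // (Annotation: mConfigSmmCodeAccessCheckLock doubles as a completion flag - the BSP acquires it, starts the AP, and spins in AcquireSpinLockOrFail () until the AP's ReleaseSpinLock () call signals that the MSR write is done; the release below simply returns the lock to its idle state.)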
+ // + ReleaseSpinLock (&mConfigSmmCodeAccessCheckLock); + } + } +} + +/** + Perform the remaining tasks. + +**/ +VOID +PerformRemainingTasks ( + VOID + ) +{ + if (mSmmReadyToLock) { + // + // Start SMM Profile feature + // + if (FeaturePcdGet (PcdCpuSmmProfileEnable)) { + SmmProfileStart (); + } + // + // Create a mix of 2MB and 4KB page tables. Mark some memory ranges as non-present and execute-disabled. + // + InitPaging (); + // + // Configure SMM Code Access Check feature if available. + // + ConfigSmmCodeAccessCheck (); + + // + // Clear SMM ready to lock flag + // + mSmmReadyToLock = FALSE; + } +} diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h new file mode 100644 index 0000000000..9ea11894cd --- /dev/null +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.h @@ -0,0 +1,699 @@ +/** @file +Agent Module to load other modules to deploy SMM Entry Vector for X86 CPU. + +Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.
+This program and the accompanying materials +are licensed and made available under the terms and conditions of the BSD License +which accompanies this distribution. The full text of the license may be found at +http://opensource.org/licenses/bsd-license.php + +THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef _CPU_PISMMCPUDXESMM_H_ +#define _CPU_PISMMCPUDXESMM_H_ + +#include <PiSmm.h> + +#include <Protocol/MpService.h> +#include <Protocol/SmmConfiguration.h> +#include <Protocol/SmmCpu.h> +#include <Protocol/SmmAccess2.h> +#include <Protocol/SmmReadyToLock.h> +#include <Protocol/SmmCpuService.h> +#include <Protocol/SmmCpuSaveState.h> + +#include <Guid/AcpiS3Context.h> + +#include <Library/BaseLib.h> +#include <Library/BaseMemoryLib.h> +#include <Library/PcdLib.h> +#include <Library/CacheMaintenanceLib.h> +#include <Library/MtrrLib.h> +#include <Library/IoLib.h> +#include <Library/TimerLib.h> +#include <Library/SynchronizationLib.h> +#include <Library/DebugLib.h> +#include <Library/BaseLib.h> +#include <Library/SmmServicesTableLib.h> +#include <Library/MemoryAllocationLib.h> +#include <Library/UefiBootServicesTableLib.h> +#include <Library/HobLib.h> +#include <Library/LocalApicLib.h> +#include <Library/UefiCpuLib.h> +#include <Library/UefiLib.h> +#include <Library/SmmCpuPlatformHookLib.h> +#include <Library/CpuExceptionHandlerLib.h> +#include <Library/ReportStatusCodeLib.h> +#include <Library/SmmCpuFeaturesLib.h> +#include <Library/PeCoffGetEntryPointLib.h> +#include <Library/DebugAgentLib.h> + +#include <AcpiCpuData.h> +#include <CpuHotPlugData.h> + +#include <Register/Cpuid.h> + +#include "CpuService.h" +#include "SmmProfile.h" + +// +// MSRs required for configuration of SMM Code Access Check +// +#define EFI_MSR_SMM_MCA_CAP 0x17D +#define SMM_CODE_ACCESS_CHK_BIT BIT58 + +#define SMM_FEATURE_CONTROL_LOCK_BIT BIT0 +#define SMM_CODE_CHK_EN_BIT BIT2 + +/// +/// Page Table Entry +/// +#define IA32_PG_P BIT0 +#define IA32_PG_RW BIT1 +#define IA32_PG_WT BIT3 +#define IA32_PG_CD BIT4 +#define IA32_PG_A BIT5 +#define IA32_PG_PS BIT7 +#define IA32_PG_PAT_2M BIT12 +#define IA32_PG_PAT_4K IA32_PG_PS +#define IA32_PG_PMNT BIT62 +#define IA32_PG_NX BIT63 + +// +// Size of Task-State Segment defined in IA32 Manual +// +#define TSS_SIZE 104 +#define TSS_X64_IST1_OFFSET 36 +#define TSS_IA32_CR3_OFFSET 28 +#define TSS_IA32_ESP_OFFSET 56 + +// +// Code segment selector values +// +#define PROTECT_MODE_CODE_SEGMENT 0x08 +#define LONG_MODE_CODE_SEGMENT 0x38 + +// +// The size 0x20 must be bigger than the size of the SmmInit template code. +// Currently, SmmInit requires a buffer of at least 0x16 bytes. +// +#define BACK_BUF_SIZE 0x20 + +#define EXCEPTION_VECTOR_NUMBER 0x20 + +#define INVALID_APIC_ID 0xFFFFFFFFFFFFFFFFULL + +typedef UINT32 SMM_CPU_ARRIVAL_EXCEPTIONS; +#define ARRIVAL_EXCEPTION_BLOCKED 0x1 +#define ARRIVAL_EXCEPTION_DELAYED 0x2 +#define ARRIVAL_EXCEPTION_SMI_DISABLED 0x4 + +// +// Private structure for the SMM CPU module that is stored in DXE Runtime memory +// Contains the SMM Configuration Protocol that is produced. +// Contains a mix of DXE and SMM contents; all fields must be used properly. +// +#define SMM_CPU_PRIVATE_DATA_SIGNATURE SIGNATURE_32 ('s', 'c', 'p', 'u') + +typedef struct { + UINTN Signature; + + EFI_HANDLE SmmCpuHandle; + + EFI_PROCESSOR_INFORMATION *ProcessorInfo; + SMM_CPU_OPERATION *Operation; + UINTN *CpuSaveStateSize; + VOID **CpuSaveState; + + EFI_SMM_RESERVED_SMRAM_REGION SmmReservedSmramRegion[1]; + EFI_SMM_ENTRY_CONTEXT SmmCoreEntryContext; + EFI_SMM_ENTRY_POINT SmmCoreEntry; + + EFI_SMM_CONFIGURATION_PROTOCOL SmmConfiguration; +} SMM_CPU_PRIVATE_DATA; + +extern SMM_CPU_PRIVATE_DATA *gSmmCpuPrivate; +extern CPU_HOT_PLUG_DATA mCpuHotPlugData; +extern UINTN mMaxNumberOfCpus; +extern UINTN mNumberOfCpus; +extern BOOLEAN mRestoreSmmConfigurationInS3; +extern EFI_SMM_CPU_PROTOCOL mSmmCpu; + +/// +/// The mode of the CPU at the time an SMI occurs +/// +extern UINT8 mSmmSaveStateRegisterLma; + + +// +// SMM CPU Protocol function prototypes. +// + +/** + Read information from the CPU save state. + + @param This EFI_SMM_CPU_PROTOCOL instance + @param Width The number of bytes to read from the CPU save state. + @param Register Specifies the CPU register to read from the save state.
+  @param  CpuIndex  Specifies the zero-based index of the CPU save state
+  @param  Buffer    Upon return, this holds the CPU register value read from the save state.
+
+  @retval EFI_SUCCESS           The register was read from Save State
+  @retval EFI_NOT_FOUND         The register is not defined for the Save State of Processor
+  @retval EFI_INVALID_PARAMETER This or Buffer is NULL.
+
+**/
+EFI_STATUS
+EFIAPI
+SmmReadSaveState (
+  IN CONST EFI_SMM_CPU_PROTOCOL   *This,
+  IN UINTN                        Width,
+  IN EFI_SMM_SAVE_STATE_REGISTER  Register,
+  IN UINTN                        CpuIndex,
+  OUT VOID                        *Buffer
+  );
+
+/**
+  Write data to the CPU save state.
+
+  @param  This      EFI_SMM_CPU_PROTOCOL instance
+  @param  Width     The number of bytes to write to the CPU save state.
+  @param  Register  Specifies the CPU register to write to the save state.
+  @param  CpuIndex  Specifies the zero-based index of the CPU save state
+  @param  Buffer    Upon entry, this holds the new CPU register value.
+
+  @retval EFI_SUCCESS           The register was written to Save State
+  @retval EFI_NOT_FOUND         The register is not defined for the Save State of Processor
+  @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct
+
+**/
+EFI_STATUS
+EFIAPI
+SmmWriteSaveState (
+  IN CONST EFI_SMM_CPU_PROTOCOL   *This,
+  IN UINTN                        Width,
+  IN EFI_SMM_SAVE_STATE_REGISTER  Register,
+  IN UINTN                        CpuIndex,
+  IN CONST VOID                   *Buffer
+  );
+
+/**
+  Read a CPU Save State register on the target processor.
+
+  This function abstracts whether the CPU Save State register is in the
+  IA32 CPU Save State Map or the X64 CPU Save State Map.
+
+  This function supports reading a CPU Save State register in the SMBase relocation handler.
+
+  @param[in]  CpuIndex  Specifies the zero-based index of the CPU save state.
+  @param[in]  Register  The CPU Save State register to read.
+  @param[in]  Width     The number of bytes to read from the CPU save state.
+  @param[out] Buffer    Upon return, this holds the CPU register value read from the save state.
+
+  @retval EFI_SUCCESS           The register was read from Save State.
+  @retval EFI_NOT_FOUND         The register is not defined for the Save State of Processor.
+  @retval EFI_INVALID_PARAMETER This or Buffer is NULL.
+
+**/
+EFI_STATUS
+EFIAPI
+ReadSaveStateRegister (
+  IN UINTN                        CpuIndex,
+  IN EFI_SMM_SAVE_STATE_REGISTER  Register,
+  IN UINTN                        Width,
+  OUT VOID                        *Buffer
+  );
+
+/**
+  Write value to a CPU Save State register on the target processor.
+
+  This function abstracts whether the CPU Save State register is in the
+  IA32 CPU Save State Map or the X64 CPU Save State Map.
+
+  This function supports writing a CPU Save State register in the SMBase relocation handler.
+
+  @param[in] CpuIndex  Specifies the zero-based index of the CPU save state.
+  @param[in] Register  The CPU Save State register to write.
+  @param[in] Width     The number of bytes to write to the CPU save state.
+  @param[in] Buffer    Upon entry, this holds the new CPU register value.
+
+  @retval EFI_SUCCESS           The register was written to Save State.
+  @retval EFI_NOT_FOUND         The register is not defined for the Save State of Processor.
+  @retval EFI_INVALID_PARAMETER ProcessorIndex or Width is not correct.
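+
+  A usage sketch (illustrative only, not part of the original code; it assumes
+  the caller already holds a valid CpuIndex and an EFI_STATUS Status variable):
+
+    UINT64  Rax;
+    Status = ReadSaveStateRegister (CpuIndex, EFI_SMM_SAVE_STATE_REGISTER_RAX,
+               sizeof (Rax), &Rax);
+    if (!EFI_ERROR (Status)) {
+      Rax |= BIT0;
+      Status = WriteSaveStateRegister (CpuIndex, EFI_SMM_SAVE_STATE_REGISTER_RAX,
+                 sizeof (Rax), &Rax);
+    }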
+ +**/ +EFI_STATUS +EFIAPI +WriteSaveStateRegister ( + IN UINTN CpuIndex, + IN EFI_SMM_SAVE_STATE_REGISTER Register, + IN UINTN Width, + IN CONST VOID *Buffer + ); + +// +// +// +typedef struct { + UINT32 Offset; + UINT16 Segment; + UINT16 Reserved; +} IA32_FAR_ADDRESS; + +extern IA32_FAR_ADDRESS gSmmJmpAddr; + +extern CONST UINT8 gcSmmInitTemplate[]; +extern CONST UINT16 gcSmmInitSize; +extern UINT32 gSmmCr0; +extern UINT32 gSmmCr3; +extern UINT32 gSmmCr4; +extern UINTN gSmmInitStack; + +/** + Semaphore operation for all processor relocate SMMBase. +**/ +VOID +EFIAPI +SmmRelocationSemaphoreComplete ( + VOID + ); + +/// +/// The type of SMM CPU Information +/// +typedef struct { + SPIN_LOCK Busy; + volatile EFI_AP_PROCEDURE Procedure; + volatile VOID *Parameter; + volatile UINT32 Run; + volatile BOOLEAN Present; +} SMM_CPU_DATA_BLOCK; + +typedef enum { + SmmCpuSyncModeTradition, + SmmCpuSyncModeRelaxedAp, + SmmCpuSyncModeMax +} SMM_CPU_SYNC_MODE; + +typedef struct { + // + // Pointer to an array. The array should be located immediately after this structure + // so that UC cache-ability can be set together. + // + SMM_CPU_DATA_BLOCK *CpuData; + volatile UINT32 Counter; + volatile UINT32 BspIndex; + volatile BOOLEAN InsideSmm; + volatile BOOLEAN AllCpusInSync; + volatile SMM_CPU_SYNC_MODE EffectiveSyncMode; + volatile BOOLEAN SwitchBsp; + volatile BOOLEAN *CandidateBsp; +} SMM_DISPATCHER_MP_SYNC_DATA; + +typedef struct { + SPIN_LOCK SpinLock; + UINT32 MsrIndex; +} MP_MSR_LOCK; + +#define SMM_PSD_OFFSET 0xfb00 + +typedef struct { + UINT64 Signature; // Offset 0x00 + UINT16 Reserved1; // Offset 0x08 + UINT16 Reserved2; // Offset 0x0A + UINT16 Reserved3; // Offset 0x0C + UINT16 SmmCs; // Offset 0x0E + UINT16 SmmDs; // Offset 0x10 + UINT16 SmmSs; // Offset 0x12 + UINT16 SmmOtherSegment; // Offset 0x14 + UINT16 Reserved4; // Offset 0x16 + UINT64 Reserved5; // Offset 0x18 + UINT64 Reserved6; // Offset 0x20 + UINT64 Reserved7; // Offset 0x28 + UINT64 SmmGdtPtr; // Offset 0x30 + UINT32 SmmGdtSize; // Offset 0x38 + UINT32 Reserved8; // Offset 0x3C + UINT64 Reserved9; // Offset 0x40 + UINT64 Reserved10; // Offset 0x48 + UINT16 Reserved11; // Offset 0x50 + UINT16 Reserved12; // Offset 0x52 + UINT32 Reserved13; // Offset 0x54 + UINT64 MtrrBaseMaskPtr; // Offset 0x58 +} PROCESSOR_SMM_DESCRIPTOR; + +extern IA32_DESCRIPTOR gcSmiGdtr; +extern IA32_DESCRIPTOR gcSmiIdtr; +extern VOID *gcSmiIdtrPtr; +extern CONST PROCESSOR_SMM_DESCRIPTOR gcPsd; +extern UINT64 gPhyMask; +extern ACPI_CPU_DATA mAcpiCpuData; +extern SMM_DISPATCHER_MP_SYNC_DATA *mSmmMpSyncData; +extern VOID *mGdtForAp; +extern VOID *mIdtForAp; +extern VOID *mMachineCheckHandlerForAp; +extern UINTN mSmmStackArrayBase; +extern UINTN mSmmStackArrayEnd; +extern UINTN mSmmStackSize; +extern EFI_SMM_CPU_SERVICE_PROTOCOL mSmmCpuService; +extern IA32_DESCRIPTOR gcSmiInitGdtr; + +/** + Create 4G PageTable in SMRAM. + + @param ExtraPages Additional page numbers besides for 4G memory + @return PageTable Address + +**/ +UINT32 +Gen4GPageTable ( + IN UINTN ExtraPages + ); + + +/** + Initialize global data for MP synchronization. + + @param Stacks Base address of SMI stack buffer for all processors. + @param StackSize Stack size for each processor in SMM. + +**/ +UINT32 +InitializeMpServiceData ( + IN VOID *Stacks, + IN UINTN StackSize + ); + +/** + Initialize Timer for SMM AP Sync. + +**/ +VOID +InitializeSmmTimer ( + VOID + ); + +/** + Start Timer for SMM AP Sync. 
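+
+  StartSyncTimer () and IsSyncTimerTimeout () are designed to be used as a
+  pair. A hedged sketch of the intended BSP-side wait loop (illustrative only;
+  ArrivedApCount and ExpectedApCount are hypothetical counters, not part of
+  the original code):
+
+    Timer = StartSyncTimer ();
+    while (!IsSyncTimerTimeout (Timer) && (ArrivedApCount < ExpectedApCount)) {
+      CpuPause ();
+    }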
+ +**/ +UINT64 +EFIAPI +StartSyncTimer ( + VOID + ); + +/** + Check if the SMM AP Sync timer is timeout. + + @param Timer The start timer from the begin. + +**/ +BOOLEAN +EFIAPI +IsSyncTimerTimeout ( + IN UINT64 Timer + ); + +/** + Initialize IDT for SMM Stack Guard. + +**/ +VOID +EFIAPI +InitializeIDTSmmStackGuard ( + VOID + ); + +/** + + Register the SMM Foundation entry point. + + @param This Pointer to EFI_SMM_CONFIGURATION_PROTOCOL instance + @param SmmEntryPoint SMM Foundation EntryPoint + + @retval EFI_SUCCESS Successfully to register SMM foundation entry point + +**/ +EFI_STATUS +EFIAPI +RegisterSmmEntry ( + IN CONST EFI_SMM_CONFIGURATION_PROTOCOL *This, + IN EFI_SMM_ENTRY_POINT SmmEntryPoint + ); + +/** + Create PageTable for SMM use. + + @return PageTable Address + +**/ +UINT32 +SmmInitPageTable ( + VOID + ); + +/** + Schedule a procedure to run on the specified CPU. + + @param Procedure The address of the procedure to run + @param CpuIndex Target CPU number + @param ProcArguments The parameter to pass to the procedure + + @retval EFI_INVALID_PARAMETER CpuNumber not valid + @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP + @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM + @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy + @retval EFI_SUCCESS - The procedure has been successfully scheduled + +**/ +EFI_STATUS +EFIAPI +SmmStartupThisAp ( + IN EFI_AP_PROCEDURE Procedure, + IN UINTN CpuIndex, + IN OUT VOID *ProcArguments OPTIONAL + ); + +/** + Schedule a procedure to run on the specified CPU in a blocking fashion. + + @param Procedure The address of the procedure to run + @param CpuIndex Target CPU Index + @param ProcArguments The parameter to pass to the procedure + + @retval EFI_INVALID_PARAMETER CpuNumber not valid + @retval EFI_INVALID_PARAMETER CpuNumber specifying BSP + @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber did not enter SMM + @retval EFI_INVALID_PARAMETER The AP specified by CpuNumber is busy + @retval EFI_SUCCESS The procedure has been successfully scheduled + +**/ +EFI_STATUS +EFIAPI +SmmBlockingStartupThisAp ( + IN EFI_AP_PROCEDURE Procedure, + IN UINTN CpuIndex, + IN OUT VOID *ProcArguments OPTIONAL + ); + +/** + Initialize MP synchronization data. + +**/ +VOID +EFIAPI +InitializeMpSyncData ( + VOID + ); + +/** + + Find out SMRAM information including SMRR base and SMRR size. + + @param SmrrBase SMRR base + @param SmrrSize SMRR size + +**/ +VOID +FindSmramInfo ( + OUT UINT32 *SmrrBase, + OUT UINT32 *SmrrSize + ); + +/** + The function is invoked before SMBASE relocation in S3 path to restores CPU status. + + The function is invoked before SMBASE relocation in S3 path. It does first time microcode load + and restores MTRRs for both BSP and APs. + +**/ +VOID +EarlyInitializeCpu ( + VOID + ); + +/** + The function is invoked after SMBASE relocation in S3 path to restores CPU status. + + The function is invoked after SMBASE relocation in S3 path. It restores configuration according to + data saved by normal boot path for both BSP and APs. + +**/ +VOID +InitializeCpu ( + VOID + ); + +/** + Page Fault handler for SMM use. + + @param InterruptType Defines the type of interrupt or exception that + occurred on the processor.This parameter is processor architecture specific. + @param SystemContext A pointer to the processor context when + the interrupt occurred on the processor. 
+**/
+VOID
+EFIAPI
+SmiPFHandler (
+    IN EFI_EXCEPTION_TYPE   InterruptType,
+    IN EFI_SYSTEM_CONTEXT   SystemContext
+  );
+
+/**
+  Perform the remaining tasks.
+
+**/
+VOID
+PerformRemainingTasks (
+  VOID
+  );
+
+/**
+  Initialize MSR spin lock by MSR index.
+
+  @param  MsrIndex       MSR index value.
+
+**/
+VOID
+InitMsrSpinLockByIndex (
+  IN  UINT32      MsrIndex
+  );
+
+/**
+  Hook return address of SMM Save State so that semaphore code
+  can be executed immediately after AP exits SMM to indicate to
+  the BSP that an AP has exited SMM after SMBASE relocation.
+
+  @param[in] CpuIndex     The processor index.
+  @param[in] RebasedFlag  A pointer to a flag that is set to TRUE
+                          immediately after AP exits SMM.
+
+**/
+VOID
+SemaphoreHook (
+  IN UINTN             CpuIndex,
+  IN volatile BOOLEAN  *RebasedFlag
+  );
+
+/**
+  Configure SMM Code Access Check feature for all processors.
+  SMM Feature Control MSR will be locked after configuration.
+**/
+VOID
+ConfigSmmCodeAccessCheck (
+  VOID
+  );
+
+/**
+  Hook the code executed immediately after an RSM instruction on the currently
+  executing CPU.  The mode of code executed immediately after RSM must be
+  detected, and the appropriate hook must be selected.  Always clear the auto
+  HALT restart flag if it is set.
+
+  @param[in] CpuIndex                 The processor index for the currently
+                                      executing CPU.
+  @param[in] CpuState                 Pointer to SMRAM Save State Map for the
+                                      currently executing CPU.
+  @param[in] NewInstructionPointer32  Instruction pointer to use if resuming to
+                                      32-bit mode from 64-bit SMM.
+  @param[in] NewInstructionPointer    Instruction pointer to use if resuming to
+                                      same mode as SMM.
+
+  @retval The value of the original instruction pointer before it was hooked.
+
+**/
+UINT64
+EFIAPI
+HookReturnFromSmm (
+  IN UINTN              CpuIndex,
+  SMRAM_SAVE_STATE_MAP  *CpuState,
+  UINT64                NewInstructionPointer32,
+  UINT64                NewInstructionPointer
+  );
+
+/**
+  Get the size of the SMI Handler in bytes.
+
+  @retval The size, in bytes, of the SMI Handler.
+
+**/
+UINTN
+EFIAPI
+GetSmiHandlerSize (
+  VOID
+  );
+
+/**
+  Install the SMI handler for the CPU specified by CpuIndex.  This function
+  is called by the CPU that was elected as monarch during System Management
+  Mode initialization.
+
+  @param[in] CpuIndex   The index of the CPU to install the custom SMI handler.
+                        The value must be between 0 and the NumberOfCpus field
+                        in the System Management System Table (SMST).
+  @param[in] SmBase     The SMBASE address for the CPU specified by CpuIndex.
+  @param[in] SmiStack   The stack to use when an SMI is processed by the
+                        CPU specified by CpuIndex.
+  @param[in] StackSize  The size, in bytes, of the stack used when an SMI is
+                        processed by the CPU specified by CpuIndex.
+  @param[in] GdtBase    The base address of the GDT to use when an SMI is
+                        processed by the CPU specified by CpuIndex.
+  @param[in] GdtSize    The size, in bytes, of the GDT used when an SMI is
+                        processed by the CPU specified by CpuIndex.
+  @param[in] IdtBase    The base address of the IDT to use when an SMI is
+                        processed by the CPU specified by CpuIndex.
+  @param[in] IdtSize    The size, in bytes, of the IDT used when an SMI is
+                        processed by the CPU specified by CpuIndex.
+  @param[in] Cr3        The base address of the page tables to use when an SMI
+                        is processed by the CPU specified by CpuIndex.
+**/
+VOID
+EFIAPI
+InstallSmiHandler (
+  IN UINTN   CpuIndex,
+  IN UINT32  SmBase,
+  IN VOID    *SmiStack,
+  IN UINTN   StackSize,
+  IN UINTN   GdtBase,
+  IN UINTN   GdtSize,
+  IN UINTN   IdtBase,
+  IN UINTN   IdtSize,
+  IN UINT32  Cr3
+  );
+
+/**
+  Search for the module name that contains the input IP address and print it.
+
+  @param CallerIpAddress    Caller instruction pointer.
+
+**/
+VOID
+DumpModuleInfoByIp (
+  IN  UINTN  CallerIpAddress
+  );
+#endif
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
new file mode 100644
index 0000000000..45ab16c07f
--- /dev/null
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.inf
@@ -0,0 +1,163 @@
+## @file
+# CPU SMM driver.
+#
+# This SMM driver performs SMM initialization, deploys the SMM Entry Vector,
+# and provides CPU-specific services in SMM.
+#
+# Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.
+# +# This program and the accompanying materials +# are licensed and made available under the terms and conditions of the BSD License +# which accompanies this distribution. The full text of the license may be found at +# http://opensource.org/licenses/bsd-license.php +# +# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. +# +## + +[Defines] + INF_VERSION = 0x00010005 + BASE_NAME = PiSmmCpuDxeSmm + MODULE_UNI_FILE = PiSmmCpuDxeSmm.uni + FILE_GUID = A3FF0EF5-0C28-42f5-B544-8C7DE1E80014 + MODULE_TYPE = DXE_SMM_DRIVER + VERSION_STRING = 1.0 + PI_SPECIFICATION_VERSION = 0x0001000A + ENTRY_POINT = PiCpuSmmEntry + +# +# The following information is for reference only and not required by the build tools. +# +# VALID_ARCHITECTURES = IA32 X64 +# + +[Sources] + PiSmmCpuDxeSmm.c + PiSmmCpuDxeSmm.h + MpService.c + SyncTimer.c + CpuS3.c + CpuService.c + CpuService.h + SmmProfile.c + SmmProfile.h + SmmProfileInternal.h + SmramSaveState.c + +[Sources.Ia32] + Ia32/Semaphore.c + Ia32/PageTbl.c + Ia32/SmmProfileArch.c + Ia32/SmmProfileArch.h + Ia32/SmmInit.asm | MSFT + Ia32/SmiEntry.asm | MSFT + Ia32/SmiException.asm | MSFT + Ia32/MpFuncs.asm | MSFT + + Ia32/SmmInit.asm | INTEL + Ia32/SmiEntry.asm | INTEL + Ia32/SmiException.asm | INTEL + Ia32/MpFuncs.asm | INTEL + + Ia32/SmmInit.S | GCC + Ia32/SmiEntry.S | GCC + Ia32/SmiException.S | GCC + Ia32/MpFuncs.S | GCC + +[Sources.X64] + X64/Semaphore.c + X64/PageTbl.c + X64/SmmProfileArch.c + X64/SmmProfileArch.h + X64/SmmInit.asm | MSFT + X64/SmiEntry.asm | MSFT + X64/SmiException.asm | MSFT + X64/MpFuncs.asm | MSFT + + X64/SmmInit.asm | INTEL + X64/SmiEntry.asm | INTEL + X64/SmiException.asm | INTEL + X64/MpFuncs.asm | INTEL + + X64/SmmInit.S | GCC + X64/SmiEntry.S | GCC + X64/SmiException.S | GCC + X64/MpFuncs.S | GCC + +[Packages] + MdePkg/MdePkg.dec + MdeModulePkg/MdeModulePkg.dec + UefiCpuPkg/UefiCpuPkg.dec + IntelFrameworkPkg/IntelFrameworkPkg.dec + +[LibraryClasses] + UefiDriverEntryPoint + UefiRuntimeServicesTableLib + CacheMaintenanceLib + PcdLib + DebugLib + BaseLib + SynchronizationLib + BaseMemoryLib + MtrrLib + SmmLib + IoLib + TimerLib + SmmServicesTableLib + MemoryAllocationLib + DebugAgentLib + HobLib + PciLib + LocalApicLib + UefiCpuLib + SmmCpuPlatformHookLib + CpuExceptionHandlerLib + UefiLib + DxeServicesTableLib + CpuLib + ReportStatusCodeLib + SmmCpuFeaturesLib + PeCoffGetEntryPointLib + +[Protocols] + gEfiSmmAccess2ProtocolGuid ## CONSUMES + gEfiMpServiceProtocolGuid ## CONSUMES + gEfiSmmConfigurationProtocolGuid ## PRODUCES + gEfiSmmCpuProtocolGuid ## PRODUCES + gEfiSmmReadyToLockProtocolGuid ## NOTIFY + gEfiSmmCpuServiceProtocolGuid ## PRODUCES + gEfiSmmCpuSaveStateProtocolGuid ## SOMETIMES_PRODUCES + +[Guids] + gEfiAcpiVariableGuid ## SOMETIMES_CONSUMES ## HOB # it is used for S3 boot. 
+ gEfiGlobalVariableGuid ## SOMETIMES_PRODUCES ## Variable:L"SmmProfileData" + gEfiAcpi20TableGuid ## SOMETIMES_CONSUMES ## SystemTable + gEfiAcpi10TableGuid ## SOMETIMES_CONSUMES ## SystemTable + +[FeaturePcd] + gEfiMdeModulePkgTokenSpaceGuid.PcdFrameworkCompatibilitySupport ## CONSUMES + gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmDebug ## CONSUMES + gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmBlockStartupThisAp ## CONSUMES + gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmEnableBspElection ## CONSUMES + gUefiCpuPkgTokenSpaceGuid.PcdCpuHotPlugSupport ## CONSUMES + gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmStackGuard ## CONSUMES + gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmProfileEnable ## CONSUMES + gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmProfileRingBuffer ## CONSUMES + gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmFeatureControlMsrLock ## CONSUMES + +[Pcd] + gUefiCpuPkgTokenSpaceGuid.PcdCpuMaxLogicalProcessorNumber ## SOMETIMES_CONSUMES + gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmProfileSize ## SOMETIMES_CONSUMES + gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmStackSize ## CONSUMES + gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmApSyncTimeout ## CONSUMES + gUefiCpuPkgTokenSpaceGuid.PcdCpuS3DataAddress ## SOMETIMES_CONSUMES + gUefiCpuPkgTokenSpaceGuid.PcdCpuHotPlugDataAddress ## SOMETIMES_PRODUCES + gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmCodeAccessCheckEnable ## CONSUMES + gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmSyncMode ## CONSUMES + +[Depex] + gEfiMpServiceProtocolGuid + +[UserExtensions.TianoCore."ExtraFiles"] + PiSmmCpuDxeSmmExtra.uni diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.uni b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.uni new file mode 100644 index 0000000000..6a5b8c972f Binary files /dev/null and b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmm.uni differ diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmmExtra.uni b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmmExtra.uni new file mode 100644 index 0000000000..cd71f57ed9 Binary files /dev/null and b/UefiCpuPkg/PiSmmCpuDxeSmm/PiSmmCpuDxeSmmExtra.uni differ diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c new file mode 100644 index 0000000000..8ddde9acb5 --- /dev/null +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.c @@ -0,0 +1,1443 @@ +/** @file +Enable SMM profile. + +Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.
+This program and the accompanying materials +are licensed and made available under the terms and conditions of the BSD License +which accompanies this distribution. The full text of the license may be found at +http://opensource.org/licenses/bsd-license.php + +THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#include "PiSmmCpuDxeSmm.h" +#include "SmmProfileInternal.h" + +UINT32 mSmmProfileCr3; + +SMM_PROFILE_HEADER *mSmmProfileBase; +MSR_DS_AREA_STRUCT *mMsrDsAreaBase; +// +// The buffer to store SMM profile data. +// +UINTN mSmmProfileSize; + +// +// The buffer to enable branch trace store. +// +UINTN mMsrDsAreaSize = SMM_PROFILE_DTS_SIZE; + +// +// The flag indicates if execute-disable is supported by processor. +// +BOOLEAN mXdSupported = FALSE; + +// +// The flag indicates if execute-disable is enabled on processor. +// +BOOLEAN mXdEnabled = FALSE; + +// +// The flag indicates if BTS is supported by processor. +// +BOOLEAN mBtsSupported = FALSE; + +// +// The flag indicates if SMM profile starts to record data. +// +BOOLEAN mSmmProfileStart = FALSE; + +// +// Record the page fault exception count for one instruction execution. +// +UINTN *mPFEntryCount; + +UINT64 (*mLastPFEntryValue)[MAX_PF_ENTRY_COUNT]; +UINT64 *(*mLastPFEntryPointer)[MAX_PF_ENTRY_COUNT]; + +MSR_DS_AREA_STRUCT **mMsrDsArea; +BRANCH_TRACE_RECORD **mMsrBTSRecord; +UINTN mBTSRecordNumber; +PEBS_RECORD **mMsrPEBSRecord; + +// +// These memory ranges are always present, they does not generate the access type of page fault exception, +// but they possibly generate instruction fetch type of page fault exception. +// +MEMORY_PROTECTION_RANGE *mProtectionMemRange = NULL; +UINTN mProtectionMemRangeCount = 0; + +// +// Some predefined memory ranges. +// +MEMORY_PROTECTION_RANGE mProtectionMemRangeTemplate[] = { + // + // SMRAM range (to be fixed in runtime). + // It is always present and instruction fetches are allowed. + // + {{0x00000000, 0x00000000},TRUE,FALSE}, + + // + // SMM profile data range( to be fixed in runtime). + // It is always present and instruction fetches are not allowed. + // + {{0x00000000, 0x00000000},TRUE,TRUE}, + + // + // Future extended range could be added here. + // + + // + // PCI MMIO ranges (to be added in runtime). + // They are always present and instruction fetches are not allowed. + // +}; + +// +// These memory ranges are mapped by 4KB-page instead of 2MB-page. +// +MEMORY_RANGE *mSplitMemRange = NULL; +UINTN mSplitMemRangeCount = 0; + +// +// SMI command port. +// +UINT32 mSmiCommandPort; + +/** + Disable branch trace store. + +**/ +VOID +DisableBTS ( + VOID + ) +{ + AsmMsrAnd64 (MSR_DEBUG_CTL, ~((UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR))); +} + +/** + Enable branch trace store. + +**/ +VOID +EnableBTS ( + VOID + ) +{ + AsmMsrOr64 (MSR_DEBUG_CTL, (MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR)); +} + +/** + Get CPU Index from APIC ID. + +**/ +UINTN +GetCpuIndex ( + VOID + ) +{ + UINTN Index; + UINT32 ApicId; + + ApicId = GetApicId (); + + for (Index = 0; Index < PcdGet32 (PcdCpuMaxLogicalProcessorNumber); Index++) { + if (gSmmCpuPrivate->ProcessorInfo[Index].ProcessorId == ApicId) { + return Index; + } + } + ASSERT (FALSE); + return 0; +} + +/** + Get the source of IP after execute-disable exception is triggered. + + @param CpuIndex The index of CPU. + @param DestinationIP The destination address. 
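+
+  Note: the BTS buffer is a ring of BRANCH_TRACE_RECORD entries. The handler
+  walks backwards from the current BTSIndex looking for a record whose
+  LastBranchTo equals DestinationIP. The first match is the branch into the
+  DEBUG exception dispatch itself, so the second match is the branch that
+  actually jumped to the faulting address, and its LastBranchFrom is returned;
+  0 is returned if no such record is found.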
+ +**/ +UINT64 +GetSourceFromDestinationOnBts ( + UINTN CpuIndex, + UINT64 DestinationIP + ) +{ + BRANCH_TRACE_RECORD *CurrentBTSRecord; + UINTN Index; + BOOLEAN FirstMatch; + + FirstMatch = FALSE; + + CurrentBTSRecord = (BRANCH_TRACE_RECORD *)mMsrDsArea[CpuIndex]->BTSIndex; + for (Index = 0; Index < mBTSRecordNumber; Index++) { + if ((UINTN)CurrentBTSRecord < (UINTN)mMsrBTSRecord[CpuIndex]) { + // + // Underflow + // + CurrentBTSRecord = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[CpuIndex]->BTSAbsoluteMaximum - 1); + CurrentBTSRecord --; + } + if (CurrentBTSRecord->LastBranchTo == DestinationIP) { + // + // Good! find 1st one, then find 2nd one. + // + if (!FirstMatch) { + // + // The first one is DEBUG exception + // + FirstMatch = TRUE; + } else { + // + // Good find proper one. + // + return CurrentBTSRecord->LastBranchFrom; + } + } + CurrentBTSRecord--; + } + + return 0; +} + +/** + SMM profile specific INT 1 (single-step) exception handler. + + @param InterruptType Defines the type of interrupt or exception that + occurred on the processor.This parameter is processor architecture specific. + @param SystemContext A pointer to the processor context when + the interrupt occurred on the processor. +**/ +VOID +EFIAPI +DebugExceptionHandler ( + IN EFI_EXCEPTION_TYPE InterruptType, + IN EFI_SYSTEM_CONTEXT SystemContext + ) +{ + UINTN CpuIndex; + UINTN PFEntry; + + if (!mSmmProfileStart) { + return; + } + CpuIndex = GetCpuIndex (); + + // + // Clear last PF entries + // + for (PFEntry = 0; PFEntry < mPFEntryCount[CpuIndex]; PFEntry++) { + *mLastPFEntryPointer[CpuIndex][PFEntry] = mLastPFEntryValue[CpuIndex][PFEntry]; + } + + // + // Reset page fault exception count for next page fault. + // + mPFEntryCount[CpuIndex] = 0; + + // + // Flush TLB + // + CpuFlushTlb (); + + // + // Clear TF in EFLAGS + // + ClearTrapFlag (SystemContext); +} + +/** + Check if the memory address will be mapped by 4KB-page. + + @param Address The address of Memory. + @param Nx The flag indicates if the memory is execute-disable. + +**/ +BOOLEAN +IsAddressValid ( + IN EFI_PHYSICAL_ADDRESS Address, + IN BOOLEAN *Nx + ) +{ + UINTN Index; + + *Nx = FALSE; + if (FeaturePcdGet (PcdCpuSmmProfileEnable)) { + // + // Check configuration + // + for (Index = 0; Index < mProtectionMemRangeCount; Index++) { + if ((Address >= mProtectionMemRange[Index].Range.Base) && (Address < mProtectionMemRange[Index].Range.Top)) { + *Nx = mProtectionMemRange[Index].Nx; + return mProtectionMemRange[Index].Present; + } + } + *Nx = TRUE; + return FALSE; + + } else { + if ((Address < mCpuHotPlugData.SmrrBase) || + (Address >= mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize)) { + *Nx = TRUE; + } + return TRUE; + } +} + +/** + Check if the memory address will be mapped by 4KB-page. + + @param Address The address of Memory. 
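+
+  For example (illustrative): when SMM profile is disabled, the 2MB pages
+  that straddle the SMRR base and the SMRR limit are reported as split, so
+  the SMRAM boundary can be mapped with 4KB granularity.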
+
+**/
+BOOLEAN
+IsAddressSplit (
+  IN EFI_PHYSICAL_ADDRESS   Address
+  )
+{
+  UINTN  Index;
+
+  if (FeaturePcdGet (PcdCpuSmmProfileEnable)) {
+    //
+    // Check configuration
+    //
+    for (Index = 0; Index < mSplitMemRangeCount; Index++) {
+      if ((Address >= mSplitMemRange[Index].Base) && (Address < mSplitMemRange[Index].Top)) {
+        return TRUE;
+      }
+    }
+  } else {
+    if (Address < mCpuHotPlugData.SmrrBase) {
+      if ((mCpuHotPlugData.SmrrBase - Address) < BASE_2MB) {
+        return TRUE;
+      }
+    } else if (Address > (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) {
+      if ((Address - (mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize - BASE_2MB)) < BASE_2MB) {
+        return TRUE;
+      }
+    }
+  }
+  //
+  // Return default
+  //
+  return FALSE;
+}
+
+/**
+  Initialize the protected memory ranges and the 4KB-page mapped memory ranges.
+
+**/
+VOID
+InitProtectedMemRange (
+  VOID
+  )
+{
+  UINTN                            Index;
+  UINTN                            NumberOfDescriptors;
+  UINTN                            NumberOfMmioDescriptors;
+  UINTN                            NumberOfProtectRange;
+  UINTN                            NumberOfSpliteRange;
+  EFI_GCD_MEMORY_SPACE_DESCRIPTOR  *MemorySpaceMap;
+  UINTN                            TotalSize;
+  EFI_STATUS                       Status;
+  EFI_PHYSICAL_ADDRESS             ProtectBaseAddress;
+  EFI_PHYSICAL_ADDRESS             ProtectEndAddress;
+  EFI_PHYSICAL_ADDRESS             Top2MBAlignedAddress;
+  EFI_PHYSICAL_ADDRESS             Base2MBAlignedAddress;
+  UINT64                           High4KBPageSize;
+  UINT64                           Low4KBPageSize;
+
+  NumberOfDescriptors     = 0;
+  NumberOfMmioDescriptors = 0;
+  NumberOfSpliteRange     = 0;
+  MemorySpaceMap          = NULL;
+
+  //
+  // Get MMIO ranges from GCD and add them into protected memory ranges.
+  //
+  Status = gDS->GetMemorySpaceMap (
+             &NumberOfDescriptors,
+             &MemorySpaceMap
+             );
+  ASSERT_EFI_ERROR (Status);
+  for (Index = 0; Index < NumberOfDescriptors; Index++) {
+    if (MemorySpaceMap[Index].GcdMemoryType == EfiGcdMemoryTypeMemoryMappedIo) {
+      NumberOfMmioDescriptors++;
+    }
+  }
+
+  if (NumberOfMmioDescriptors != 0) {
+    TotalSize = NumberOfMmioDescriptors * sizeof (MEMORY_PROTECTION_RANGE) + sizeof (mProtectionMemRangeTemplate);
+    mProtectionMemRange = (MEMORY_PROTECTION_RANGE *) AllocateZeroPool (TotalSize);
+    ASSERT (mProtectionMemRange != NULL);
+    mProtectionMemRangeCount = TotalSize / sizeof (MEMORY_PROTECTION_RANGE);
+
+    //
+    // Copy existing ranges.
+    //
+    CopyMem (mProtectionMemRange, mProtectionMemRangeTemplate, sizeof (mProtectionMemRangeTemplate));
+
+    //
+    // Create split ranges which come from protected ranges.
+    //
+    TotalSize = (TotalSize / sizeof (MEMORY_PROTECTION_RANGE)) * sizeof (MEMORY_RANGE);
+    mSplitMemRange = (MEMORY_RANGE *) AllocateZeroPool (TotalSize);
+    ASSERT (mSplitMemRange != NULL);
+
+    //
+    // Create MMIO ranges which are set to present and execution-disable.
+    //
+    NumberOfProtectRange = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE);
+    for (Index = 0; Index < NumberOfDescriptors; Index++) {
+      if (MemorySpaceMap[Index].GcdMemoryType != EfiGcdMemoryTypeMemoryMappedIo) {
+        continue;
+      }
+      mProtectionMemRange[NumberOfProtectRange].Range.Base = MemorySpaceMap[Index].BaseAddress;
+      mProtectionMemRange[NumberOfProtectRange].Range.Top  = MemorySpaceMap[Index].BaseAddress + MemorySpaceMap[Index].Length;
+      mProtectionMemRange[NumberOfProtectRange].Present    = TRUE;
+      mProtectionMemRange[NumberOfProtectRange].Nx         = TRUE;
+      NumberOfProtectRange++;
+    }
+  }
+
+  //
+  // According to protected ranges, create the ranges which will be mapped by 4KB pages.
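+  // Worked example (illustrative, not from the original code): an MMIO
+  // range [0xFE000000, 0xFE001000) is not 2MB-aligned, so the 2MB region
+  // [0xFE000000, 0xFE200000) containing it is recorded in mSplitMemRange
+  // and later mapped with 4KB pages, while a 2MB-aligned range such as
+  // [0xC0000000, 0xC0400000) keeps its 2MB mappings.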
+ // + NumberOfSpliteRange = 0; + NumberOfProtectRange = mProtectionMemRangeCount; + for (Index = 0; Index < NumberOfProtectRange; Index++) { + // + // If MMIO base address is not 2MB alignment, make 2MB alignment for create 4KB page in page table. + // + ProtectBaseAddress = mProtectionMemRange[Index].Range.Base; + ProtectEndAddress = mProtectionMemRange[Index].Range.Top; + if (((ProtectBaseAddress & (SIZE_2MB - 1)) != 0) || ((ProtectEndAddress & (SIZE_2MB - 1)) != 0)) { + // + // Check if it is possible to create 4KB-page for not 2MB-aligned range and to create 2MB-page for 2MB-aligned range. + // A mix of 4KB and 2MB page could save SMRAM space. + // + Top2MBAlignedAddress = ProtectEndAddress & ~(SIZE_2MB - 1); + Base2MBAlignedAddress = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1); + if ((Top2MBAlignedAddress > Base2MBAlignedAddress) && + ((Top2MBAlignedAddress - Base2MBAlignedAddress) >= SIZE_2MB)) { + // + // There is an range which could be mapped by 2MB-page. + // + High4KBPageSize = ((ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectEndAddress & ~(SIZE_2MB - 1)); + Low4KBPageSize = ((ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1)) - (ProtectBaseAddress & ~(SIZE_2MB - 1)); + if (High4KBPageSize != 0) { + // + // Add not 2MB-aligned range to be mapped by 4KB-page. + // + mSplitMemRange[NumberOfSpliteRange].Base = ProtectEndAddress & ~(SIZE_2MB - 1); + mSplitMemRange[NumberOfSpliteRange].Top = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1); + NumberOfSpliteRange++; + } + if (Low4KBPageSize != 0) { + // + // Add not 2MB-aligned range to be mapped by 4KB-page. + // + mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1); + mSplitMemRange[NumberOfSpliteRange].Top = (ProtectBaseAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1); + NumberOfSpliteRange++; + } + } else { + // + // The range could only be mapped by 4KB-page. + // + mSplitMemRange[NumberOfSpliteRange].Base = ProtectBaseAddress & ~(SIZE_2MB - 1); + mSplitMemRange[NumberOfSpliteRange].Top = (ProtectEndAddress + SIZE_2MB - 1) & ~(SIZE_2MB - 1); + NumberOfSpliteRange++; + } + } + } + + mSplitMemRangeCount = NumberOfSpliteRange; + + DEBUG ((EFI_D_INFO, "SMM Profile Memory Ranges:\n")); + for (Index = 0; Index < mProtectionMemRangeCount; Index++) { + DEBUG ((EFI_D_INFO, "mProtectionMemRange[%d].Base = %lx\n", Index, mProtectionMemRange[Index].Range.Base)); + DEBUG ((EFI_D_INFO, "mProtectionMemRange[%d].Top = %lx\n", Index, mProtectionMemRange[Index].Range.Top)); + } + for (Index = 0; Index < mSplitMemRangeCount; Index++) { + DEBUG ((EFI_D_INFO, "mSplitMemRange[%d].Base = %lx\n", Index, mSplitMemRange[Index].Base)); + DEBUG ((EFI_D_INFO, "mSplitMemRange[%d].Top = %lx\n", Index, mSplitMemRange[Index].Top)); + } +} + +/** + Update page table according to protected memory ranges and the 4KB-page mapped memory ranges. + +**/ +VOID +InitPaging ( + VOID + ) +{ + UINT64 *Pml4; + UINT64 *Pde; + UINT64 *Pte; + UINT64 *Pt; + UINTN Address; + UINTN Level1; + UINTN Level2; + UINTN Level3; + UINTN Level4; + UINTN NumberOfPdpEntries; + UINTN NumberOfPml4Entries; + UINTN SizeOfMemorySpace; + BOOLEAN Nx; + + if (sizeof (UINTN) == sizeof (UINT64)) { + Pml4 = (UINT64*)(UINTN)mSmmProfileCr3; + SizeOfMemorySpace = HighBitSet64 (gPhyMask) + 1; + // + // Calculate the table entries of PML4E and PDPTE. 
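+  // Example (illustrative): for a 36-bit physical address space,
+  // SizeOfMemorySpace = 36 <= 39, so NumberOfPml4Entries = 1 and
+  // NumberOfPdpEntries = 1 << (36 - 30) = 64; for 48 bits,
+  // NumberOfPml4Entries = 1 << (48 - 39) = 512 and NumberOfPdpEntries = 512.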
+ // + if (SizeOfMemorySpace <= 39 ) { + NumberOfPml4Entries = 1; + NumberOfPdpEntries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace - 30)); + } else { + NumberOfPml4Entries = (UINT32)LShiftU64 (1, (SizeOfMemorySpace - 39)); + NumberOfPdpEntries = 512; + } + } else { + NumberOfPml4Entries = 1; + NumberOfPdpEntries = 4; + } + + // + // Go through page table and change 2MB-page into 4KB-page. + // + for (Level1 = 0; Level1 < NumberOfPml4Entries; Level1++) { + if (sizeof (UINTN) == sizeof (UINT64)) { + if ((Pml4[Level1] & IA32_PG_P) == 0) { + // + // If Pml4 entry does not exist, skip it + // + continue; + } + Pde = (UINT64 *)(UINTN)(Pml4[Level1] & PHYSICAL_ADDRESS_MASK); + } else { + Pde = (UINT64*)(UINTN)mSmmProfileCr3; + } + for (Level2 = 0; Level2 < NumberOfPdpEntries; Level2++, Pde++) { + if ((*Pde & IA32_PG_P) == 0) { + // + // If PDE entry does not exist, skip it + // + continue; + } + Pte = (UINT64 *)(UINTN)(*Pde & PHYSICAL_ADDRESS_MASK); + if (Pte == 0) { + continue; + } + for (Level3 = 0; Level3 < SIZE_4KB / sizeof (*Pte); Level3++, Pte++) { + if ((*Pte & IA32_PG_P) == 0) { + // + // If PTE entry does not exist, skip it + // + continue; + } + Address = (((Level2 << 9) + Level3) << 21); + + // + // If it is 2M page, check IsAddressSplit() + // + if (((*Pte & IA32_PG_PS) != 0) && IsAddressSplit (Address)) { + // + // Based on current page table, create 4KB page table for split area. + // + ASSERT (Address == (*Pte & PHYSICAL_ADDRESS_MASK)); + + Pt = AllocatePages (1); + ASSERT (Pt != NULL); + + // Split it + for (Level4 = 0; Level4 < SIZE_4KB / sizeof(*Pt); Level4++) { + Pt[Level4] = Address + ((Level4 << 12) | IA32_PG_RW | IA32_PG_P); + } // end for PT + *Pte = (UINTN)Pt | IA32_PG_RW | IA32_PG_P; + } // end if IsAddressSplit + } // end for PTE + } // end for PDE + } + + // + // Go through page table and set several page table entries to absent or execute-disable. 
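+  // Note: the IA32_PG_NX bit set below only takes effect once EFER.NXE is
+  // enabled, which ActivateXd () does on each processor when mXdSupported
+  // has been confirmed.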
+ // + DEBUG ((EFI_D_INFO, "Patch page table start ...\n")); + for (Level1 = 0; Level1 < NumberOfPml4Entries; Level1++) { + if (sizeof (UINTN) == sizeof (UINT64)) { + if ((Pml4[Level1] & IA32_PG_P) == 0) { + // + // If Pml4 entry does not exist, skip it + // + continue; + } + Pde = (UINT64 *)(UINTN)(Pml4[Level1] & PHYSICAL_ADDRESS_MASK); + } else { + Pde = (UINT64*)(UINTN)mSmmProfileCr3; + } + for (Level2 = 0; Level2 < NumberOfPdpEntries; Level2++, Pde++) { + if ((*Pde & IA32_PG_P) == 0) { + // + // If PDE entry does not exist, skip it + // + continue; + } + Pte = (UINT64 *)(UINTN)(*Pde & PHYSICAL_ADDRESS_MASK); + if (Pte == 0) { + continue; + } + for (Level3 = 0; Level3 < SIZE_4KB / sizeof (*Pte); Level3++, Pte++) { + if ((*Pte & IA32_PG_P) == 0) { + // + // If PTE entry does not exist, skip it + // + continue; + } + Address = (((Level2 << 9) + Level3) << 21); + + if ((*Pte & IA32_PG_PS) != 0) { + // 2MB page + + if (!IsAddressValid (Address, &Nx)) { + // + // Patch to remove Present flag and RW flag + // + *Pte = *Pte & (INTN)(INT32)(~(IA32_PG_RW | IA32_PG_P)); + } + if (Nx && mXdSupported) { + *Pte = *Pte | IA32_PG_NX; + } + } else { + // 4KB page + Pt = (UINT64 *)(UINTN)(*Pte & PHYSICAL_ADDRESS_MASK); + if (Pt == 0) { + continue; + } + for (Level4 = 0; Level4 < SIZE_4KB / sizeof(*Pt); Level4++, Pt++) { + if (!IsAddressValid (Address, &Nx)) { + *Pt = *Pt & (INTN)(INT32)(~(IA32_PG_RW | IA32_PG_P)); + } + if (Nx && mXdSupported) { + *Pt = *Pt | IA32_PG_NX; + } + Address += SIZE_4KB; + } // end for PT + } // end if PS + } // end for PTE + } // end for PDE + } + + // + // Flush TLB + // + CpuFlushTlb (); + DEBUG ((EFI_D_INFO, "Patch page table done!\n")); + // + // Set execute-disable flag + // + mXdEnabled = TRUE; + + return ; +} + +/** + To find FADT in ACPI tables. + + @param AcpiTableGuid The GUID used to find ACPI table in UEFI ConfigurationTable. + + @return FADT table pointer. +**/ +EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE * +FindAcpiFadtTableByAcpiGuid ( + IN EFI_GUID *AcpiTableGuid + ) +{ + EFI_ACPI_2_0_ROOT_SYSTEM_DESCRIPTION_POINTER *Rsdp; + EFI_ACPI_DESCRIPTION_HEADER *Rsdt; + EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *Fadt; + UINTN Index; + UINT32 Data32; + Rsdp = NULL; + Rsdt = NULL; + Fadt = NULL; + // + // found ACPI table RSD_PTR from system table + // + for (Index = 0; Index < gST->NumberOfTableEntries; Index++) { + if (CompareGuid (&(gST->ConfigurationTable[Index].VendorGuid), AcpiTableGuid)) { + // + // A match was found. + // + Rsdp = gST->ConfigurationTable[Index].VendorTable; + break; + } + } + + if (Rsdp == NULL) { + return NULL; + } + + Rsdt = (EFI_ACPI_DESCRIPTION_HEADER *)(UINTN) Rsdp->RsdtAddress; + if (Rsdt == NULL || Rsdt->Signature != EFI_ACPI_2_0_ROOT_SYSTEM_DESCRIPTION_TABLE_SIGNATURE) { + return NULL; + } + + for (Index = sizeof (EFI_ACPI_DESCRIPTION_HEADER); Index < Rsdt->Length; Index = Index + sizeof (UINT32)) { + + Data32 = *(UINT32 *) ((UINT8 *) Rsdt + Index); + Fadt = (EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *) (UINT32 *) (UINTN) Data32; + if (Fadt->Header.Signature == EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE) { + break; + } + } + + if (Fadt == NULL || Fadt->Header.Signature != EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE_SIGNATURE) { + return NULL; + } + + return Fadt; +} + +/** + To find FADT in ACPI tables. + + @return FADT table pointer. 
+**/ +EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE * +FindAcpiFadtTable ( + VOID + ) +{ + EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *Fadt; + + Fadt = FindAcpiFadtTableByAcpiGuid (&gEfiAcpi20TableGuid); + if (Fadt != NULL) { + return Fadt; + } + + return FindAcpiFadtTableByAcpiGuid (&gEfiAcpi10TableGuid); +} + +/** + To get system port address of the SMI Command Port in FADT table. + +**/ +VOID +GetSmiCommandPort ( + VOID + ) +{ + EFI_ACPI_2_0_FIXED_ACPI_DESCRIPTION_TABLE *Fadt; + + Fadt = FindAcpiFadtTable (); + ASSERT (Fadt != NULL); + + mSmiCommandPort = Fadt->SmiCmd; + DEBUG ((EFI_D_INFO, "mSmiCommandPort = %x\n", mSmiCommandPort)); +} + +/** + Updates page table to make some memory ranges (like system memory) absent + and make some memory ranges (like MMIO) present and execute disable. It also + update 2MB-page to 4KB-page for some memory ranges. + +**/ +VOID +SmmProfileStart ( + VOID + ) +{ + // + // The flag indicates SMM profile starts to work. + // + mSmmProfileStart = TRUE; +} + +/** + Initialize SMM profile in SmmReadyToLock protocol callback function. + + @param Protocol Points to the protocol's unique identifier. + @param Interface Points to the interface instance. + @param Handle The handle on which the interface was installed. + + @retval EFI_SUCCESS SmmReadyToLock protocol callback runs successfully. +**/ +EFI_STATUS +EFIAPI +InitSmmProfileCallBack ( + IN CONST EFI_GUID *Protocol, + IN VOID *Interface, + IN EFI_HANDLE Handle + ) +{ + EFI_STATUS Status; + + // + // Save to variable so that SMM profile data can be found. + // + Status = gRT->SetVariable ( + SMM_PROFILE_NAME, + &gEfiCallerIdGuid, + EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS, + sizeof(mSmmProfileBase), + &mSmmProfileBase + ); + + // + // Get Software SMI from FADT + // + GetSmiCommandPort (); + + // + // Initialize protected memory range for patching page table later. + // + InitProtectedMemRange (); + + return EFI_SUCCESS; +} + +/** + Initialize SMM profile data structures. + +**/ +VOID +InitSmmProfileInternal ( + VOID + ) +{ + EFI_STATUS Status; + EFI_PHYSICAL_ADDRESS Base; + VOID *Registration; + UINTN Index; + UINTN MsrDsAreaSizePerCpu; + UINTN TotalSize; + + mPFEntryCount = (UINTN *)AllocateZeroPool (sizeof (UINTN) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber)); + ASSERT (mPFEntryCount != NULL); + mLastPFEntryValue = (UINT64 (*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool ( + sizeof (mLastPFEntryValue[0]) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber)); + ASSERT (mLastPFEntryValue != NULL); + mLastPFEntryPointer = (UINT64 *(*)[MAX_PF_ENTRY_COUNT])AllocateZeroPool ( + sizeof (mLastPFEntryPointer[0]) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber)); + ASSERT (mLastPFEntryPointer != NULL); + + // + // Allocate memory for SmmProfile below 4GB. + // The base address + // + mSmmProfileSize = PcdGet32 (PcdCpuSmmProfileSize); + ASSERT ((mSmmProfileSize & 0xFFF) == 0); + + if (mBtsSupported) { + TotalSize = mSmmProfileSize + mMsrDsAreaSize; + } else { + TotalSize = mSmmProfileSize; + } + + Base = 0xFFFFFFFF; + Status = gBS->AllocatePages ( + AllocateMaxAddress, + EfiReservedMemoryType, + EFI_SIZE_TO_PAGES (TotalSize), + &Base + ); + ASSERT_EFI_ERROR (Status); + ZeroMem ((VOID *)(UINTN)Base, TotalSize); + mSmmProfileBase = (SMM_PROFILE_HEADER *)(UINTN)Base; + + // + // Initialize SMM profile data header. 
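+  // Resulting buffer layout (sketch): an SMM_PROFILE_HEADER, followed by
+  // MaxDataEntries SMM_PROFILE_ENTRY records, followed (when BTS is
+  // supported) by the per-CPU DS save areas with their BTS and PEBS buffers.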
+ // + mSmmProfileBase->HeaderSize = sizeof (SMM_PROFILE_HEADER); + mSmmProfileBase->MaxDataEntries = (UINT64)((mSmmProfileSize - sizeof(SMM_PROFILE_HEADER)) / sizeof (SMM_PROFILE_ENTRY)); + mSmmProfileBase->MaxDataSize = MultU64x64 (mSmmProfileBase->MaxDataEntries, sizeof(SMM_PROFILE_ENTRY)); + mSmmProfileBase->CurDataEntries = 0; + mSmmProfileBase->CurDataSize = 0; + mSmmProfileBase->TsegStart = mCpuHotPlugData.SmrrBase; + mSmmProfileBase->TsegSize = mCpuHotPlugData.SmrrSize; + mSmmProfileBase->NumSmis = 0; + mSmmProfileBase->NumCpus = gSmmCpuPrivate->SmmCoreEntryContext.NumberOfCpus; + + if (mBtsSupported) { + mMsrDsArea = (MSR_DS_AREA_STRUCT **)AllocateZeroPool (sizeof (MSR_DS_AREA_STRUCT *) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber)); + ASSERT (mMsrDsArea != NULL); + mMsrBTSRecord = (BRANCH_TRACE_RECORD **)AllocateZeroPool (sizeof (BRANCH_TRACE_RECORD *) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber)); + ASSERT (mMsrBTSRecord != NULL); + mMsrPEBSRecord = (PEBS_RECORD **)AllocateZeroPool (sizeof (PEBS_RECORD *) * PcdGet32 (PcdCpuMaxLogicalProcessorNumber)); + ASSERT (mMsrPEBSRecord != NULL); + + mMsrDsAreaBase = (MSR_DS_AREA_STRUCT *)((UINTN)Base + mSmmProfileSize); + MsrDsAreaSizePerCpu = mMsrDsAreaSize / PcdGet32 (PcdCpuMaxLogicalProcessorNumber); + mBTSRecordNumber = (MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER - sizeof(MSR_DS_AREA_STRUCT)) / sizeof(BRANCH_TRACE_RECORD); + for (Index = 0; Index < PcdGet32 (PcdCpuMaxLogicalProcessorNumber); Index++) { + mMsrDsArea[Index] = (MSR_DS_AREA_STRUCT *)((UINTN)mMsrDsAreaBase + MsrDsAreaSizePerCpu * Index); + mMsrBTSRecord[Index] = (BRANCH_TRACE_RECORD *)((UINTN)mMsrDsArea[Index] + sizeof(MSR_DS_AREA_STRUCT)); + mMsrPEBSRecord[Index] = (PEBS_RECORD *)((UINTN)mMsrDsArea[Index] + MsrDsAreaSizePerCpu - sizeof(PEBS_RECORD) * PEBS_RECORD_NUMBER); + + mMsrDsArea[Index]->BTSBufferBase = (UINTN)mMsrBTSRecord[Index]; + mMsrDsArea[Index]->BTSIndex = mMsrDsArea[Index]->BTSBufferBase; + mMsrDsArea[Index]->BTSAbsoluteMaximum = mMsrDsArea[Index]->BTSBufferBase + mBTSRecordNumber * sizeof(BRANCH_TRACE_RECORD) + 1; + mMsrDsArea[Index]->BTSInterruptThreshold = mMsrDsArea[Index]->BTSAbsoluteMaximum + 1; + + mMsrDsArea[Index]->PEBSBufferBase = (UINTN)mMsrPEBSRecord[Index]; + mMsrDsArea[Index]->PEBSIndex = mMsrDsArea[Index]->PEBSBufferBase; + mMsrDsArea[Index]->PEBSAbsoluteMaximum = mMsrDsArea[Index]->PEBSBufferBase + PEBS_RECORD_NUMBER * sizeof(PEBS_RECORD) + 1; + mMsrDsArea[Index]->PEBSInterruptThreshold = mMsrDsArea[Index]->PEBSAbsoluteMaximum + 1; + } + } + + mProtectionMemRange = mProtectionMemRangeTemplate; + mProtectionMemRangeCount = sizeof (mProtectionMemRangeTemplate) / sizeof (MEMORY_PROTECTION_RANGE); + + // + // Update TSeg entry. + // + mProtectionMemRange[0].Range.Base = mCpuHotPlugData.SmrrBase; + mProtectionMemRange[0].Range.Top = mCpuHotPlugData.SmrrBase + mCpuHotPlugData.SmrrSize; + + // + // Update SMM profile entry. + // + mProtectionMemRange[1].Range.Base = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase; + mProtectionMemRange[1].Range.Top = (EFI_PHYSICAL_ADDRESS)(UINTN)mSmmProfileBase + TotalSize; + + // + // Allocate memory reserved for creating 4KB pages. + // + InitPagesForPFHandler (); + + // + // Start SMM profile when SmmReadyToLock protocol is installed. + // + Status = gSmst->SmmRegisterProtocolNotify ( + &gEfiSmmReadyToLockProtocolGuid, + InitSmmProfileCallBack, + &Registration + ); + ASSERT_EFI_ERROR (Status); + + return ; +} + +/** + Check if XD feature is supported by a processor. 
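+
+  Note: mXdSupported and mBtsSupported are only ever cleared here; the BSP
+  sets them to TRUE and then runs this check on every processor, so the
+  final value is effectively the logical AND across all processors.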
+
+**/
+VOID
+CheckFeatureSupported (
+  VOID
+  )
+{
+  UINT32  RegEax;
+  UINT32  RegEdx;
+
+  if (mXdSupported) {
+    AsmCpuid (CPUID_EXTENDED_FUNCTION, &RegEax, NULL, NULL, NULL);
+    if (RegEax <= CPUID_EXTENDED_FUNCTION) {
+      //
+      // Extended CPUID functions are not supported on this processor.
+      //
+      mXdSupported = FALSE;
+    }
+
+    AsmCpuid (CPUID_EXTENDED_CPU_SIG, NULL, NULL, NULL, &RegEdx);
+    if ((RegEdx & CPUID1_EDX_XD_SUPPORT) == 0) {
+      //
+      // Execute Disable Bit feature is not supported on this processor.
+      //
+      mXdSupported = FALSE;
+    }
+  }
+
+  if (mBtsSupported) {
+    AsmCpuid (CPUID_VERSION_INFO, NULL, NULL, NULL, &RegEdx);
+    if ((RegEdx & CPUID1_EDX_BTS_AVAILABLE) == 0) {
+      //
+      // Branch Trace Store facility is not supported on this processor.
+      //
+      mBtsSupported = FALSE;
+    } else {
+      //
+      // Per IA32 manuals:
+      // When CPUID.1:EDX[21] is set, the following BTS facilities are available:
+      // 1. The BTS_UNAVAILABLE flag in the IA32_MISC_ENABLE MSR indicates the
+      //    availability of the BTS facilities, including the ability to set the BTS and
+      //    BTINT bits in the MSR_DEBUGCTLA MSR.
+      // 2. The IA32_DS_AREA MSR can be programmed to point to the DS save area.
+      //
+      if ((AsmMsrBitFieldRead64 (MSR_IA32_MISC_ENABLE, 11, 11) != 0) ||
+          (AsmMsrBitFieldRead64 (MSR_IA32_MISC_ENABLE, 12, 12) != 0)) {
+        //
+        // BTS or PEBS facilities are reported unavailable in IA32_MISC_ENABLE.
+        //
+        mBtsSupported = FALSE;
+      }
+    }
+  }
+}
+
+/**
+  Check if XD and BTS features are supported by all processors.
+
+**/
+VOID
+CheckProcessorFeature (
+  VOID
+  )
+{
+  EFI_STATUS                Status;
+  EFI_MP_SERVICES_PROTOCOL  *MpServices;
+
+  Status = gBS->LocateProtocol (&gEfiMpServiceProtocolGuid, NULL, (VOID **)&MpServices);
+  ASSERT_EFI_ERROR (Status);
+
+  //
+  // First assume XD and BTS are supported; the checks below clear the flags if not.
+  //
+  mXdSupported  = TRUE;
+  mBtsSupported = TRUE;
+
+  //
+  // Check if XD and BTS are supported on the BSP.
+  //
+  CheckFeatureSupported ();
+
+  //
+  // Check on the other processors only if the BSP supports the features.
+  //
+  if (mXdSupported || mBtsSupported) {
+    MpServices->StartupAllAPs (
+                  MpServices,
+                  (EFI_AP_PROCEDURE) CheckFeatureSupported,
+                  TRUE,
+                  NULL,
+                  0,
+                  NULL,
+                  NULL
+                  );
+  }
+}
+
+/**
+  Enable XD feature.
+
+**/
+VOID
+ActivateXd (
+  VOID
+  )
+{
+  UINT64  MsrRegisters;
+
+  MsrRegisters = AsmReadMsr64 (MSR_EFER);
+  if ((MsrRegisters & MSR_EFER_XD) != 0) {
+    return ;
+  }
+  MsrRegisters |= MSR_EFER_XD;
+  AsmWriteMsr64 (MSR_EFER, MsrRegisters);
+}
+
+/**
+  Enable single step.
+
+**/
+VOID
+ActivateSingleStepDB (
+  VOID
+  )
+{
+  UINTN  Dr6;
+
+  Dr6 = AsmReadDr6 ();
+  if ((Dr6 & DR6_SINGLE_STEP) != 0) {
+    return;
+  }
+  Dr6 |= DR6_SINGLE_STEP;
+  AsmWriteDr6 (Dr6);
+}
+
+/**
+  Enable last branch.
+
+**/
+VOID
+ActivateLBR (
+  VOID
+  )
+{
+  UINT64  DebugCtl;
+
+  DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);
+  if ((DebugCtl & MSR_DEBUG_CTL_LBR) != 0) {
+    return ;
+  }
+  AsmWriteMsr64 (MSR_LER_FROM_LIP, 0);
+  AsmWriteMsr64 (MSR_LER_TO_LIP, 0);
+  DebugCtl |= MSR_DEBUG_CTL_LBR;
+  AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);
+}
+
+/**
+  Enable branch trace store.
+
+  @param  CpuIndex  The index of the processor.
+
+**/
+VOID
+ActivateBTS (
+  IN      UINTN                     CpuIndex
+  )
+{
+  UINT64  DebugCtl;
+
+  DebugCtl = AsmReadMsr64 (MSR_DEBUG_CTL);
+  if ((DebugCtl & MSR_DEBUG_CTL_BTS) != 0) {
+    return ;
+  }
+
+  AsmWriteMsr64 (MSR_DS_AREA, (UINT64)(UINTN)mMsrDsArea[CpuIndex]);
+  DebugCtl |= (UINT64)(MSR_DEBUG_CTL_BTS | MSR_DEBUG_CTL_TR);
+  DebugCtl &= ~((UINT64)MSR_DEBUG_CTL_BTINT);
+  AsmWriteMsr64 (MSR_DEBUG_CTL, DebugCtl);
+}
+
+/**
+  Increase SMI number in each SMI entry.
+ +**/ +VOID +SmmProfileRecordSmiNum ( + VOID + ) +{ + if (mSmmProfileStart) { + mSmmProfileBase->NumSmis++; + } +} + +/** + Initialize processor environment for SMM profile. + + @param CpuIndex The index of the processor. + +**/ +VOID +ActivateSmmProfile ( + IN UINTN CpuIndex + ) +{ + // + // Enable Single Step DB# + // + ActivateSingleStepDB (); + + if (mBtsSupported) { + // + // We can not get useful information from LER, so we have to use BTS. + // + ActivateLBR (); + + // + // Enable BTS + // + ActivateBTS (CpuIndex); + } +} + +/** + Initialize SMM profile in SMM CPU entry point. + + @param[in] Cr3 The base address of the page tables to use in SMM. + +**/ +VOID +InitSmmProfile ( + UINT32 Cr3 + ) +{ + // + // Save Cr3 + // + mSmmProfileCr3 = Cr3; + + // + // Skip SMM profile initialization if feature is disabled + // + if (!FeaturePcdGet (PcdCpuSmmProfileEnable)) { + return; + } + + // + // Initialize SmmProfile here + // + InitSmmProfileInternal (); + + // + // Initialize profile IDT. + // + InitIdtr (); +} + +/** + Update page table to map the memory correctly in order to make the instruction + which caused page fault execute successfully. And it also save the original page + table to be restored in single-step exception. + + @param PageTable PageTable Address. + @param PFAddress The memory address which caused page fault exception. + @param CpuIndex The index of the processor. + @param ErrorCode The Error code of exception. + +**/ +VOID +RestorePageTableBelow4G ( + UINT64 *PageTable, + UINT64 PFAddress, + UINTN CpuIndex, + UINTN ErrorCode + ) +{ + UINTN PTIndex; + UINTN PFIndex; + + // + // PML4 + // + if (sizeof(UINT64) == sizeof(UINTN)) { + PTIndex = (UINTN)BitFieldRead64 (PFAddress, 39, 47); + ASSERT (PageTable[PTIndex] != 0); + PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK); + } + + // + // PDPTE + // + PTIndex = (UINTN)BitFieldRead64 (PFAddress, 30, 38); + ASSERT (PageTable[PTIndex] != 0); + PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK); + + // + // PD + // + PTIndex = (UINTN)BitFieldRead64 (PFAddress, 21, 29); + if ((PageTable[PTIndex] & IA32_PG_PS) != 0) { + // + // Large page + // + + // + // Record old entries with non-present status + // Old entries include the memory which instruction is at and the memory which instruction access. + // + // + ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT); + if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) { + PFIndex = mPFEntryCount[CpuIndex]; + mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex]; + mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex]; + mPFEntryCount[CpuIndex]++; + } + + // + // Set new entry + // + PageTable[PTIndex] = (PFAddress & ~((1ull << 21) - 1)); + PageTable[PTIndex] |= (UINT64)IA32_PG_PS; + PageTable[PTIndex] |= (UINT64)(IA32_PG_RW | IA32_PG_P); + if ((ErrorCode & IA32_PF_EC_ID) != 0) { + PageTable[PTIndex] &= ~IA32_PG_NX; + } + } else { + // + // Small page + // + ASSERT (PageTable[PTIndex] != 0); + PageTable = (UINT64*)(UINTN)(PageTable[PTIndex] & PHYSICAL_ADDRESS_MASK); + + // + // 4K PTE + // + PTIndex = (UINTN)BitFieldRead64 (PFAddress, 12, 20); + + // + // Record old entries with non-present status + // Old entries include the memory which instruction is at and the memory which instruction access. 
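+  // Example flow (illustrative): on the first fault during one instruction,
+  // mPFEntryCount[CpuIndex] is 0; the old entry value is saved in
+  // mLastPFEntryValue/mLastPFEntryPointer, the entry is remapped present so
+  // the access can complete, and the INT 1 single-step handler later writes
+  // the saved value back and flushes the TLB.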
+ // + // + ASSERT (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT); + if (mPFEntryCount[CpuIndex] < MAX_PF_ENTRY_COUNT) { + PFIndex = mPFEntryCount[CpuIndex]; + mLastPFEntryValue[CpuIndex][PFIndex] = PageTable[PTIndex]; + mLastPFEntryPointer[CpuIndex][PFIndex] = &PageTable[PTIndex]; + mPFEntryCount[CpuIndex]++; + } + + // + // Set new entry + // + PageTable[PTIndex] = (PFAddress & ~((1ull << 12) - 1)); + PageTable[PTIndex] |= (UINT64)(IA32_PG_RW | IA32_PG_P); + if ((ErrorCode & IA32_PF_EC_ID) != 0) { + PageTable[PTIndex] &= ~IA32_PG_NX; + } + } +} + +/** + The Page fault handler to save SMM profile data. + + @param Rip The RIP when exception happens. + @param ErrorCode The Error code of exception. + +**/ +VOID +SmmProfilePFHandler ( + UINTN Rip, + UINTN ErrorCode + ) +{ + UINT64 *PageTable; + UINT64 PFAddress; + UINTN CpuIndex; + UINTN Index; + UINT64 InstructionAddress; + UINTN MaxEntryNumber; + UINTN CurrentEntryNumber; + BOOLEAN IsValidPFAddress; + SMM_PROFILE_ENTRY *SmmProfileEntry; + UINT64 SmiCommand; + EFI_STATUS Status; + UINTN SwSmiCpuIndex; + UINT8 SoftSmiValue; + EFI_SMM_SAVE_STATE_IO_INFO IoInfo; + + if (!mSmmProfileStart) { + // + // If SMM profile does not start, call original page fault handler. + // + SmiDefaultPFHandler (); + return; + } + + if (mBtsSupported) { + DisableBTS (); + } + + IsValidPFAddress = FALSE; + PageTable = (UINT64 *)AsmReadCr3 (); + PFAddress = AsmReadCr2 (); + CpuIndex = GetCpuIndex (); + + if (PFAddress <= 0xFFFFFFFF) { + RestorePageTableBelow4G (PageTable, PFAddress, CpuIndex, ErrorCode); + } else { + RestorePageTableAbove4G (PageTable, PFAddress, CpuIndex, ErrorCode, &IsValidPFAddress); + } + + if (!IsValidPFAddress) { + InstructionAddress = Rip; + if ((ErrorCode & IA32_PF_EC_ID) != 0 && (mBtsSupported)) { + // + // If it is instruction fetch failure, get the correct IP from BTS. + // + InstructionAddress = GetSourceFromDestinationOnBts (CpuIndex, Rip); + if (InstructionAddress == 0) { + // + // It indicates the instruction which caused page fault is not a jump instruction, + // set instruction address same as the page fault address. + // + InstructionAddress = PFAddress; + } + } + + // + // Try to find which CPU trigger SWSMI + // + SwSmiCpuIndex = 0; + // + // Indicate it is not software SMI + // + SmiCommand = 0xFFFFFFFFFFFFFFFFULL; + for (Index = 0; Index < gSmst->NumberOfCpus; Index++) { + Status = SmmReadSaveState(&mSmmCpu, sizeof(IoInfo), EFI_SMM_SAVE_STATE_REGISTER_IO, Index, &IoInfo); + if (EFI_ERROR (Status)) { + continue; + } + if (IoInfo.IoPort == mSmiCommandPort) { + // + // Great! Find it. + // + SwSmiCpuIndex = Index; + // + // A software SMI triggered by SMI command port has been found, get SmiCommand from SMI command port. + // + SoftSmiValue = IoRead8 (mSmiCommandPort); + SmiCommand = (UINT64)SoftSmiValue; + break; + } + } + + SmmProfileEntry = (SMM_PROFILE_ENTRY *)(UINTN)(mSmmProfileBase + 1); + // + // Check if there is already a same entry in profile data. + // + for (Index = 0; Index < (UINTN) mSmmProfileBase->CurDataEntries; Index++) { + if ((SmmProfileEntry[Index].ErrorCode == (UINT64)ErrorCode) && + (SmmProfileEntry[Index].Address == PFAddress) && + (SmmProfileEntry[Index].CpuNum == (UINT64)CpuIndex) && + (SmmProfileEntry[Index].Instruction == InstructionAddress) && + (SmmProfileEntry[Index].SmiCmd == SmiCommand)) { + // + // Same record exist, need not save again. 
+        //
+        break;
+      }
+    }
+    if (Index == mSmmProfileBase->CurDataEntries) {
+      CurrentEntryNumber = (UINTN) mSmmProfileBase->CurDataEntries;
+      MaxEntryNumber     = (UINTN) mSmmProfileBase->MaxDataEntries;
+      if (FeaturePcdGet (PcdCpuSmmProfileRingBuffer)) {
+        CurrentEntryNumber = CurrentEntryNumber % MaxEntryNumber;
+      }
+      if (CurrentEntryNumber < MaxEntryNumber) {
+        //
+        // Log the new entry
+        //
+        SmmProfileEntry[CurrentEntryNumber].SmiNum      = mSmmProfileBase->NumSmis;
+        SmmProfileEntry[CurrentEntryNumber].ErrorCode   = (UINT64)ErrorCode;
+        SmmProfileEntry[CurrentEntryNumber].ApicId      = (UINT64)GetApicId ();
+        SmmProfileEntry[CurrentEntryNumber].CpuNum      = (UINT64)CpuIndex;
+        SmmProfileEntry[CurrentEntryNumber].Address     = PFAddress;
+        SmmProfileEntry[CurrentEntryNumber].Instruction = InstructionAddress;
+        SmmProfileEntry[CurrentEntryNumber].SmiCmd      = SmiCommand;
+        //
+        // Update current entry index and data size in the header.
+        //
+        mSmmProfileBase->CurDataEntries++;
+        mSmmProfileBase->CurDataSize = MultU64x64 (mSmmProfileBase->CurDataEntries, sizeof (SMM_PROFILE_ENTRY));
+      }
+    }
+  }
+  //
+  // Flush TLB
+  //
+  CpuFlushTlb ();
+
+  if (mBtsSupported) {
+    EnableBTS ();
+  }
+}
+
+/**
+  Replace the INT 1 (single-step) exception handler, which restores the page table
+  to its absent/execute-disable state so that the next access triggers a page fault
+  again and more SMM profile data can be saved.
+
+**/
+VOID
+InitIdtr (
+  VOID
+  )
+{
+  SmmRegisterExceptionHandler (&mSmmCpuService, EXCEPT_IA32_DEBUG, DebugExceptionHandler);
+}
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.h b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.h
new file mode 100644
index 0000000000..4548467458
--- /dev/null
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfile.h
@@ -0,0 +1,134 @@
+/** @file
+SMM profile header file.
+
+Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.
+This program and the accompanying materials +are licensed and made available under the terms and conditions of the BSD License +which accompanies this distribution. The full text of the license may be found at +http://opensource.org/licenses/bsd-license.php + +THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. + +**/ + +#ifndef _SMM_PROFILE_H_ +#define _SMM_PROFILE_H_ + +#include "SmmProfileInternal.h" + +/// +/// MSR Register Index +/// +#define MSR_IA32_MISC_ENABLE 0x1A0 + +// +// External functions +// + +/** + Initialize processor environment for SMM profile. + + @param CpuIndex The index of the processor. + +**/ +VOID +ActivateSmmProfile ( + IN UINTN CpuIndex + ); + +/** + Initialize SMM profile in SMM CPU entry point. + + @param[in] Cr3 The base address of the page tables to use in SMM. + +**/ +VOID +InitSmmProfile ( + UINT32 Cr3 + ); + +/** + Increase SMI number in each SMI entry. + +**/ +VOID +SmmProfileRecordSmiNum ( + VOID + ); + +/** + The Page fault handler to save SMM profile data. + + @param Rip The RIP when exception happens. + @param ErrorCode The Error code of exception. + +**/ +VOID +SmmProfilePFHandler ( + UINTN Rip, + UINTN ErrorCode + ); + +/** + Updates page table to make some memory ranges (like system memory) absent + and make some memory ranges (like MMIO) present and execute disable. It also + update 2MB-page to 4KB-page for some memory ranges. + +**/ +VOID +SmmProfileStart ( + VOID + ); + +/** + Page fault IDT handler for SMM Profile. + +**/ +VOID +EFIAPI +PageFaultIdtHandlerSmmProfile ( + VOID + ); + + +/** + Check if XD feature is supported by a processor. + +**/ +VOID +CheckFeatureSupported ( + VOID + ); + +/** + Enable XD feature. + +**/ +VOID +ActivateXd ( + VOID + ); + +/** + Update page table according to protected memory ranges and the 4KB-page mapped memory ranges. + +**/ +VOID +InitPaging ( + VOID + ); + +/** + Check if XD and BTS features are supported by all processors. + +**/ +VOID +CheckProcessorFeature ( + VOID + ); + +extern BOOLEAN mXdSupported; +extern BOOLEAN mXdEnabled; + +#endif // _SMM_PROFILE_H_ diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfileInternal.h b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfileInternal.h new file mode 100644 index 0000000000..de6eb0aceb --- /dev/null +++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfileInternal.h @@ -0,0 +1,172 @@ +/** @file +SMM profile internal header file. + +Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfileInternal.h b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfileInternal.h
new file mode 100644
index 0000000000..de6eb0aceb
--- /dev/null
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmmProfileInternal.h
@@ -0,0 +1,172 @@
+/** @file
+SMM profile internal header file.
+
+Copyright (c) 2012 - 2015, Intel Corporation. All rights reserved.
+This program and the accompanying materials
+are licensed and made available under the terms and conditions of the BSD License
+which accompanies this distribution.  The full text of the license may be found at
+http://opensource.org/licenses/bsd-license.php
+
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#ifndef _SMM_PROFILE_INTERNAL_H_
+#define _SMM_PROFILE_INTERNAL_H_
+
+#include
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+
+#include "SmmProfileArch.h"
+
+//
+// Configure the SMM_PROFILE DTS region size
+//
+#define SMM_PROFILE_DTS_SIZE  (4 * 1024 * 1024)  // 4M
+
+#define MAX_PF_PAGE_COUNT  0x2
+
+#define PEBS_RECORD_NUMBER  0x2
+
+#define MAX_PF_ENTRY_COUNT  10
+
+//
+// This MACRO just enables unit test for the profile.
+// Please keep it disabled.
+//
+
+#define IA32_PF_EC_P     (1u << 0)
+#define IA32_PF_EC_WR    (1u << 1)
+#define IA32_PF_EC_US    (1u << 2)
+#define IA32_PF_EC_RSVD  (1u << 3)
+#define IA32_PF_EC_ID    (1u << 4)
+
+#define SMM_PROFILE_NAME  L"SmmProfileData"
+
+//
+// CPU generic definition
+//
+#define CPUID1_EDX_XD_SUPPORT  0x100000
+#define MSR_EFER               0xc0000080
+#define MSR_EFER_XD            0x800
+
+#define CPUID1_EDX_BTS_AVAILABLE  0x200000
+
+#define DR6_SINGLE_STEP  0x4000
+#define RFLAG_TF         0x100
+
+#define MSR_DEBUG_CTL        0x1D9
+#define MSR_DEBUG_CTL_LBR    0x1
+#define MSR_DEBUG_CTL_TR     0x40
+#define MSR_DEBUG_CTL_BTS    0x80
+#define MSR_DEBUG_CTL_BTINT  0x100
+#define MSR_LASTBRANCH_TOS   0x1C9
+#define MSR_LER_FROM_LIP     0x1DD
+#define MSR_LER_TO_LIP       0x1DE
+#define MSR_DS_AREA          0x600
+
+typedef struct {
+  EFI_PHYSICAL_ADDRESS  Base;
+  EFI_PHYSICAL_ADDRESS  Top;
+} MEMORY_RANGE;
+
+typedef struct {
+  MEMORY_RANGE  Range;
+  BOOLEAN       Present;
+  BOOLEAN       Nx;
+} MEMORY_PROTECTION_RANGE;
+
+typedef struct {
+  UINT64  HeaderSize;
+  UINT64  MaxDataEntries;
+  UINT64  MaxDataSize;
+  UINT64  CurDataEntries;
+  UINT64  CurDataSize;
+  UINT64  TsegStart;
+  UINT64  TsegSize;
+  UINT64  NumSmis;
+  UINT64  NumCpus;
+} SMM_PROFILE_HEADER;
+
+typedef struct {
+  UINT64  SmiNum;
+  UINT64  CpuNum;
+  UINT64  ApicId;
+  UINT64  ErrorCode;
+  UINT64  Instruction;
+  UINT64  Address;
+  UINT64  SmiCmd;
+} SMM_PROFILE_ENTRY;
+
+extern SMM_S3_RESUME_STATE  *mSmmS3ResumeState;
+extern UINTN                gSmiExceptionHandlers[];
+extern BOOLEAN              mXdSupported;
+extern UINTN                *mPFEntryCount;
+extern UINT64               (*mLastPFEntryValue)[MAX_PF_ENTRY_COUNT];
+extern UINT64               *(*mLastPFEntryPointer)[MAX_PF_ENTRY_COUNT];
+
+//
+// Internal functions
+//
+
+/**
+  Update IDT table to replace page fault handler and INT 1 handler.
+
+**/
+VOID
+InitIdtr (
+  VOID
+  );
+
+/**
+  Check if the memory address will be mapped by 4KB-page.
+
+  @param  Address  The address of Memory.
+
+**/
+BOOLEAN
+IsAddressSplit (
+  IN EFI_PHYSICAL_ADDRESS  Address
+  );
+
+/**
+  Check if the memory address is valid (i.e. should be mapped present),
+  and report whether it is execute-disabled.
+
+  @param  Address  The address of Memory.
+  @param  Nx       The flag indicating if the memory is execute-disabled.
+
+**/
+BOOLEAN
+IsAddressValid (
+  IN EFI_PHYSICAL_ADDRESS  Address,
+  IN BOOLEAN               *Nx
+  );
+
+/**
+  Page Fault handler for SMM use.
+
+**/
+VOID
+SmiDefaultPFHandler (
+  VOID
+  );
+
+/**
+  Clear TF in FLAGS.
+
+  @param  SystemContext  A pointer to the processor context when
+                         the interrupt occurred on the processor.
+
+**/
+VOID
+ClearTrapFlag (
+  IN OUT EFI_SYSTEM_CONTEXT  SystemContext
+  );
+
+#endif // _SMM_PROFILE_INTERNAL_H_
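Given the SMM_PROFILE_HEADER and SMM_PROFILE_ENTRY layouts above, it is easy to derive how many entries fit in the profile buffer. A stand-alone sketch of that arithmetic (the assumption that the entries immediately follow the header in the buffer is mine, not stated in this header):

    #include <stdint.h>
    #include <stdio.h>

    /* Field-count mirrors of the structures above, for sizing only. */
    typedef struct { uint64_t F[9]; } SMM_PROFILE_HEADER;  /* 9 UINT64 fields */
    typedef struct { uint64_t F[7]; } SMM_PROFILE_ENTRY;   /* 7 UINT64 fields */

    int main (void)
    {
      uint64_t BufferSize = 0x200000;  /* PcdCpuSmmProfileSize default */
      uint64_t MaxEntries = (BufferSize - sizeof (SMM_PROFILE_HEADER))
                            / sizeof (SMM_PROFILE_ENTRY);
      printf ("capacity: %llu entries\n", (unsigned long long)MaxEntries);
      return 0;
    }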
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SmramSaveState.c b/UefiCpuPkg/PiSmmCpuDxeSmm/SmramSaveState.c
new file mode 100644
index 0000000000..539c0294cd
--- /dev/null
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SmramSaveState.c
@@ -0,0 +1,700 @@
+/** @file
+Provides services to access SMRAM Save State Map
+
+Copyright (c) 2010 - 2015, Intel Corporation. All rights reserved.
+This program and the accompanying materials
+are licensed and made available under the terms and conditions of the BSD License
+which accompanies this distribution.  The full text of the license may be found at
+http://opensource.org/licenses/bsd-license.php
+
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+//
+// EFER register LMA bit
+//
+#define LMA  BIT10
+
+///
+/// Macro used to simplify the lookup table entries of type CPU_SMM_SAVE_STATE_LOOKUP_ENTRY
+///
+#define SMM_CPU_OFFSET(Field)  OFFSET_OF (SMRAM_SAVE_STATE_MAP, Field)
+
+///
+/// Macro used to simplify the lookup table entries of type CPU_SMM_SAVE_STATE_REGISTER_RANGE
+///
+#define SMM_REGISTER_RANGE(Start, End)  { Start, End, End - Start + 1 }
+
+///
+/// Structure used to describe a range of registers
+///
+typedef struct {
+  EFI_SMM_SAVE_STATE_REGISTER  Start;
+  EFI_SMM_SAVE_STATE_REGISTER  End;
+  UINTN                        Length;
+} CPU_SMM_SAVE_STATE_REGISTER_RANGE;
+
+///
+/// Structure used to build a lookup table to retrieve the widths and offsets
+/// associated with each supported EFI_SMM_SAVE_STATE_REGISTER value
+///
+
+#define SMM_SAVE_STATE_REGISTER_SMMREVID_INDEX   1
+#define SMM_SAVE_STATE_REGISTER_IOMISC_INDEX     2
+#define SMM_SAVE_STATE_REGISTER_IOMEMADDR_INDEX  3
+#define SMM_SAVE_STATE_REGISTER_MAX_INDEX        4
+
+typedef struct {
+  UINT8    Width32;
+  UINT8    Width64;
+  UINT16   Offset32;
+  UINT16   Offset64Lo;
+  UINT16   Offset64Hi;
+  BOOLEAN  Writeable;
+} CPU_SMM_SAVE_STATE_LOOKUP_ENTRY;
+
+///
+/// Structure used to build a lookup table for the IOMisc width information
+///
+typedef struct {
+  UINT8                        Width;
+  EFI_SMM_SAVE_STATE_IO_WIDTH  IoWidth;
+} CPU_SMM_SAVE_STATE_IO_WIDTH;
+
+///
+/// Variables from SMI Handler
+///
+extern UINT32           gSmbase;
+extern volatile UINT32  gSmiStack;
+extern UINT32           gSmiCr3;
+extern volatile UINT8   gcSmiHandlerTemplate[];
+extern CONST UINT16     gcSmiHandlerSize;
+
+//
+// Variables used by SMI Handler
+//
+IA32_DESCRIPTOR  gSmiHandlerIdtr;
+
+///
+/// Table used by GetRegisterIndex() to convert an EFI_SMM_SAVE_STATE_REGISTER
+/// value to an index into a table of type CPU_SMM_SAVE_STATE_LOOKUP_ENTRY
+///
+CONST CPU_SMM_SAVE_STATE_REGISTER_RANGE  mSmmCpuRegisterRanges[] = {
+  SMM_REGISTER_RANGE (EFI_SMM_SAVE_STATE_REGISTER_GDTBASE, EFI_SMM_SAVE_STATE_REGISTER_LDTINFO),
+  SMM_REGISTER_RANGE (EFI_SMM_SAVE_STATE_REGISTER_ES,      EFI_SMM_SAVE_STATE_REGISTER_RIP),
+  SMM_REGISTER_RANGE (EFI_SMM_SAVE_STATE_REGISTER_RFLAGS,  EFI_SMM_SAVE_STATE_REGISTER_CR4),
+  { (EFI_SMM_SAVE_STATE_REGISTER)0, (EFI_SMM_SAVE_STATE_REGISTER)0, 0 }
+};
+
+///
+/// Lookup table used to retrieve the widths and offsets associated with each
+/// supported EFI_SMM_SAVE_STATE_REGISTER value
+///
+CONST CPU_SMM_SAVE_STATE_LOOKUP_ENTRY  mSmmCpuWidthOffset[] = {
+  {0, 0, 0, 0, 0, FALSE},  // Reserved
+
+  //
+  // Internally defined CPU Save State Registers. Not defined in PI SMM CPU Protocol.
+  //
+  {4, 4, SMM_CPU_OFFSET (x86.SMMRevId)  , SMM_CPU_OFFSET (x64.SMMRevId)  , 0                                , FALSE},  // SMM_SAVE_STATE_REGISTER_SMMREVID_INDEX  = 1
+  {4, 4, SMM_CPU_OFFSET (x86.IOMisc)    , SMM_CPU_OFFSET (x64.IOMisc)    , 0                                , FALSE},  // SMM_SAVE_STATE_REGISTER_IOMISC_INDEX    = 2
+  {4, 8, SMM_CPU_OFFSET (x86.IOMemAddr) , SMM_CPU_OFFSET (x64.IOMemAddr) , SMM_CPU_OFFSET (x64.IOMemAddr) + 4, FALSE},  // SMM_SAVE_STATE_REGISTER_IOMEMADDR_INDEX = 3
+
+  //
+  // CPU Save State registers defined in PI SMM CPU Protocol.
+  //
+  {0, 8, 0                          , SMM_CPU_OFFSET (x64.GdtBaseLoDword) , SMM_CPU_OFFSET (x64.GdtBaseHiDword), FALSE},  // EFI_SMM_SAVE_STATE_REGISTER_GDTBASE  = 4
+  {0, 8, 0                          , SMM_CPU_OFFSET (x64.IdtBaseLoDword) , SMM_CPU_OFFSET (x64.IdtBaseHiDword), FALSE},  // EFI_SMM_SAVE_STATE_REGISTER_IDTBASE  = 5
+  {0, 8, 0                          , SMM_CPU_OFFSET (x64.LdtBaseLoDword) , SMM_CPU_OFFSET (x64.LdtBaseHiDword), FALSE},  // EFI_SMM_SAVE_STATE_REGISTER_LDTBASE  = 6
+  {0, 0, 0                          , 0                                   , 0                                  , FALSE},  // EFI_SMM_SAVE_STATE_REGISTER_GDTLIMIT = 7
+  {0, 0, 0                          , 0                                   , 0                                  , FALSE},  // EFI_SMM_SAVE_STATE_REGISTER_IDTLIMIT = 8
+  {0, 0, 0                          , 0                                   , 0                                  , FALSE},  // EFI_SMM_SAVE_STATE_REGISTER_LDTLIMIT = 9
+  {0, 0, 0                          , 0                                   , 0                                  , FALSE},  // EFI_SMM_SAVE_STATE_REGISTER_LDTINFO  = 10
+
+  {4, 4, SMM_CPU_OFFSET (x86._ES)   , SMM_CPU_OFFSET (x64._ES)   , 0                            , FALSE},  // EFI_SMM_SAVE_STATE_REGISTER_ES       = 20
+  {4, 4, SMM_CPU_OFFSET (x86._CS)   , SMM_CPU_OFFSET (x64._CS)   , 0                            , FALSE},  // EFI_SMM_SAVE_STATE_REGISTER_CS       = 21
+  {4, 4, SMM_CPU_OFFSET (x86._SS)   , SMM_CPU_OFFSET (x64._SS)   , 0                            , FALSE},  // EFI_SMM_SAVE_STATE_REGISTER_SS       = 22
+  {4, 4, SMM_CPU_OFFSET (x86._DS)   , SMM_CPU_OFFSET (x64._DS)   , 0                            , FALSE},  // EFI_SMM_SAVE_STATE_REGISTER_DS       = 23
+  {4, 4, SMM_CPU_OFFSET (x86._FS)   , SMM_CPU_OFFSET (x64._FS)   , 0                            , FALSE},  // EFI_SMM_SAVE_STATE_REGISTER_FS       = 24
+  {4, 4, SMM_CPU_OFFSET (x86._GS)   , SMM_CPU_OFFSET (x64._GS)   , 0                            , FALSE},  // EFI_SMM_SAVE_STATE_REGISTER_GS       = 25
+  {0, 4, 0                          , SMM_CPU_OFFSET (x64._LDTR) , 0                            , FALSE},  // EFI_SMM_SAVE_STATE_REGISTER_LDTR_SEL = 26
+  {4, 4, SMM_CPU_OFFSET (x86._TR)   , SMM_CPU_OFFSET (x64._TR)   , 0                            , FALSE},  // EFI_SMM_SAVE_STATE_REGISTER_TR_SEL   = 27
+  {4, 8, SMM_CPU_OFFSET (x86._DR7)  , SMM_CPU_OFFSET (x64._DR7)  , SMM_CPU_OFFSET (x64._DR7) + 4, FALSE},  // EFI_SMM_SAVE_STATE_REGISTER_DR7      = 28
+  {4, 8, SMM_CPU_OFFSET (x86._DR6)  , SMM_CPU_OFFSET (x64._DR6)  , SMM_CPU_OFFSET (x64._DR6) + 4, FALSE},  // EFI_SMM_SAVE_STATE_REGISTER_DR6      = 29
+  {0, 8, 0                          , SMM_CPU_OFFSET (x64._R8)   , SMM_CPU_OFFSET (x64._R8) + 4 , TRUE },  // EFI_SMM_SAVE_STATE_REGISTER_R8       = 30
+  {0, 8, 0                          , SMM_CPU_OFFSET (x64._R9)   , SMM_CPU_OFFSET (x64._R9) + 4 , TRUE },  // EFI_SMM_SAVE_STATE_REGISTER_R9       = 31
+  {0, 8, 0                          , SMM_CPU_OFFSET (x64._R10)  , SMM_CPU_OFFSET (x64._R10) + 4, TRUE },  // EFI_SMM_SAVE_STATE_REGISTER_R10      = 32
+  {0, 8, 0                          , SMM_CPU_OFFSET (x64._R11)  , SMM_CPU_OFFSET (x64._R11) + 4, TRUE },  // EFI_SMM_SAVE_STATE_REGISTER_R11      = 33
+  {0, 8, 0                          , SMM_CPU_OFFSET (x64._R12)  , SMM_CPU_OFFSET (x64._R12) + 4, TRUE },  // EFI_SMM_SAVE_STATE_REGISTER_R12      = 34
+  {0, 8, 0                          , SMM_CPU_OFFSET (x64._R13)  , SMM_CPU_OFFSET (x64._R13) + 4, TRUE },  // EFI_SMM_SAVE_STATE_REGISTER_R13      = 35
+  {0, 8, 0                          , SMM_CPU_OFFSET (x64._R14)  , SMM_CPU_OFFSET (x64._R14) + 4, TRUE },  // EFI_SMM_SAVE_STATE_REGISTER_R14      = 36
+  {0, 8, 0                          , SMM_CPU_OFFSET (x64._R15)  , SMM_CPU_OFFSET (x64._R15) + 4, TRUE },  // EFI_SMM_SAVE_STATE_REGISTER_R15      = 37
+  {4, 8, SMM_CPU_OFFSET (x86._EAX)  , SMM_CPU_OFFSET (x64._RAX)  , SMM_CPU_OFFSET (x64._RAX) + 4, TRUE },  // EFI_SMM_SAVE_STATE_REGISTER_RAX      = 38
+  {4, 8, SMM_CPU_OFFSET (x86._EBX)  , SMM_CPU_OFFSET (x64._RBX)  , SMM_CPU_OFFSET (x64._RBX) + 4, TRUE },  // EFI_SMM_SAVE_STATE_REGISTER_RBX      = 39
+  {4, 8, SMM_CPU_OFFSET (x86._ECX)  , SMM_CPU_OFFSET (x64._RCX)  , SMM_CPU_OFFSET (x64._RCX) + 4, TRUE },  // EFI_SMM_SAVE_STATE_REGISTER_RCX      = 40
+  {4, 8, SMM_CPU_OFFSET (x86._EDX)  , SMM_CPU_OFFSET (x64._RDX)  , SMM_CPU_OFFSET (x64._RDX) + 4, TRUE },  // EFI_SMM_SAVE_STATE_REGISTER_RDX      = 41
+  {4, 8, SMM_CPU_OFFSET (x86._ESP)  , SMM_CPU_OFFSET (x64._RSP)  , SMM_CPU_OFFSET (x64._RSP) + 4, TRUE },  // EFI_SMM_SAVE_STATE_REGISTER_RSP      = 42
+  {4, 8, SMM_CPU_OFFSET (x86._EBP)  , SMM_CPU_OFFSET (x64._RBP)  , SMM_CPU_OFFSET (x64._RBP) + 4, TRUE },  // EFI_SMM_SAVE_STATE_REGISTER_RBP      = 43
+  {4, 8, SMM_CPU_OFFSET (x86._ESI)  , SMM_CPU_OFFSET (x64._RSI)  , SMM_CPU_OFFSET (x64._RSI) + 4, TRUE },  // EFI_SMM_SAVE_STATE_REGISTER_RSI      = 44
+  {4, 8, SMM_CPU_OFFSET (x86._EDI)  , SMM_CPU_OFFSET (x64._RDI)  , SMM_CPU_OFFSET (x64._RDI) + 4, TRUE },  // EFI_SMM_SAVE_STATE_REGISTER_RDI      = 45
+  {4, 8, SMM_CPU_OFFSET (x86._EIP)  , SMM_CPU_OFFSET (x64._RIP)  , SMM_CPU_OFFSET (x64._RIP) + 4, TRUE },  // EFI_SMM_SAVE_STATE_REGISTER_RIP      = 46
+
+  {4, 8, SMM_CPU_OFFSET (x86._EFLAGS), SMM_CPU_OFFSET (x64._RFLAGS), SMM_CPU_OFFSET (x64._RFLAGS) + 4, TRUE },  // EFI_SMM_SAVE_STATE_REGISTER_RFLAGS = 51
+  {4, 8, SMM_CPU_OFFSET (x86._CR0)   , SMM_CPU_OFFSET (x64._CR0)   , SMM_CPU_OFFSET (x64._CR0) + 4   , FALSE},  // EFI_SMM_SAVE_STATE_REGISTER_CR0    = 52
+  {4, 8, SMM_CPU_OFFSET (x86._CR3)   , SMM_CPU_OFFSET (x64._CR3)   , SMM_CPU_OFFSET (x64._CR3) + 4   , FALSE},  // EFI_SMM_SAVE_STATE_REGISTER_CR3    = 53
+  {0, 4, 0                           , SMM_CPU_OFFSET (x64._CR4)   , 0                               , FALSE},  // EFI_SMM_SAVE_STATE_REGISTER_CR4    = 54
+};
+
+///
+/// Lookup table for the IOMisc width information
+///
+CONST CPU_SMM_SAVE_STATE_IO_WIDTH  mSmmCpuIoWidth[] = {
+  { 0, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT8  },  // Undefined           = 0
+  { 1, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT8  },  // SMM_IO_LENGTH_BYTE  = 1
+  { 2, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT16 },  // SMM_IO_LENGTH_WORD  = 2
+  { 0, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT8  },  // Undefined           = 3
+  { 4, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT32 },  // SMM_IO_LENGTH_DWORD = 4
+  { 0, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT8  },  // Undefined           = 5
+  { 0, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT8  },  // Undefined           = 6
+  { 0, EFI_SMM_SAVE_STATE_IO_WIDTH_UINT8  }   // Undefined           = 7
+};
+
+///
+/// Lookup table for the IOMisc type information
+///
+CONST EFI_SMM_SAVE_STATE_IO_TYPE  mSmmCpuIoType[] = {
+  EFI_SMM_SAVE_STATE_IO_TYPE_OUTPUT,      // SMM_IO_TYPE_OUT_DX        = 0
+  EFI_SMM_SAVE_STATE_IO_TYPE_INPUT,       // SMM_IO_TYPE_IN_DX         = 1
+  EFI_SMM_SAVE_STATE_IO_TYPE_STRING,      // SMM_IO_TYPE_OUTS          = 2
+  EFI_SMM_SAVE_STATE_IO_TYPE_STRING,      // SMM_IO_TYPE_INS           = 3
+  (EFI_SMM_SAVE_STATE_IO_TYPE)0,          // Undefined                 = 4
+  (EFI_SMM_SAVE_STATE_IO_TYPE)0,          // Undefined                 = 5
+  EFI_SMM_SAVE_STATE_IO_TYPE_REP_PREFIX,  // SMM_IO_TYPE_REP_OUTS      = 6
+  EFI_SMM_SAVE_STATE_IO_TYPE_REP_PREFIX,  // SMM_IO_TYPE_REP_INS       = 7
+  EFI_SMM_SAVE_STATE_IO_TYPE_OUTPUT,      // SMM_IO_TYPE_OUT_IMMEDIATE = 8
+  EFI_SMM_SAVE_STATE_IO_TYPE_INPUT,       // SMM_IO_TYPE_IN_IMMEDIATE  = 9
+  (EFI_SMM_SAVE_STATE_IO_TYPE)0,          // Undefined                 = 10
+  (EFI_SMM_SAVE_STATE_IO_TYPE)0,          // Undefined                 = 11
+  (EFI_SMM_SAVE_STATE_IO_TYPE)0,          // Undefined                 = 12
+  (EFI_SMM_SAVE_STATE_IO_TYPE)0,          // Undefined                 = 13
+  (EFI_SMM_SAVE_STATE_IO_TYPE)0,          // Undefined                 = 14
+  (EFI_SMM_SAVE_STATE_IO_TYPE)0           // Undefined                 = 15
+};
+
+///
+/// The mode of the CPU at the time an SMI occurs
+///
+UINT8  mSmmSaveStateRegisterLma;
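The Offset64Lo/Offset64Hi convention in the table above stores each 64-bit save state field as two adjacent 32-bit halves, with the high half at Lo + 4. A stand-alone model of reassembling such a value (this sketch assumes a little-endian host, as on x86):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    int main (void)
    {
      uint8_t  SaveState[16] = {0};
      uint64_t Rax = 0x1122334455667788ULL;
      memcpy (SaveState + 0, &Rax, 8);   /* 64-bit field at offset 0     */

      uint32_t Lo, Hi;
      memcpy (&Lo, SaveState + 0, 4);    /* Offset64Lo                   */
      memcpy (&Hi, SaveState + 4, 4);    /* Offset64Hi = Offset64Lo + 4  */
      uint64_t Value = ((uint64_t)Hi << 32) | Lo;
      printf ("0x%llx\n", (unsigned long long)Value);
      return 0;
    }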
+/**
+  Read information from the CPU save state.
+
+  @param  Register  Specifies the CPU register to read from the save state.
+
+  @retval 0   Register is not valid
+  @retval >0  Index into mSmmCpuWidthOffset[] associated with Register
+
+**/
+UINTN
+GetRegisterIndex (
+  IN EFI_SMM_SAVE_STATE_REGISTER  Register
+  )
+{
+  UINTN  Index;
+  UINTN  Offset;
+
+  for (Index = 0, Offset = SMM_SAVE_STATE_REGISTER_MAX_INDEX; mSmmCpuRegisterRanges[Index].Length != 0; Index++) {
+    if (Register >= mSmmCpuRegisterRanges[Index].Start && Register <= mSmmCpuRegisterRanges[Index].End) {
+      return Register - mSmmCpuRegisterRanges[Index].Start + Offset;
+    }
+    Offset += mSmmCpuRegisterRanges[Index].Length;
+  }
+  return 0;
+}
+
+/**
+  Read a CPU Save State register on the target processor.
+
+  This function abstracts away whether the CPU Save State register is in the
+  IA32 CPU Save State Map or the X64 CPU Save State Map.
+
+  This function supports reading a CPU Save State register in SMBase relocation handler.
+
+  @param[in]  CpuIndex       Specifies the zero-based index of the CPU save state.
+  @param[in]  RegisterIndex  Index into mSmmCpuWidthOffset[] lookup table.
+  @param[in]  Width          The number of bytes to read from the CPU save state.
+  @param[out] Buffer         Upon return, this holds the CPU register value read from the save state.
+
+  @retval EFI_SUCCESS            The register was read from Save State.
+  @retval EFI_NOT_FOUND          The register is not defined for the Save State of Processor.
+  @retval EFI_INVALID_PARAMETER  Buffer is NULL or Width is not correct.
+
+**/
+EFI_STATUS
+ReadSaveStateRegisterByIndex (
+  IN UINTN  CpuIndex,
+  IN UINTN  RegisterIndex,
+  IN UINTN  Width,
+  OUT VOID  *Buffer
+  )
+{
+  SMRAM_SAVE_STATE_MAP  *CpuSaveState;
+
+  if (RegisterIndex == 0) {
+    return EFI_NOT_FOUND;
+  }
+
+  CpuSaveState = gSmst->CpuSaveState[CpuIndex];
+
+  if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
+    //
+    // If 32-bit mode width is zero, then the specified register can not be accessed
+    //
+    if (mSmmCpuWidthOffset[RegisterIndex].Width32 == 0) {
+      return EFI_NOT_FOUND;
+    }
+
+    //
+    // If Width is bigger than the 32-bit mode width, then the specified register can not be accessed
+    //
+    if (Width > mSmmCpuWidthOffset[RegisterIndex].Width32) {
+      return EFI_INVALID_PARAMETER;
+    }
+
+    //
+    // Write return buffer
+    //
+    ASSERT (CpuSaveState != NULL);
+    CopyMem (Buffer, (UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset32, Width);
+  } else {
+    //
+    // If 64-bit mode width is zero, then the specified register can not be accessed
+    //
+    if (mSmmCpuWidthOffset[RegisterIndex].Width64 == 0) {
+      return EFI_NOT_FOUND;
+    }
+
+    //
+    // If Width is bigger than the 64-bit mode width, then the specified register can not be accessed
+    //
+    if (Width > mSmmCpuWidthOffset[RegisterIndex].Width64) {
+      return EFI_INVALID_PARAMETER;
+    }
+
+    //
+    // Write lower 32-bits of return buffer
+    //
+    CopyMem (Buffer, (UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset64Lo, MIN (4, Width));
+    if (Width >= 4) {
+      //
+      // Write upper 32-bits of return buffer
+      //
+      CopyMem ((UINT8 *)Buffer + 4, (UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset64Hi, Width - 4);
+    }
+  }
+  return EFI_SUCCESS;
+}
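GetRegisterIndex() above works because the register IDs come in contiguous runs that map onto consecutive lookup-table slots placed after the internal (fixed) entries. A stand-alone model of that walk, using the numeric ranges from the tables above:

    #include <stdio.h>

    typedef struct { int Start; int End; int Length; } RANGE;

    static const RANGE Ranges[] = {
      {  4, 10,  7 },   /* GDTBASE .. LDTINFO */
      { 20, 46, 27 },   /* ES .. RIP          */
      { 51, 54,  4 },   /* RFLAGS .. CR4      */
      {  0,  0,  0 }
    };

    static int GetIndex (int Register)
    {
      int Offset = 4;   /* SMM_SAVE_STATE_REGISTER_MAX_INDEX */
      for (int i = 0; Ranges[i].Length != 0; i++) {
        if (Register >= Ranges[i].Start && Register <= Ranges[i].End) {
          return Register - Ranges[i].Start + Offset;
        }
        Offset += Ranges[i].Length;
      }
      return 0;         /* not a valid register */
    }

    int main (void)
    {
      printf ("RAX(38) -> index %d\n", GetIndex (38));  /* 4 + 7 + (38 - 20) = 29 */
      return 0;
    }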
+/**
+  Read a CPU Save State register on the target processor.
+
+  This function abstracts away whether the CPU Save State register is in the
+  IA32 CPU Save State Map or the X64 CPU Save State Map.
+
+  This function supports reading a CPU Save State register in SMBase relocation handler.
+
+  @param[in]  CpuIndex  Specifies the zero-based index of the CPU save state.
+  @param[in]  Register  The EFI_SMM_SAVE_STATE_REGISTER to read.
+  @param[in]  Width     The number of bytes to read from the CPU save state.
+  @param[out] Buffer    Upon return, this holds the CPU register value read from the save state.
+
+  @retval EFI_SUCCESS            The register was read from Save State.
+  @retval EFI_NOT_FOUND          The register is not defined for the Save State of Processor.
+  @retval EFI_INVALID_PARAMETER  Buffer is NULL or Width is not correct.
+
+**/
+EFI_STATUS
+EFIAPI
+ReadSaveStateRegister (
+  IN UINTN                        CpuIndex,
+  IN EFI_SMM_SAVE_STATE_REGISTER  Register,
+  IN UINTN                        Width,
+  OUT VOID                        *Buffer
+  )
+{
+  UINT32                      SmmRevId;
+  SMRAM_SAVE_STATE_IOMISC     IoMisc;
+  EFI_SMM_SAVE_STATE_IO_INFO  *IoInfo;
+  VOID                        *IoMemAddr;
+
+  //
+  // Check for special EFI_SMM_SAVE_STATE_REGISTER_LMA
+  //
+  if (Register == EFI_SMM_SAVE_STATE_REGISTER_LMA) {
+    //
+    // Only byte access is supported for this register
+    //
+    if (Width != 1) {
+      return EFI_INVALID_PARAMETER;
+    }
+
+    *(UINT8 *)Buffer = mSmmSaveStateRegisterLma;
+
+    return EFI_SUCCESS;
+  }
+
+  //
+  // Check for special EFI_SMM_SAVE_STATE_REGISTER_IO
+  //
+  if (Register == EFI_SMM_SAVE_STATE_REGISTER_IO) {
+    //
+    // Get SMM Revision ID
+    //
+    ReadSaveStateRegisterByIndex (CpuIndex, SMM_SAVE_STATE_REGISTER_SMMREVID_INDEX, sizeof (SmmRevId), &SmmRevId);
+
+    //
+    // See if the CPU supports the IOMisc register in the save state
+    //
+    if (SmmRevId < SMRAM_SAVE_STATE_MIN_REV_ID_IOMISC) {
+      return EFI_NOT_FOUND;
+    }
+
+    //
+    // Get the IOMisc register value
+    //
+    ReadSaveStateRegisterByIndex (CpuIndex, SMM_SAVE_STATE_REGISTER_IOMISC_INDEX, sizeof (IoMisc.Uint32), &IoMisc.Uint32);
+
+    //
+    // Check for the SMI_FLAG in IOMisc
+    //
+    if (IoMisc.Bits.SmiFlag == 0) {
+      return EFI_NOT_FOUND;
+    }
+
+    //
+    // Compute index for the I/O Length and I/O Type lookup tables
+    //
+    if (mSmmCpuIoWidth[IoMisc.Bits.Length].Width == 0 || mSmmCpuIoType[IoMisc.Bits.Type] == 0) {
+      return EFI_NOT_FOUND;
+    }
+
+    //
+    // Zero the IoInfo structure that will be returned in Buffer
+    //
+    IoInfo = (EFI_SMM_SAVE_STATE_IO_INFO *)Buffer;
+    ZeroMem (IoInfo, sizeof (EFI_SMM_SAVE_STATE_IO_INFO));
+
+    //
+    // Use lookup tables to help fill in all the fields of the IoInfo structure
+    //
+    IoInfo->IoPort  = (UINT16)IoMisc.Bits.Port;
+    IoInfo->IoWidth = mSmmCpuIoWidth[IoMisc.Bits.Length].IoWidth;
+    IoInfo->IoType  = mSmmCpuIoType[IoMisc.Bits.Type];
+    if (IoInfo->IoType == EFI_SMM_SAVE_STATE_IO_TYPE_INPUT || IoInfo->IoType == EFI_SMM_SAVE_STATE_IO_TYPE_OUTPUT) {
+      ReadSaveStateRegister (CpuIndex, EFI_SMM_SAVE_STATE_REGISTER_RAX, mSmmCpuIoWidth[IoMisc.Bits.Length].Width, &IoInfo->IoData);
+    } else {
+      ReadSaveStateRegisterByIndex (CpuIndex, SMM_SAVE_STATE_REGISTER_IOMEMADDR_INDEX, sizeof (IoMemAddr), &IoMemAddr);
+      CopyMem (&IoInfo->IoData, IoMemAddr, mSmmCpuIoWidth[IoMisc.Bits.Length].Width);
+    }
+    return EFI_SUCCESS;
+  }
+
+  //
+  // Convert Register to a register lookup table index
+  //
+  return ReadSaveStateRegisterByIndex (CpuIndex, GetRegisterIndex (Register), Width, Buffer);
+}
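The IOMisc decode above packs the trapped I/O instruction's port, width, and type into one dword of the save state. A stand-alone model of that decode (the bit positions in this union are illustrative only; the authoritative layout is SMRAM_SAVE_STATE_IOMISC from the save-state map headers):

    #include <stdint.h>
    #include <stdio.h>

    typedef union {
      struct {
        uint32_t SmiFlag : 1;   /* SMI was caused by an I/O instruction */
        uint32_t Length  : 3;   /* 1 = byte, 2 = word, 4 = dword        */
        uint32_t Type    : 4;   /* IN/OUT/INS/OUTS/...                  */
        uint32_t Port    : 16;  /* I/O port number                      */
      } Bits;
      uint32_t Uint32;
    } IOMISC;

    int main (void)
    {
      IOMISC IoMisc;
      IoMisc.Uint32       = 0;
      IoMisc.Bits.SmiFlag = 1;
      IoMisc.Bits.Length  = 2;     /* word                          */
      IoMisc.Bits.Type    = 1;     /* IN dx                         */
      IoMisc.Bits.Port    = 0xB2;  /* the classic SMI command port  */
      printf ("IO SMI: port 0x%X, %u-byte, type %u\n",
              (unsigned)IoMisc.Bits.Port,
              (unsigned)IoMisc.Bits.Length,
              (unsigned)IoMisc.Bits.Type);
      return 0;
    }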
+/**
+  Write value to a CPU Save State register on the target processor.
+
+  This function abstracts away whether the CPU Save State register is in the
+  IA32 CPU Save State Map or the X64 CPU Save State Map.
+
+  This function supports writing a CPU Save State register in SMBase relocation handler.
+
+  @param[in] CpuIndex  Specifies the zero-based index of the CPU save state.
+  @param[in] Register  The EFI_SMM_SAVE_STATE_REGISTER to write.
+  @param[in] Width     The number of bytes to write to the CPU save state.
+  @param[in] Buffer    Upon entry, this holds the new CPU register value.
+
+  @retval EFI_SUCCESS            The register was written to Save State.
+  @retval EFI_NOT_FOUND          The register is not defined for the Save State of Processor.
+  @retval EFI_INVALID_PARAMETER  CpuIndex or Width is not correct.
+
+**/
+EFI_STATUS
+EFIAPI
+WriteSaveStateRegister (
+  IN UINTN                        CpuIndex,
+  IN EFI_SMM_SAVE_STATE_REGISTER  Register,
+  IN UINTN                        Width,
+  IN CONST VOID                   *Buffer
+  )
+{
+  UINTN                 RegisterIndex;
+  SMRAM_SAVE_STATE_MAP  *CpuSaveState;
+
+  //
+  // Writes to EFI_SMM_SAVE_STATE_REGISTER_LMA are ignored
+  //
+  if (Register == EFI_SMM_SAVE_STATE_REGISTER_LMA) {
+    return EFI_SUCCESS;
+  }
+
+  //
+  // Writes to EFI_SMM_SAVE_STATE_REGISTER_IO are not supported
+  //
+  if (Register == EFI_SMM_SAVE_STATE_REGISTER_IO) {
+    return EFI_NOT_FOUND;
+  }
+
+  //
+  // Convert Register to a register lookup table index
+  //
+  RegisterIndex = GetRegisterIndex (Register);
+  if (RegisterIndex == 0) {
+    return EFI_NOT_FOUND;
+  }
+
+  CpuSaveState = gSmst->CpuSaveState[CpuIndex];
+
+  //
+  // Do not write non-writable SaveState, because it will cause exception.
+  //
+  if (!mSmmCpuWidthOffset[RegisterIndex].Writeable) {
+    return EFI_UNSUPPORTED;
+  }
+
+  //
+  // Check CPU mode
+  //
+  if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
+    //
+    // If 32-bit mode width is zero, then the specified register can not be accessed
+    //
+    if (mSmmCpuWidthOffset[RegisterIndex].Width32 == 0) {
+      return EFI_NOT_FOUND;
+    }
+
+    //
+    // If Width is bigger than the 32-bit mode width, then the specified register can not be accessed
+    //
+    if (Width > mSmmCpuWidthOffset[RegisterIndex].Width32) {
+      return EFI_INVALID_PARAMETER;
+    }
+    //
+    // Write SMM State register
+    //
+    ASSERT (CpuSaveState != NULL);
+    CopyMem ((UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset32, Buffer, Width);
+  } else {
+    //
+    // If 64-bit mode width is zero, then the specified register can not be accessed
+    //
+    if (mSmmCpuWidthOffset[RegisterIndex].Width64 == 0) {
+      return EFI_NOT_FOUND;
+    }
+
+    //
+    // If Width is bigger than the 64-bit mode width, then the specified register can not be accessed
+    //
+    if (Width > mSmmCpuWidthOffset[RegisterIndex].Width64) {
+      return EFI_INVALID_PARAMETER;
+    }
+
+    //
+    // Write lower 32-bits of SMM State register
+    //
+    CopyMem ((UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset64Lo, Buffer, MIN (4, Width));
+    if (Width >= 4) {
+      //
+      // Write upper 32-bits of SMM State register
+      //
+      CopyMem ((UINT8 *)CpuSaveState + mSmmCpuWidthOffset[RegisterIndex].Offset64Hi, (UINT8 *)Buffer + 4, Width - 4);
+    }
+  }
+  return EFI_SUCCESS;
+}
+/**
+  Hook the code executed immediately after an RSM instruction on the currently
+  executing CPU.  The mode of code executed immediately after RSM must be
+  detected, and the appropriate hook must be selected.  Always clear the auto
+  HALT restart flag if it is set.
+
+  @param[in] CpuIndex                 The processor index for the currently
+                                      executing CPU.
+  @param[in] CpuState                 Pointer to SMRAM Save State Map for the
+                                      currently executing CPU.
+  @param[in] NewInstructionPointer32  Instruction pointer to use if resuming to
+                                      32-bit mode from 64-bit SMM.
+  @param[in] NewInstructionPointer    Instruction pointer to use if resuming to
+                                      same mode as SMM.
+
+  @retval The value of the original instruction pointer before it was hooked.
+
+**/
+UINT64
+EFIAPI
+HookReturnFromSmm (
+  IN UINTN              CpuIndex,
+  SMRAM_SAVE_STATE_MAP  *CpuState,
+  UINT64                NewInstructionPointer32,
+  UINT64                NewInstructionPointer
+  )
+{
+  UINT64  OriginalInstructionPointer;
+
+  OriginalInstructionPointer = SmmCpuFeaturesHookReturnFromSmm (
+                                 CpuIndex,
+                                 CpuState,
+                                 NewInstructionPointer32,
+                                 NewInstructionPointer
+                                 );
+  if (OriginalInstructionPointer != 0) {
+    return OriginalInstructionPointer;
+  }
+
+  if (mSmmSaveStateRegisterLma == EFI_SMM_SAVE_STATE_REGISTER_LMA_32BIT) {
+    OriginalInstructionPointer = (UINT64)CpuState->x86._EIP;
+    CpuState->x86._EIP = (UINT32)NewInstructionPointer;
+    //
+    // Clear the auto HALT restart flag so the RSM instruction returns
+    // program control to the instruction following the HLT instruction.
+    //
+    if ((CpuState->x86.AutoHALTRestart & BIT0) != 0) {
+      CpuState->x86.AutoHALTRestart &= ~BIT0;
+    }
+  } else {
+    OriginalInstructionPointer = CpuState->x64._RIP;
+    if ((CpuState->x64.IA32_EFER & LMA) == 0) {
+      CpuState->x64._RIP = (UINT32)NewInstructionPointer32;
+    } else {
+      CpuState->x64._RIP = (UINT32)NewInstructionPointer;
+    }
+    //
+    // Clear the auto HALT restart flag so the RSM instruction returns
+    // program control to the instruction following the HLT instruction.
+    //
+    if ((CpuState->x64.AutoHALTRestart & BIT0) != 0) {
+      CpuState->x64.AutoHALTRestart &= ~BIT0;
+    }
+  }
+  return OriginalInstructionPointer;
+}
+
+/**
+  Get the size of the SMI Handler in bytes.
+
+  @retval The size, in bytes, of the SMI Handler.
+
+**/
+UINTN
+EFIAPI
+GetSmiHandlerSize (
+  VOID
+  )
+{
+  UINTN  Size;
+
+  Size = SmmCpuFeaturesGetSmiHandlerSize ();
+  if (Size != 0) {
+    return Size;
+  }
+  return gcSmiHandlerSize;
+}
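The interesting branch in HookReturnFromSmm() is the EFER.LMA test: under 64-bit SMM, the save state's IA32_EFER tells whether the interrupted context was 64-bit or 32-bit, which selects the resume target. A stand-alone model of that choice (values are made up for illustration):

    #include <stdint.h>
    #include <stdio.h>

    #define LMA  (1u << 10)   /* EFER register LMA bit, as defined above */

    int main (void)
    {
      uint64_t Ia32Efer = LMA;   /* pretend the interrupted context was 64-bit */
      uint64_t New64    = 0x0000000076543210ULL;
      uint64_t New32    = 0x00000000000FF000ULL;

      uint64_t Rip = ((Ia32Efer & LMA) != 0) ? New64 : New32;
      printf ("resume RIP = 0x%llx\n", (unsigned long long)Rip);
      return 0;
    }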
+/**
+  Install the SMI handler for the CPU specified by CpuIndex.  This function
+  is called by the CPU that was elected as monarch during System Management
+  Mode initialization.
+
+  @param[in] CpuIndex   The index of the CPU to install the custom SMI handler.
+                        The value must be between 0 and the NumberOfCpus field
+                        in the System Management System Table (SMST).
+  @param[in] SmBase     The SMBASE address for the CPU specified by CpuIndex.
+  @param[in] SmiStack   The stack to use when an SMI is processed by the
+                        CPU specified by CpuIndex.
+  @param[in] StackSize  The size, in bytes, of the stack used when an SMI is
+                        processed by the CPU specified by CpuIndex.
+  @param[in] GdtBase    The base address of the GDT to use when an SMI is
+                        processed by the CPU specified by CpuIndex.
+  @param[in] GdtSize    The size, in bytes, of the GDT used when an SMI is
+                        processed by the CPU specified by CpuIndex.
+  @param[in] IdtBase    The base address of the IDT to use when an SMI is
+                        processed by the CPU specified by CpuIndex.
+  @param[in] IdtSize    The size, in bytes, of the IDT used when an SMI is
+                        processed by the CPU specified by CpuIndex.
+  @param[in] Cr3        The base address of the page tables to use when an SMI
+                        is processed by the CPU specified by CpuIndex.
+**/
+VOID
+EFIAPI
+InstallSmiHandler (
+  IN UINTN   CpuIndex,
+  IN UINT32  SmBase,
+  IN VOID    *SmiStack,
+  IN UINTN   StackSize,
+  IN UINTN   GdtBase,
+  IN UINTN   GdtSize,
+  IN UINTN   IdtBase,
+  IN UINTN   IdtSize,
+  IN UINT32  Cr3
+  )
+{
+  if (SmmCpuFeaturesGetSmiHandlerSize () != 0) {
+    //
+    // Install SMI handler provided by library
+    //
+    SmmCpuFeaturesInstallSmiHandler (
+      CpuIndex,
+      SmBase,
+      SmiStack,
+      StackSize,
+      GdtBase,
+      GdtSize,
+      IdtBase,
+      IdtSize,
+      Cr3
+      );
+    return;
+  }
+
+  //
+  // Initialize values in template before copy
+  //
+  gSmiStack             = (UINT32)((UINTN)SmiStack + StackSize - sizeof (UINTN));
+  gSmiCr3               = Cr3;
+  gSmbase               = SmBase;
+  gSmiHandlerIdtr.Base  = IdtBase;
+  gSmiHandlerIdtr.Limit = (UINT16)(IdtSize - 1);
+
+  //
+  // Set the value at the top of the CPU stack to the CPU Index
+  //
+  *(UINTN *)(UINTN)gSmiStack = CpuIndex;
+
+  //
+  // Copy template to CPU specific SMI handler location
+  //
+  CopyMem (
+    (VOID *)(UINTN)(SmBase + SMM_HANDLER_OFFSET),
+    (VOID *)gcSmiHandlerTemplate,
+    gcSmiHandlerSize
+    );
+}
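The stack setup in InstallSmiHandler() is worth a second look: the initial SMI stack pointer is placed one UINTN below the top of the per-CPU stack, and the CPU index is stored in that top slot so the SMI entry code can recover it. A stand-alone model with plain C stand-ins for the EDK2 types:

    #include <stdint.h>
    #include <stdio.h>

    int main (void)
    {
      uint64_t  Stack[32];                 /* per-CPU stack buffer     */
      uintptr_t StackSize = sizeof (Stack);
      uintptr_t CpuIndex  = 3;

      uintptr_t SmiStack = (uintptr_t)Stack + StackSize - sizeof (uintptr_t);
      *(uintptr_t *)SmiStack = CpuIndex;   /* top-of-stack = CPU index */

      printf ("CPU index at top of stack: %lu\n",
              (unsigned long)*(uintptr_t *)SmiStack);
      return 0;
    }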
diff --git a/UefiCpuPkg/PiSmmCpuDxeSmm/SyncTimer.c b/UefiCpuPkg/PiSmmCpuDxeSmm/SyncTimer.c
new file mode 100644
index 0000000000..5a632eaa24
--- /dev/null
+++ b/UefiCpuPkg/PiSmmCpuDxeSmm/SyncTimer.c
@@ -0,0 +1,116 @@
+/** @file
+SMM Timer feature support
+
+Copyright (c) 2009 - 2015, Intel Corporation. All rights reserved.
+This program and the accompanying materials
+are licensed and made available under the terms and conditions of the BSD License
+which accompanies this distribution.  The full text of the license may be found at
+http://opensource.org/licenses/bsd-license.php
+
+THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
+WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
+
+**/
+
+#include "PiSmmCpuDxeSmm.h"
+
+UINT64  mTimeoutTicker = 0;
+//
+// Number of counts in a roll-over cycle of the performance counter.
+//
+UINT64  mCycle = 0;
+//
+// Flag to indicate the performance counter is count-up or count-down.
+//
+BOOLEAN  mCountDown;
+
+/**
+  Initialize Timer for SMM AP Sync.
+
+**/
+VOID
+InitializeSmmTimer (
+  VOID
+  )
+{
+  UINT64  TimerFrequency;
+  UINT64  Start;
+  UINT64  End;
+
+  TimerFrequency = GetPerformanceCounterProperties (&Start, &End);
+  mTimeoutTicker = DivU64x32 (
+                     MultU64x64 (TimerFrequency, PcdGet64 (PcdCpuSmmApSyncTimeout)),
+                     1000 * 1000
+                     );
+  if (End < Start) {
+    mCountDown = TRUE;
+    mCycle = Start - End;
+  } else {
+    mCountDown = FALSE;
+    mCycle = End - Start;
+  }
+}
+
+/**
+  Start Timer for SMM AP Sync.
+
+**/
+UINT64
+EFIAPI
+StartSyncTimer (
+  VOID
+  )
+{
+  return GetPerformanceCounter ();
+}
+
+/**
+  Check if the SMM AP Sync timer has timed out.
+
+  @param  Timer  The timer value returned by StartSyncTimer().
+
+**/
+BOOLEAN
+EFIAPI
+IsSyncTimerTimeout (
+  IN UINT64  Timer
+  )
+{
+  UINT64  CurrentTimer;
+  UINT64  Delta;
+
+  CurrentTimer = GetPerformanceCounter ();
+  //
+  // We need to consider the case where CurrentTimer is equal to Timer,
+  // which can happen when the timer runs slowly relative to the CPU.
+  // We treat that case as no roll-over.
+  //
+  if (mCountDown) {
+    //
+    // The performance counter counts down.  Check for roll-over condition.
+    //
+    if (CurrentTimer <= Timer) {
+      Delta = Timer - CurrentTimer;
+    } else {
+      //
+      // Handle one roll-over.
+      //
+      Delta = mCycle - (CurrentTimer - Timer) + 1;
+    }
+  } else {
+    //
+    // The performance counter counts up.  Check for roll-over condition.
+    //
+    if (CurrentTimer >= Timer) {
+      Delta = CurrentTimer - Timer;
+    } else {
+      //
+      // Handle one roll-over.
+      //
+      Delta = mCycle - (Timer - CurrentTimer) + 1;
+    }
+  }
+
+  return (BOOLEAN) (Delta >= mTimeoutTicker);
+}
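The roll-over handling above is the core of the sync timer: when the counter has wrapped once, the elapsed ticks are the cycle length minus the apparent backward distance, plus one. A stand-alone model of the count-up case:

    #include <stdint.h>
    #include <stdio.h>

    /* Elapsed ticks of a count-up counter that rolls over after Cycle ticks. */
    static uint64_t
    Elapsed (uint64_t Start, uint64_t Now, uint64_t Cycle)
    {
      if (Now >= Start) {
        return Now - Start;                 /* no roll-over   */
      }
      return Cycle - (Start - Now) + 1;     /* one roll-over  */
    }

    int main (void)
    {
      uint64_t Cycle = 1000;
      printf ("%llu\n", (unsigned long long)Elapsed (990, 5, Cycle));   /* 16  */
      printf ("%llu\n", (unsigned long long)Elapsed (10, 500, Cycle));  /* 490 */
      return 0;
    }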

+ # TRUE - SMM Profile will be enabled.
+ # FALSE - SMM Profile will be disabled.
+ # @Prompt Enable SMM Profile. + gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmProfileEnable|FALSE|BOOLEAN|0x32132109 + + ## Indicates if the SMM profile log buffer is a ring buffer. + # If disabled, no additional log can be done when the buffer is full.

+ # TRUE - the SMM profile log buffer is a ring buffer.
+ # FALSE - the SMM profile log buffer is a normal buffer.
+ # @Prompt The SMM profile log buffer is a ring buffer. + gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmProfileRingBuffer|FALSE|BOOLEAN|0x3213210a + + ## Indicates if SMM Startup AP in a blocking fashion. + # TRUE - SMM Startup AP in a blocking fashion.
+ # FALSE - SMM Startup AP in a non-blocking fashion.
+ # @Prompt SMM Startup AP in a blocking fashion. + gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmBlockStartupThisAp|FALSE|BOOLEAN|0x32132108 + + ## Indicates if SMM Stack Guard will be enabled. + # If enabled, stack overflow in SMM can be caught which eases debugging.

+ # TRUE - SMM Stack Guard will be enabled.
+ # FALSE - SMM Stack Guard will be disabled.
+ # @Prompt Enable SMM Stack Guard. + gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmStackGuard|FALSE|BOOLEAN|0x1000001C + + ## Indicates if BSP election in SMM will be enabled. + # If enabled, a BSP will be dynamically elected among all processors in each SMI. + # Otherwise, processor 0 is always as BSP in each SMI.

+ # TRUE - BSP election in SMM will be enabled.
+ # FALSE - BSP election in SMM will be disabled.
+ # @Prompt Enable BSP election in SMM. + gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmEnableBspElection|TRUE|BOOLEAN|0x32132106 + + ## Indicates if CPU SMM hot-plug will be enabled.

+ # TRUE - SMM CPU hot-plug will be enabled.
+ # FALSE - SMM CPU hot-plug will be disabled.
+ # @Prompt SMM CPU hot-plug. + gUefiCpuPkgTokenSpaceGuid.PcdCpuHotPlugSupport|FALSE|BOOLEAN|0x3213210C + + ## Indicates if SMM Debug will be enabled. + # If enabled, hardware breakpoints in SMRAM can be set outside of SMM mode and take effect in SMM.

+ # TRUE - SMM Debug will be enabled.
+ # FALSE - SMM Debug will be disabled.
+ # @Prompt Enable SMM Debug. + gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmDebug|FALSE|BOOLEAN|0x1000001B + + ## Indicates if lock SMM Feature Control MSR.

+ # TRUE - SMM Feature Control MSR will be locked.
+ # FALSE - SMM Feature Control MSR will not be locked.
+  # @Prompt Lock SMM Feature Control MSR.
+  gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmFeatureControlMsrLock|TRUE|BOOLEAN|0x3213210B
+
 [PcdsFixedAtBuild, PcdsPatchableInModule]
-  ## This value is the CPU Local Apic base address, which aligns the address on a 4-KByte boundary.
-  # @Prompt Configure base address of CPU Local Apic
+  ## This value is the CPU Local APIC base address, which aligns the address on a 4-KByte boundary.
+  # @Prompt Configure base address of CPU Local APIC
   # @Expression  0x80000001 | (gUefiCpuPkgTokenSpaceGuid.PcdCpuLocalApicBaseAddress & 0xfff) == 0
   gUefiCpuPkgTokenSpaceGuid.PcdCpuLocalApicBaseAddress|0xfee00000|UINT32|0x00000001
+
   ## Specifies delay value in microseconds after sending out an INIT IPI.
   # @Prompt Configure delay value after send an INIT IPI
   gUefiCpuPkgTokenSpaceGuid.PcdCpuInitIpiDelayInMicroSeconds|10000|UINT32|0x30000002
+
   ## Specifies max supported number of Logical Processors.
-  # @Prompt Configure max supported number of Logical Processorss
+  # @Prompt Configure max supported number of Logical Processors
   gUefiCpuPkgTokenSpaceGuid.PcdCpuMaxLogicalProcessorNumber|64|UINT32|0x00000002
+
   ## This value specifies the Application Processor (AP) stack size, used for Mp Service, which must
   ## aligns the address on a 4-KByte boundary.
   # @Prompt Configure stack size for Application Processor (AP)
@@ -82,6 +141,32 @@
   # @Prompt Stack size in the temporary RAM.
   gUefiCpuPkgTokenSpaceGuid.PcdPeiTemporaryRamStackSize|0|UINT32|0x10001003
 
+  ## Specifies buffer size in bytes to save SMM profile data. The value should be a multiple of 4KB.
+  # @Prompt SMM profile data buffer size.
+  gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmProfileSize|0x200000|UINT32|0x32132107
+
+  ## Specifies stack size in bytes for each processor in SMM.
+  # @Prompt Processor stack size in SMM.
+  gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmStackSize|0x2000|UINT32|0x32132105
+
+  ## Specifies timeout value in microseconds for the BSP in SMM to wait for all APs to come into SMM.
+  # @Prompt AP synchronization timeout value in SMM.
+  gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmApSyncTimeout|1000000|UINT64|0x32132104
+
+  ## Indicates if SMM Code Access Check is enabled.
+  #  If enabled, the SMM handler cannot execute code outside SMM regions.
+  #  It is suggested to set this PCD to TRUE in production images.

+ # TRUE - SMM Code Access Check will be enabled.
+ # FALSE - SMM Code Access Check will be disabled.
+ # @Prompt SMM Code Access Check. + gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmCodeAccessCheckEnable|TRUE|BOOLEAN|0x60000013 + + ## Indicates the CPU synchronization method used when processing an SMI. + # 0x00 - Traditional CPU synchronization method.
+ # 0x01 - Relaxed CPU synchronization method.
+  # @Prompt SMM CPU Synchronization Method.
+  gUefiCpuPkgTokenSpaceGuid.PcdCpuSmmSyncMode|0x00|UINT8|0x60000014
+
 [PcdsFixedAtBuild, PcdsPatchableInModule, PcdsDynamic, PcdsDynamicEx]
   ## Specifies timeout value in microseconds for the BSP to detect all APs for the first time.
   # @Prompt Timeout for the BSP to detect all APs for the first time.
@@ -93,5 +178,16 @@
   # @Prompt Microcode Region size.
   gUefiCpuPkgTokenSpaceGuid.PcdCpuMicrocodePatchRegionSize|0x0|UINT64|0x00000006
 
+[PcdsDynamic, PcdsDynamicEx]
+  ## Contains the pointer to a CPU S3 data buffer of structure ACPI_CPU_DATA.
+  # @Prompt The pointer to a CPU S3 data buffer.
+  # @ValidList   0x80000001 | 0
+  gUefiCpuPkgTokenSpaceGuid.PcdCpuS3DataAddress|0x0|UINT64|0x60000010
+
+  ## Contains the pointer to a CPU Hot Plug Data structure if CPU hot-plug is supported.
+  # @Prompt The pointer to CPU Hot Plug Data.
+  # @ValidList   0x80000001 | 0
+  gUefiCpuPkgTokenSpaceGuid.PcdCpuHotPlugDataAddress|0x0|UINT64|0x60000011
+
 [UserExtensions.TianoCore."ExtraFiles"]
   UefiCpuPkgExtra.uni
diff --git a/UefiCpuPkg/UefiCpuPkg.uni b/UefiCpuPkg/UefiCpuPkg.uni
index 4fe2faf981..f7ab84c5a2 100644
Binary files a/UefiCpuPkg/UefiCpuPkg.uni and b/UefiCpuPkg/UefiCpuPkg.uni differ
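For reference, a hedged sketch of how a consumer might branch on the new PCDs declared above. In EDK2 the accessors would be PcdGet8() and FeaturePcdGet() from PcdLib; they are stubbed as constants here so the fragment compiles on its own, and the values shown are just the declared defaults:

    #include <stdio.h>

    #define PcdGet8_PcdCpuSmmSyncMode          0x00  /* traditional sync    */
    #define FeaturePcd_PcdCpuSmmProfileEnable  0     /* FALSE in production */

    int main (void)
    {
      if (PcdGet8_PcdCpuSmmSyncMode == 0x00) {
        printf ("using traditional CPU synchronization\n");
      } else {
        printf ("using relaxed CPU synchronization\n");
      }
      if (!FeaturePcd_PcdCpuSmmProfileEnable) {
        printf ("SMM Profile disabled, as recommended for production\n");
      }
      return 0;
    }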