ArmPkg: Apply uncrustify changes

REF: https://bugzilla.tianocore.org/show_bug.cgi?id=3737

Apply uncrustify changes to .c/.h files in the ArmPkg package

Cc: Andrew Fish <afish@apple.com>
Cc: Leif Lindholm <leif@nuviainc.com>
Cc: Michael D Kinney <michael.d.kinney@intel.com>
Signed-off-by: Michael Kubacki <michael.kubacki@microsoft.com>
Reviewed-by: Andrew Fish <afish@apple.com>
Author: Michael Kubacki
Date: 2021-12-05 14:53:50 -08:00
Committed-by: mergify[bot]
Parent: 7c2a6033c1
Commit: 429309e0c6
142 changed files with 6020 additions and 5216 deletions
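For reference, the style the diff below converges on can be summarized in a small sketch: aligned local-variable and parameter declarations, a space before the parenthesis of a call or function-like macro, explicit parentheses around each operand of a compound condition with the opening brace on its own line when the condition wraps, braces on every if/else body, and one argument per line when a call is wrapped. The snippet is a minimal illustration written for this description only; NextBlockEnd is a hypothetical helper and is not taken from the ArmPkg sources.

#include <stdint.h>
#include <stdio.h>

//
// Hypothetical helper formatted in the post-uncrustify style shown in this
// commit: aligned declarations, wrapped compound condition with the brace on
// its own line, and braces on the single-statement body.
//
static uint64_t
NextBlockEnd (
  uint64_t  RegionStart,
  uint64_t  RegionEnd,
  uint64_t  BlockMask
  )
{
  uint64_t  BlockEnd;

  BlockEnd = (RegionStart | BlockMask) + 1;

  //
  // Clamp to the region end, also guarding against wrap-around of the
  // addition above.
  //
  if ((BlockEnd > RegionEnd) ||
      (BlockEnd < RegionStart))
  {
    BlockEnd = RegionEnd;
  }

  return BlockEnd;
}

int
main (
  void
  )
{
  // Wrapped call with one argument per line, space before the parenthesis.
  printf (
    "Block end: 0x%llx\n",
    (unsigned long long)NextBlockEnd (0x1000, 0x5000, 0x1FFFFF)
    );

  return 0;
}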


@@ -26,31 +26,32 @@ ArmMemoryAttributeToPageAttribute (
)
{
switch (Attributes) {
case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
return TT_ATTR_INDX_MEMORY_WRITE_BACK;
case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
return TT_ATTR_INDX_MEMORY_WRITE_BACK;
case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
return TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
return TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
// Uncached and device mappings are treated as outer shareable by default,
case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
// Uncached and device mappings are treated as outer shareable by default,
case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
default:
ASSERT (0);
case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
if (ArmReadCurrentEL () == AARCH64_EL2)
return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
else
return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
default:
ASSERT (0);
case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
if (ArmReadCurrentEL () == AARCH64_EL2) {
return TT_ATTR_INDX_DEVICE_MEMORY | TT_XN_MASK;
} else {
return TT_ATTR_INDX_DEVICE_MEMORY | TT_UXN_MASK | TT_PXN_MASK;
}
}
}
@@ -61,7 +62,7 @@ ArmMemoryAttributeToPageAttribute (
STATIC
UINTN
GetRootTableEntryCount (
IN UINTN T0SZ
IN UINTN T0SZ
)
{
return TT_ENTRY_COUNT >> (T0SZ - MIN_T0SZ) % BITS_PER_LEVEL;
@@ -70,7 +71,7 @@ GetRootTableEntryCount (
STATIC
UINTN
GetRootTableLevel (
IN UINTN T0SZ
IN UINTN T0SZ
)
{
return (T0SZ - MIN_T0SZ) / BITS_PER_LEVEL;
@@ -79,10 +80,10 @@ GetRootTableLevel (
STATIC
VOID
ReplaceTableEntry (
IN UINT64 *Entry,
IN UINT64 Value,
IN UINT64 RegionStart,
IN BOOLEAN IsLiveBlockMapping
IN UINT64 *Entry,
IN UINT64 Value,
IN UINT64 RegionStart,
IN BOOLEAN IsLiveBlockMapping
)
{
if (!ArmMmuEnabled () || !IsLiveBlockMapping) {
@@ -100,19 +101,22 @@ FreePageTablesRecursive (
IN UINTN Level
)
{
UINTN Index;
UINTN Index;
ASSERT (Level <= 3);
if (Level < 3) {
for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
if ((TranslationTable[Index] & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY) {
FreePageTablesRecursive ((VOID *)(UINTN)(TranslationTable[Index] &
TT_ADDRESS_MASK_BLOCK_ENTRY),
Level + 1);
FreePageTablesRecursive (
(VOID *)(UINTN)(TranslationTable[Index] &
TT_ADDRESS_MASK_BLOCK_ENTRY),
Level + 1
);
}
}
}
FreePages (TranslationTable, 1);
}
@@ -126,6 +130,7 @@ IsBlockEntry (
if (Level == 3) {
return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY_LEVEL3;
}
return (Entry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY;
}
@@ -143,39 +148,48 @@ IsTableEntry (
//
return FALSE;
}
return (Entry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY;
}
STATIC
EFI_STATUS
UpdateRegionMappingRecursive (
IN UINT64 RegionStart,
IN UINT64 RegionEnd,
IN UINT64 AttributeSetMask,
IN UINT64 AttributeClearMask,
IN UINT64 *PageTable,
IN UINTN Level
IN UINT64 RegionStart,
IN UINT64 RegionEnd,
IN UINT64 AttributeSetMask,
IN UINT64 AttributeClearMask,
IN UINT64 *PageTable,
IN UINTN Level
)
{
UINTN BlockShift;
UINT64 BlockMask;
UINT64 BlockEnd;
UINT64 *Entry;
UINT64 EntryValue;
VOID *TranslationTable;
EFI_STATUS Status;
UINTN BlockShift;
UINT64 BlockMask;
UINT64 BlockEnd;
UINT64 *Entry;
UINT64 EntryValue;
VOID *TranslationTable;
EFI_STATUS Status;
ASSERT (((RegionStart | RegionEnd) & EFI_PAGE_MASK) == 0);
BlockShift = (Level + 1) * BITS_PER_LEVEL + MIN_T0SZ;
BlockMask = MAX_UINT64 >> BlockShift;
BlockMask = MAX_UINT64 >> BlockShift;
DEBUG ((DEBUG_VERBOSE, "%a(%d): %llx - %llx set %lx clr %lx\n", __FUNCTION__,
Level, RegionStart, RegionEnd, AttributeSetMask, AttributeClearMask));
DEBUG ((
DEBUG_VERBOSE,
"%a(%d): %llx - %llx set %lx clr %lx\n",
__FUNCTION__,
Level,
RegionStart,
RegionEnd,
AttributeSetMask,
AttributeClearMask
));
for (; RegionStart < RegionEnd; RegionStart = BlockEnd) {
for ( ; RegionStart < RegionEnd; RegionStart = BlockEnd) {
BlockEnd = MIN (RegionEnd, (RegionStart | BlockMask) + 1);
Entry = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];
Entry = &PageTable[(RegionStart >> (64 - BlockShift)) & (TT_ENTRY_COUNT - 1)];
//
// If RegionStart or BlockEnd is not aligned to the block size at this
@@ -187,8 +201,9 @@ UpdateRegionMappingRecursive (
// we cannot replace it with a block entry without potentially losing
// attribute information, so keep the table entry in that case.
//
if (Level == 0 || ((RegionStart | BlockEnd) & BlockMask) != 0 ||
(IsTableEntry (*Entry, Level) && AttributeClearMask != 0)) {
if ((Level == 0) || (((RegionStart | BlockEnd) & BlockMask) != 0) ||
(IsTableEntry (*Entry, Level) && (AttributeClearMask != 0)))
{
ASSERT (Level < 3);
if (!IsTableEntry (*Entry, Level)) {
@@ -216,9 +231,14 @@ UpdateRegionMappingRecursive (
// We are splitting an existing block entry, so we have to populate
// the new table with the attributes of the block entry it replaces.
//
Status = UpdateRegionMappingRecursive (RegionStart & ~BlockMask,
(RegionStart | BlockMask) + 1, *Entry & TT_ATTRIBUTES_MASK,
0, TranslationTable, Level + 1);
Status = UpdateRegionMappingRecursive (
RegionStart & ~BlockMask,
(RegionStart | BlockMask) + 1,
*Entry & TT_ATTRIBUTES_MASK,
0,
TranslationTable,
Level + 1
);
if (EFI_ERROR (Status)) {
//
// The range we passed to UpdateRegionMappingRecursive () is block
@@ -236,9 +256,14 @@ UpdateRegionMappingRecursive (
//
// Recurse to the next level
//
Status = UpdateRegionMappingRecursive (RegionStart, BlockEnd,
AttributeSetMask, AttributeClearMask, TranslationTable,
Level + 1);
Status = UpdateRegionMappingRecursive (
RegionStart,
BlockEnd,
AttributeSetMask,
AttributeClearMask,
TranslationTable,
Level + 1
);
if (EFI_ERROR (Status)) {
if (!IsTableEntry (*Entry, Level)) {
//
@@ -250,16 +275,21 @@ UpdateRegionMappingRecursive (
//
FreePageTablesRecursive (TranslationTable, Level + 1);
}
return Status;
}
if (!IsTableEntry (*Entry, Level)) {
EntryValue = (UINTN)TranslationTable | TT_TYPE_TABLE_ENTRY;
ReplaceTableEntry (Entry, EntryValue, RegionStart,
IsBlockEntry (*Entry, Level));
ReplaceTableEntry (
Entry,
EntryValue,
RegionStart,
IsBlockEntry (*Entry, Level)
);
}
} else {
EntryValue = (*Entry & AttributeClearMask) | AttributeSetMask;
EntryValue = (*Entry & AttributeClearMask) | AttributeSetMask;
EntryValue |= RegionStart;
EntryValue |= (Level == 3) ? TT_TYPE_BLOCK_ENTRY_LEVEL3
: TT_TYPE_BLOCK_ENTRY;
@@ -280,6 +310,7 @@ UpdateRegionMappingRecursive (
}
}
}
return EFI_SUCCESS;
}
@@ -292,7 +323,7 @@ UpdateRegionMapping (
IN UINT64 AttributeClearMask
)
{
UINTN T0SZ;
UINTN T0SZ;
if (((RegionStart | RegionLength) & EFI_PAGE_MASK) != 0) {
return EFI_INVALID_PARAMETER;
@@ -300,9 +331,14 @@ UpdateRegionMapping (
T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
return UpdateRegionMappingRecursive (RegionStart, RegionStart + RegionLength,
AttributeSetMask, AttributeClearMask, ArmGetTTBR0BaseAddress (),
GetRootTableLevel (T0SZ));
return UpdateRegionMappingRecursive (
RegionStart,
RegionStart + RegionLength,
AttributeSetMask,
AttributeClearMask,
ArmGetTTBR0BaseAddress (),
GetRootTableLevel (T0SZ)
);
}
STATIC
@@ -323,31 +359,32 @@ FillTranslationTable (
STATIC
UINT64
GcdAttributeToPageAttribute (
IN UINT64 GcdAttributes
IN UINT64 GcdAttributes
)
{
UINT64 PageAttributes;
UINT64 PageAttributes;
switch (GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) {
case EFI_MEMORY_UC:
PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
break;
case EFI_MEMORY_WC:
PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
break;
case EFI_MEMORY_WT:
PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
break;
case EFI_MEMORY_WB:
PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
break;
default:
PageAttributes = TT_ATTR_INDX_MASK;
break;
case EFI_MEMORY_UC:
PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
break;
case EFI_MEMORY_WC:
PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
break;
case EFI_MEMORY_WT:
PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH | TT_SH_INNER_SHAREABLE;
break;
case EFI_MEMORY_WB:
PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK | TT_SH_INNER_SHAREABLE;
break;
default:
PageAttributes = TT_ATTR_INDX_MASK;
break;
}
if ((GcdAttributes & EFI_MEMORY_XP) != 0 ||
(GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC) {
if (((GcdAttributes & EFI_MEMORY_XP) != 0) ||
((GcdAttributes & EFI_MEMORY_CACHETYPE_MASK) == EFI_MEMORY_UC))
{
if (ArmReadCurrentEL () == AARCH64_EL2) {
PageAttributes |= TT_XN_MASK;
} else {
@@ -364,15 +401,15 @@ GcdAttributeToPageAttribute (
EFI_STATUS
ArmSetMemoryAttributes (
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length,
IN UINT64 Attributes
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length,
IN UINT64 Attributes
)
{
UINT64 PageAttributes;
UINT64 PageAttributeMask;
UINT64 PageAttributes;
UINT64 PageAttributeMask;
PageAttributes = GcdAttributeToPageAttribute (Attributes);
PageAttributes = GcdAttributeToPageAttribute (Attributes);
PageAttributeMask = 0;
if ((Attributes & EFI_MEMORY_CACHETYPE_MASK) == 0) {
@@ -380,22 +417,26 @@ ArmSetMemoryAttributes (
// No memory type was set in Attributes, so we are going to update the
// permissions only.
//
PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
PageAttributes &= TT_AP_MASK | TT_UXN_MASK | TT_PXN_MASK;
PageAttributeMask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK |
TT_PXN_MASK | TT_XN_MASK);
}
return UpdateRegionMapping (BaseAddress, Length, PageAttributes,
PageAttributeMask);
return UpdateRegionMapping (
BaseAddress,
Length,
PageAttributes,
PageAttributeMask
);
}
STATIC
EFI_STATUS
SetMemoryRegionAttribute (
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length,
IN UINT64 Attributes,
IN UINT64 BlockEntryMask
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length,
IN UINT64 Attributes,
IN UINT64 BlockEntryMask
)
{
return UpdateRegionMapping (BaseAddress, Length, Attributes, BlockEntryMask);
@@ -403,11 +444,11 @@ SetMemoryRegionAttribute (
EFI_STATUS
ArmSetMemoryRegionNoExec (
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length
)
{
UINT64 Val;
UINT64 Val;
if (ArmReadCurrentEL () == AARCH64_EL1) {
Val = TT_PXN_MASK | TT_UXN_MASK;
@@ -419,16 +460,17 @@ ArmSetMemoryRegionNoExec (
BaseAddress,
Length,
Val,
~TT_ADDRESS_MASK_BLOCK_ENTRY);
~TT_ADDRESS_MASK_BLOCK_ENTRY
);
}
EFI_STATUS
ArmClearMemoryRegionNoExec (
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length
)
{
UINT64 Mask;
UINT64 Mask;
// XN maps to UXN in the EL1&0 translation regime
Mask = ~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_PXN_MASK | TT_XN_MASK);
@@ -437,50 +479,53 @@ ArmClearMemoryRegionNoExec (
BaseAddress,
Length,
0,
Mask);
Mask
);
}
EFI_STATUS
ArmSetMemoryRegionReadOnly (
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length
)
{
return SetMemoryRegionAttribute (
BaseAddress,
Length,
TT_AP_RO_RO,
~TT_ADDRESS_MASK_BLOCK_ENTRY);
~TT_ADDRESS_MASK_BLOCK_ENTRY
);
}
EFI_STATUS
ArmClearMemoryRegionReadOnly (
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length
)
{
return SetMemoryRegionAttribute (
BaseAddress,
Length,
TT_AP_RW_RW,
~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK));
~(TT_ADDRESS_MASK_BLOCK_ENTRY | TT_AP_MASK)
);
}
EFI_STATUS
EFIAPI
ArmConfigureMmu (
IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,
OUT VOID **TranslationTableBase OPTIONAL,
OUT VOID **TranslationTableBase OPTIONAL,
OUT UINTN *TranslationTableSize OPTIONAL
)
{
VOID* TranslationTable;
UINTN MaxAddressBits;
UINT64 MaxAddress;
UINTN T0SZ;
UINTN RootTableEntryCount;
UINT64 TCR;
EFI_STATUS Status;
VOID *TranslationTable;
UINTN MaxAddressBits;
UINT64 MaxAddress;
UINTN T0SZ;
UINTN RootTableEntryCount;
UINT64 TCR;
EFI_STATUS Status;
if (MemoryTable == NULL) {
ASSERT (MemoryTable != NULL);
@@ -495,9 +540,9 @@ ArmConfigureMmu (
// use of 4 KB pages.
//
MaxAddressBits = MIN (ArmGetPhysicalAddressBits (), MAX_VA_BITS);
MaxAddress = LShiftU64 (1ULL, MaxAddressBits) - 1;
MaxAddress = LShiftU64 (1ULL, MaxAddressBits) - 1;
T0SZ = 64 - MaxAddressBits;
T0SZ = 64 - MaxAddressBits;
RootTableEntryCount = GetRootTableEntryCount (T0SZ);
//
@@ -506,7 +551,7 @@ ArmConfigureMmu (
// Ideally we will be running at EL2, but should support EL1 as well.
// UEFI should not run at EL3.
if (ArmReadCurrentEL () == AARCH64_EL2) {
//Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
// Note: Bits 23 and 31 are reserved(RES1) bits in TCR_EL2
TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;
// Set the Physical Address Size using MaxAddress
@@ -523,9 +568,11 @@ ArmConfigureMmu (
} else if (MaxAddress < SIZE_256TB) {
TCR |= TCR_PS_256TB;
} else {
DEBUG ((DEBUG_ERROR,
DEBUG ((
DEBUG_ERROR,
"ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
MaxAddress));
MaxAddress
));
ASSERT (0); // Bigger than 48-bit memory space are not supported
return EFI_UNSUPPORTED;
}
@@ -547,9 +594,11 @@ ArmConfigureMmu (
} else if (MaxAddress < SIZE_256TB) {
TCR |= TCR_IPS_256TB;
} else {
DEBUG ((DEBUG_ERROR,
DEBUG ((
DEBUG_ERROR,
"ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU configuration.\n",
MaxAddress));
MaxAddress
));
ASSERT (0); // Bigger than 48-bit memory space are not supported
return EFI_UNSUPPORTED;
}
@@ -579,6 +628,7 @@ ArmConfigureMmu (
if (TranslationTable == NULL) {
return EFI_OUT_OF_RESOURCES;
}
//
// We set TTBR0 just after allocating the table to retrieve its location from
// the subsequent functions without needing to pass this value across the
@@ -599,8 +649,10 @@ ArmConfigureMmu (
// Make sure we are not inadvertently hitting in the caches
// when populating the page tables.
//
InvalidateDataCacheRange (TranslationTable,
RootTableEntryCount * sizeof (UINT64));
InvalidateDataCacheRange (
TranslationTable,
RootTableEntryCount * sizeof (UINT64)
);
ZeroMem (TranslationTable, RootTableEntryCount * sizeof (UINT64));
while (MemoryTable->Length != 0) {
@@ -608,6 +660,7 @@ ArmConfigureMmu (
if (EFI_ERROR (Status)) {
goto FreeTranslationTable;
}
MemoryTable++;
}
@@ -618,10 +671,10 @@ ArmConfigureMmu (
// EFI_MEMORY_WB ==> MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK
//
ArmSetMAIR (
MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |
MAIR_ATTR (TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) |
MAIR_ATTR (TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) |
MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) |
MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)
MAIR_ATTR (TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)
);
ArmDisableAlignmentCheck ();
@@ -643,14 +696,16 @@ ArmMmuBaseLibConstructor (
VOID
)
{
extern UINT32 ArmReplaceLiveTranslationEntrySize;
extern UINT32 ArmReplaceLiveTranslationEntrySize;
//
// The ArmReplaceLiveTranslationEntry () helper function may be invoked
// with the MMU off so we have to ensure that it gets cleaned to the PoC
//
WriteBackDataCacheRange ((VOID *)(UINTN)ArmReplaceLiveTranslationEntry,
ArmReplaceLiveTranslationEntrySize);
WriteBackDataCacheRange (
(VOID *)(UINTN)ArmReplaceLiveTranslationEntry,
ArmReplaceLiveTranslationEntrySize
);
return RETURN_SUCCESS;
}


@@ -16,14 +16,14 @@
EFI_STATUS
EFIAPI
ArmMmuPeiLibConstructor (
IN EFI_PEI_FILE_HANDLE FileHandle,
IN CONST EFI_PEI_SERVICES **PeiServices
IN EFI_PEI_FILE_HANDLE FileHandle,
IN CONST EFI_PEI_SERVICES **PeiServices
)
{
extern UINT32 ArmReplaceLiveTranslationEntrySize;
extern UINT32 ArmReplaceLiveTranslationEntrySize;
EFI_FV_FILE_INFO FileInfo;
EFI_STATUS Status;
EFI_FV_FILE_INFO FileInfo;
EFI_STATUS Status;
ASSERT (FileHandle != NULL);
@@ -37,9 +37,10 @@ ArmMmuPeiLibConstructor (
// is executing from DRAM, we only need to perform the cache maintenance
// when not executing in place.
//
if ((UINTN)FileInfo.Buffer <= (UINTN)ArmReplaceLiveTranslationEntry &&
if (((UINTN)FileInfo.Buffer <= (UINTN)ArmReplaceLiveTranslationEntry) &&
((UINTN)FileInfo.Buffer + FileInfo.BufferSize >=
(UINTN)ArmReplaceLiveTranslationEntry + ArmReplaceLiveTranslationEntrySize)) {
(UINTN)ArmReplaceLiveTranslationEntry + ArmReplaceLiveTranslationEntrySize))
{
DEBUG ((DEBUG_INFO, "ArmMmuLib: skipping cache maintenance on XIP PEIM\n"));
} else {
DEBUG ((DEBUG_INFO, "ArmMmuLib: performing cache maintenance on shadowed PEIM\n"));
@@ -47,8 +48,10 @@ ArmMmuPeiLibConstructor (
// The ArmReplaceLiveTranslationEntry () helper function may be invoked
// with the MMU off so we have to ensure that it gets cleaned to the PoC
//
WriteBackDataCacheRange ((VOID *)(UINTN)ArmReplaceLiveTranslationEntry,
ArmReplaceLiveTranslationEntrySize);
WriteBackDataCacheRange (
(VOID *)(UINTN)ArmReplaceLiveTranslationEntry,
ArmReplaceLiveTranslationEntrySize
);
}
return RETURN_SUCCESS;


@@ -19,9 +19,9 @@ ConvertSectionAttributesToPageAttributes (
IN BOOLEAN IsLargePage
)
{
UINT32 PageAttributes;
UINT32 PageAttributes;
PageAttributes = 0;
PageAttributes = 0;
PageAttributes |= TT_DESCRIPTOR_CONVERT_TO_PAGE_CACHE_POLICY (SectionAttributes, IsLargePage);
PageAttributes |= TT_DESCRIPTOR_CONVERT_TO_PAGE_AP (SectionAttributes);
PageAttributes |= TT_DESCRIPTOR_CONVERT_TO_PAGE_XN (SectionAttributes, IsLargePage);


@@ -17,19 +17,19 @@
#include <Library/DebugLib.h>
#include <Library/PcdLib.h>
#define ID_MMFR0_SHARELVL_SHIFT 12
#define ID_MMFR0_SHARELVL_MASK 0xf
#define ID_MMFR0_SHARELVL_ONE 0
#define ID_MMFR0_SHARELVL_TWO 1
#define ID_MMFR0_SHARELVL_SHIFT 12
#define ID_MMFR0_SHARELVL_MASK 0xf
#define ID_MMFR0_SHARELVL_ONE 0
#define ID_MMFR0_SHARELVL_TWO 1
#define ID_MMFR0_INNERSHR_SHIFT 28
#define ID_MMFR0_INNERSHR_MASK 0xf
#define ID_MMFR0_OUTERSHR_SHIFT 8
#define ID_MMFR0_OUTERSHR_MASK 0xf
#define ID_MMFR0_INNERSHR_SHIFT 28
#define ID_MMFR0_INNERSHR_MASK 0xf
#define ID_MMFR0_OUTERSHR_SHIFT 8
#define ID_MMFR0_OUTERSHR_MASK 0xf
#define ID_MMFR0_SHR_IMP_UNCACHED 0
#define ID_MMFR0_SHR_IMP_HW_COHERENT 1
#define ID_MMFR0_SHR_IGNORED 0xf
#define ID_MMFR0_SHR_IMP_UNCACHED 0
#define ID_MMFR0_SHR_IMP_HW_COHERENT 1
#define ID_MMFR0_SHR_IGNORED 0xf
UINTN
EFIAPI
@@ -49,8 +49,8 @@ PreferNonshareableMemory (
VOID
)
{
UINTN Mmfr;
UINTN Val;
UINTN Mmfr;
UINTN Val;
if (FeaturePcdGet (PcdNormalMemoryNonshareableOverride)) {
return TRUE;
@@ -63,32 +63,33 @@ PreferNonshareableMemory (
//
Mmfr = ArmReadIdMmfr0 ();
switch ((Mmfr >> ID_MMFR0_SHARELVL_SHIFT) & ID_MMFR0_SHARELVL_MASK) {
case ID_MMFR0_SHARELVL_ONE:
// one level of shareability
Val = (Mmfr >> ID_MMFR0_OUTERSHR_SHIFT) & ID_MMFR0_OUTERSHR_MASK;
break;
case ID_MMFR0_SHARELVL_TWO:
// two levels of shareability
Val = (Mmfr >> ID_MMFR0_INNERSHR_SHIFT) & ID_MMFR0_INNERSHR_MASK;
break;
default:
// unexpected value -> shareable is the safe option
ASSERT (FALSE);
return FALSE;
case ID_MMFR0_SHARELVL_ONE:
// one level of shareability
Val = (Mmfr >> ID_MMFR0_OUTERSHR_SHIFT) & ID_MMFR0_OUTERSHR_MASK;
break;
case ID_MMFR0_SHARELVL_TWO:
// two levels of shareability
Val = (Mmfr >> ID_MMFR0_INNERSHR_SHIFT) & ID_MMFR0_INNERSHR_MASK;
break;
default:
// unexpected value -> shareable is the safe option
ASSERT (FALSE);
return FALSE;
}
return Val != ID_MMFR0_SHR_IMP_HW_COHERENT;
}
STATIC
VOID
PopulateLevel2PageTable (
IN UINT32 *SectionEntry,
IN UINT32 PhysicalBase,
IN UINT32 RemainLength,
IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
IN UINT32 *SectionEntry,
IN UINT32 PhysicalBase,
IN UINT32 RemainLength,
IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
)
{
UINT32* PageEntry;
UINT32 *PageEntry;
UINT32 Pages;
UINT32 Index;
UINT32 PageAttributes;
@@ -104,7 +105,7 @@ PopulateLevel2PageTable (
break;
case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
PageAttributes = TT_DESCRIPTOR_PAGE_WRITE_BACK;
PageAttributes = TT_DESCRIPTOR_PAGE_WRITE_BACK;
PageAttributes &= ~TT_DESCRIPTOR_PAGE_S_SHARED;
break;
case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
@@ -132,7 +133,7 @@ PopulateLevel2PageTable (
// Level 2 Translation Table to it
if (*SectionEntry != 0) {
// The entry must be a page table. Otherwise it exists an overlapping in the memory map
if (TT_DESCRIPTOR_SECTION_TYPE_IS_PAGE_TABLE(*SectionEntry)) {
if (TT_DESCRIPTOR_SECTION_TYPE_IS_PAGE_TABLE (*SectionEntry)) {
TranslationTable = *SectionEntry & TT_DESCRIPTOR_SECTION_PAGETABLE_ADDRESS_MASK;
} else if ((*SectionEntry & TT_DESCRIPTOR_SECTION_TYPE_MASK) == TT_DESCRIPTOR_SECTION_TYPE_SECTION) {
// Case where a virtual memory map descriptor overlapped a section entry
@@ -140,60 +141,66 @@ PopulateLevel2PageTable (
// Allocate a Level2 Page Table for this Section
TranslationTable = (UINTN)AllocateAlignedPages (
EFI_SIZE_TO_PAGES (TRANSLATION_TABLE_PAGE_SIZE),
TRANSLATION_TABLE_PAGE_ALIGNMENT);
TRANSLATION_TABLE_PAGE_ALIGNMENT
);
// Translate the Section Descriptor into Page Descriptor
SectionDescriptor = TT_DESCRIPTOR_PAGE_TYPE_PAGE | ConvertSectionAttributesToPageAttributes (*SectionEntry, FALSE);
BaseSectionAddress = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(*SectionEntry);
BaseSectionAddress = TT_DESCRIPTOR_SECTION_BASE_ADDRESS (*SectionEntry);
//
// Make sure we are not inadvertently hitting in the caches
// when populating the page tables
//
InvalidateDataCacheRange ((VOID *)TranslationTable,
TRANSLATION_TABLE_PAGE_SIZE);
InvalidateDataCacheRange (
(VOID *)TranslationTable,
TRANSLATION_TABLE_PAGE_SIZE
);
// Populate the new Level2 Page Table for the section
PageEntry = (UINT32*)TranslationTable;
PageEntry = (UINT32 *)TranslationTable;
for (Index = 0; Index < TRANSLATION_TABLE_PAGE_COUNT; Index++) {
PageEntry[Index] = TT_DESCRIPTOR_PAGE_BASE_ADDRESS(BaseSectionAddress + (Index << 12)) | SectionDescriptor;
PageEntry[Index] = TT_DESCRIPTOR_PAGE_BASE_ADDRESS (BaseSectionAddress + (Index << 12)) | SectionDescriptor;
}
// Overwrite the section entry to point to the new Level2 Translation Table
*SectionEntry = (TranslationTable & TT_DESCRIPTOR_SECTION_PAGETABLE_ADDRESS_MASK) |
(IS_ARM_MEMORY_REGION_ATTRIBUTES_SECURE(Attributes) ? (1 << 3) : 0) |
TT_DESCRIPTOR_SECTION_TYPE_PAGE_TABLE;
(IS_ARM_MEMORY_REGION_ATTRIBUTES_SECURE (Attributes) ? (1 << 3) : 0) |
TT_DESCRIPTOR_SECTION_TYPE_PAGE_TABLE;
} else {
// We do not support the other section type (16MB Section)
ASSERT(0);
ASSERT (0);
return;
}
} else {
TranslationTable = (UINTN)AllocateAlignedPages (
EFI_SIZE_TO_PAGES (TRANSLATION_TABLE_PAGE_SIZE),
TRANSLATION_TABLE_PAGE_ALIGNMENT);
TRANSLATION_TABLE_PAGE_ALIGNMENT
);
//
// Make sure we are not inadvertently hitting in the caches
// when populating the page tables
//
InvalidateDataCacheRange ((VOID *)TranslationTable,
TRANSLATION_TABLE_PAGE_SIZE);
InvalidateDataCacheRange (
(VOID *)TranslationTable,
TRANSLATION_TABLE_PAGE_SIZE
);
ZeroMem ((VOID *)TranslationTable, TRANSLATION_TABLE_PAGE_SIZE);
*SectionEntry = (TranslationTable & TT_DESCRIPTOR_SECTION_PAGETABLE_ADDRESS_MASK) |
(IS_ARM_MEMORY_REGION_ATTRIBUTES_SECURE(Attributes) ? (1 << 3) : 0) |
TT_DESCRIPTOR_SECTION_TYPE_PAGE_TABLE;
(IS_ARM_MEMORY_REGION_ATTRIBUTES_SECURE (Attributes) ? (1 << 3) : 0) |
TT_DESCRIPTOR_SECTION_TYPE_PAGE_TABLE;
}
FirstPageOffset = (PhysicalBase & TT_DESCRIPTOR_PAGE_INDEX_MASK) >> TT_DESCRIPTOR_PAGE_BASE_SHIFT;
PageEntry = (UINT32 *)TranslationTable + FirstPageOffset;
Pages = RemainLength / TT_DESCRIPTOR_PAGE_SIZE;
PageEntry = (UINT32 *)TranslationTable + FirstPageOffset;
Pages = RemainLength / TT_DESCRIPTOR_PAGE_SIZE;
ASSERT (FirstPageOffset + Pages <= TRANSLATION_TABLE_PAGE_COUNT);
for (Index = 0; Index < Pages; Index++) {
*PageEntry++ = TT_DESCRIPTOR_PAGE_BASE_ADDRESS(PhysicalBase) | PageAttributes;
*PageEntry++ = TT_DESCRIPTOR_PAGE_BASE_ADDRESS (PhysicalBase) | PageAttributes;
PhysicalBase += TT_DESCRIPTOR_PAGE_SIZE;
}
@@ -202,8 +209,10 @@ PopulateLevel2PageTable (
// [speculatively] since the previous invalidate are evicted again.
//
ArmDataMemoryBarrier ();
InvalidateDataCacheRange ((UINT32 *)TranslationTable + FirstPageOffset,
RemainLength / TT_DESCRIPTOR_PAGE_SIZE * sizeof (*PageEntry));
InvalidateDataCacheRange (
(UINT32 *)TranslationTable + FirstPageOffset,
RemainLength / TT_DESCRIPTOR_PAGE_SIZE * sizeof (*PageEntry)
);
}
STATIC
@@ -219,50 +228,50 @@ FillTranslationTable (
UINT64 RemainLength;
UINT32 PageMapLength;
ASSERT(MemoryRegion->Length > 0);
ASSERT (MemoryRegion->Length > 0);
if (MemoryRegion->PhysicalBase >= SIZE_4GB) {
return;
}
PhysicalBase = (UINT32)MemoryRegion->PhysicalBase;
RemainLength = MIN(MemoryRegion->Length, SIZE_4GB - PhysicalBase);
RemainLength = MIN (MemoryRegion->Length, SIZE_4GB - PhysicalBase);
switch (MemoryRegion->Attributes) {
case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
Attributes = TT_DESCRIPTOR_SECTION_WRITE_BACK(0);
Attributes = TT_DESCRIPTOR_SECTION_WRITE_BACK (0);
break;
case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK_NONSHAREABLE:
Attributes = TT_DESCRIPTOR_SECTION_WRITE_BACK(0);
Attributes = TT_DESCRIPTOR_SECTION_WRITE_BACK (0);
Attributes &= ~TT_DESCRIPTOR_SECTION_S_SHARED;
break;
case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
Attributes = TT_DESCRIPTOR_SECTION_WRITE_THROUGH(0);
Attributes = TT_DESCRIPTOR_SECTION_WRITE_THROUGH (0);
break;
case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
Attributes = TT_DESCRIPTOR_SECTION_DEVICE(0);
Attributes = TT_DESCRIPTOR_SECTION_DEVICE (0);
break;
case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
Attributes = TT_DESCRIPTOR_SECTION_UNCACHED(0);
Attributes = TT_DESCRIPTOR_SECTION_UNCACHED (0);
break;
case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
Attributes = TT_DESCRIPTOR_SECTION_WRITE_BACK(1);
Attributes = TT_DESCRIPTOR_SECTION_WRITE_BACK (1);
break;
case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK_NONSHAREABLE:
Attributes = TT_DESCRIPTOR_SECTION_WRITE_BACK(1);
Attributes = TT_DESCRIPTOR_SECTION_WRITE_BACK (1);
Attributes &= ~TT_DESCRIPTOR_SECTION_S_SHARED;
break;
case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
Attributes = TT_DESCRIPTOR_SECTION_WRITE_THROUGH(1);
Attributes = TT_DESCRIPTOR_SECTION_WRITE_THROUGH (1);
break;
case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
Attributes = TT_DESCRIPTOR_SECTION_DEVICE(1);
Attributes = TT_DESCRIPTOR_SECTION_DEVICE (1);
break;
case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
Attributes = TT_DESCRIPTOR_SECTION_UNCACHED(1);
Attributes = TT_DESCRIPTOR_SECTION_UNCACHED (1);
break;
default:
Attributes = TT_DESCRIPTOR_SECTION_UNCACHED(0);
Attributes = TT_DESCRIPTOR_SECTION_UNCACHED (0);
break;
}
@@ -271,14 +280,15 @@ FillTranslationTable (
}
// Get the first section entry for this mapping
SectionEntry = TRANSLATION_TABLE_ENTRY_FOR_VIRTUAL_ADDRESS(TranslationTable, MemoryRegion->VirtualBase);
SectionEntry = TRANSLATION_TABLE_ENTRY_FOR_VIRTUAL_ADDRESS (TranslationTable, MemoryRegion->VirtualBase);
while (RemainLength != 0) {
if (PhysicalBase % TT_DESCRIPTOR_SECTION_SIZE == 0 &&
RemainLength >= TT_DESCRIPTOR_SECTION_SIZE) {
if ((PhysicalBase % TT_DESCRIPTOR_SECTION_SIZE == 0) &&
(RemainLength >= TT_DESCRIPTOR_SECTION_SIZE))
{
// Case: Physical address aligned on the Section Size (1MB) && the length
// is greater than the Section Size
*SectionEntry = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(PhysicalBase) | Attributes;
*SectionEntry = TT_DESCRIPTOR_SECTION_BASE_ADDRESS (PhysicalBase) | Attributes;
//
// Issue a DMB to ensure that the page table entry update made it to
@@ -291,14 +301,21 @@ FillTranslationTable (
PhysicalBase += TT_DESCRIPTOR_SECTION_SIZE;
RemainLength -= TT_DESCRIPTOR_SECTION_SIZE;
} else {
PageMapLength = MIN ((UINT32)RemainLength, TT_DESCRIPTOR_SECTION_SIZE -
(PhysicalBase % TT_DESCRIPTOR_SECTION_SIZE));
PageMapLength = MIN (
(UINT32)RemainLength,
TT_DESCRIPTOR_SECTION_SIZE -
(PhysicalBase % TT_DESCRIPTOR_SECTION_SIZE)
);
// Case: Physical address aligned on the Section Size (1MB) && the length
// does not fill a section
// Case: Physical address NOT aligned on the Section Size (1MB)
PopulateLevel2PageTable (SectionEntry, PhysicalBase, PageMapLength,
MemoryRegion->Attributes);
PopulateLevel2PageTable (
SectionEntry,
PhysicalBase,
PageMapLength,
MemoryRegion->Attributes
);
//
// Issue a DMB to ensure that the page table entry update made it to
@@ -323,16 +340,17 @@ RETURN_STATUS
EFIAPI
ArmConfigureMmu (
IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,
OUT VOID **TranslationTableBase OPTIONAL,
OUT VOID **TranslationTableBase OPTIONAL,
OUT UINTN *TranslationTableSize OPTIONAL
)
{
VOID *TranslationTable;
UINT32 TTBRAttributes;
VOID *TranslationTable;
UINT32 TTBRAttributes;
TranslationTable = AllocateAlignedPages (
EFI_SIZE_TO_PAGES (TRANSLATION_TABLE_SECTION_SIZE),
TRANSLATION_TABLE_SECTION_ALIGNMENT);
TRANSLATION_TABLE_SECTION_ALIGNMENT
);
if (TranslationTable == NULL) {
return RETURN_OUT_OF_RESOURCES;
}
@@ -389,25 +407,27 @@ ArmConfigureMmu (
//
ArmSetTTBCR (0);
ArmSetDomainAccessControl (DOMAIN_ACCESS_CONTROL_NONE(15) |
DOMAIN_ACCESS_CONTROL_NONE(14) |
DOMAIN_ACCESS_CONTROL_NONE(13) |
DOMAIN_ACCESS_CONTROL_NONE(12) |
DOMAIN_ACCESS_CONTROL_NONE(11) |
DOMAIN_ACCESS_CONTROL_NONE(10) |
DOMAIN_ACCESS_CONTROL_NONE( 9) |
DOMAIN_ACCESS_CONTROL_NONE( 8) |
DOMAIN_ACCESS_CONTROL_NONE( 7) |
DOMAIN_ACCESS_CONTROL_NONE( 6) |
DOMAIN_ACCESS_CONTROL_NONE( 5) |
DOMAIN_ACCESS_CONTROL_NONE( 4) |
DOMAIN_ACCESS_CONTROL_NONE( 3) |
DOMAIN_ACCESS_CONTROL_NONE( 2) |
DOMAIN_ACCESS_CONTROL_NONE( 1) |
DOMAIN_ACCESS_CONTROL_CLIENT(0));
ArmSetDomainAccessControl (
DOMAIN_ACCESS_CONTROL_NONE (15) |
DOMAIN_ACCESS_CONTROL_NONE (14) |
DOMAIN_ACCESS_CONTROL_NONE (13) |
DOMAIN_ACCESS_CONTROL_NONE (12) |
DOMAIN_ACCESS_CONTROL_NONE (11) |
DOMAIN_ACCESS_CONTROL_NONE (10) |
DOMAIN_ACCESS_CONTROL_NONE (9) |
DOMAIN_ACCESS_CONTROL_NONE (8) |
DOMAIN_ACCESS_CONTROL_NONE (7) |
DOMAIN_ACCESS_CONTROL_NONE (6) |
DOMAIN_ACCESS_CONTROL_NONE (5) |
DOMAIN_ACCESS_CONTROL_NONE (4) |
DOMAIN_ACCESS_CONTROL_NONE (3) |
DOMAIN_ACCESS_CONTROL_NONE (2) |
DOMAIN_ACCESS_CONTROL_NONE (1) |
DOMAIN_ACCESS_CONTROL_CLIENT (0)
);
ArmEnableInstructionCache();
ArmEnableDataCache();
ArmEnableMmu();
ArmEnableInstructionCache ();
ArmEnableDataCache ();
ArmEnableMmu ();
return RETURN_SUCCESS;
}


@@ -18,9 +18,9 @@
#include <Chipset/ArmV7.h>
#define __EFI_MEMORY_RWX 0 // no restrictions
#define __EFI_MEMORY_RWX 0 // no restrictions
#define CACHE_ATTRIBUTE_MASK (EFI_MEMORY_UC | \
#define CACHE_ATTRIBUTE_MASK (EFI_MEMORY_UC | \
EFI_MEMORY_WC | \
EFI_MEMORY_WT | \
EFI_MEMORY_WB | \
@@ -33,14 +33,14 @@ ConvertSectionToPages (
IN EFI_PHYSICAL_ADDRESS BaseAddress
)
{
UINT32 FirstLevelIdx;
UINT32 SectionDescriptor;
UINT32 PageTableDescriptor;
UINT32 PageDescriptor;
UINT32 Index;
UINT32 FirstLevelIdx;
UINT32 SectionDescriptor;
UINT32 PageTableDescriptor;
UINT32 PageDescriptor;
UINT32 Index;
volatile ARM_FIRST_LEVEL_DESCRIPTOR *FirstLevelTable;
volatile ARM_PAGE_TABLE_ENTRY *PageTable;
volatile ARM_FIRST_LEVEL_DESCRIPTOR *FirstLevelTable;
volatile ARM_PAGE_TABLE_ENTRY *PageTable;
DEBUG ((DEBUG_PAGE, "Converting section at 0x%x to pages\n", (UINTN)BaseAddress));
@@ -48,12 +48,12 @@ ConvertSectionToPages (
FirstLevelTable = (ARM_FIRST_LEVEL_DESCRIPTOR *)ArmGetTTBR0BaseAddress ();
// Calculate index into first level translation table for start of modification
FirstLevelIdx = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(BaseAddress) >> TT_DESCRIPTOR_SECTION_BASE_SHIFT;
FirstLevelIdx = TT_DESCRIPTOR_SECTION_BASE_ADDRESS (BaseAddress) >> TT_DESCRIPTOR_SECTION_BASE_SHIFT;
ASSERT (FirstLevelIdx < TRANSLATION_TABLE_SECTION_COUNT);
// Get section attributes and convert to page attributes
SectionDescriptor = FirstLevelTable[FirstLevelIdx];
PageDescriptor = TT_DESCRIPTOR_PAGE_TYPE_PAGE | ConvertSectionAttributesToPageAttributes (SectionDescriptor, FALSE);
PageDescriptor = TT_DESCRIPTOR_PAGE_TYPE_PAGE | ConvertSectionAttributesToPageAttributes (SectionDescriptor, FALSE);
// Allocate a page table for the 4KB entries (we use up a full page even though we only need 1KB)
PageTable = (volatile ARM_PAGE_TABLE_ENTRY *)AllocatePages (1);
@@ -63,7 +63,7 @@ ConvertSectionToPages (
// Write the page table entries out
for (Index = 0; Index < TRANSLATION_TABLE_PAGE_COUNT; Index++) {
PageTable[Index] = TT_DESCRIPTOR_PAGE_BASE_ADDRESS(BaseAddress + (Index << 12)) | PageDescriptor;
PageTable[Index] = TT_DESCRIPTOR_PAGE_BASE_ADDRESS (BaseAddress + (Index << 12)) | PageDescriptor;
}
// Formulate page table entry, Domain=0, NS=0
@@ -78,27 +78,27 @@ ConvertSectionToPages (
STATIC
EFI_STATUS
UpdatePageEntries (
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length,
IN UINT64 Attributes,
OUT BOOLEAN *FlushTlbs OPTIONAL
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length,
IN UINT64 Attributes,
OUT BOOLEAN *FlushTlbs OPTIONAL
)
{
EFI_STATUS Status;
UINT32 EntryValue;
UINT32 EntryMask;
UINT32 FirstLevelIdx;
UINT32 Offset;
UINT32 NumPageEntries;
UINT32 Descriptor;
UINT32 p;
UINT32 PageTableIndex;
UINT32 PageTableEntry;
UINT32 CurrentPageTableEntry;
VOID *Mva;
EFI_STATUS Status;
UINT32 EntryValue;
UINT32 EntryMask;
UINT32 FirstLevelIdx;
UINT32 Offset;
UINT32 NumPageEntries;
UINT32 Descriptor;
UINT32 p;
UINT32 PageTableIndex;
UINT32 PageTableEntry;
UINT32 CurrentPageTableEntry;
VOID *Mva;
volatile ARM_FIRST_LEVEL_DESCRIPTOR *FirstLevelTable;
volatile ARM_PAGE_TABLE_ENTRY *PageTable;
volatile ARM_FIRST_LEVEL_DESCRIPTOR *FirstLevelTable;
volatile ARM_PAGE_TABLE_ENTRY *PageTable;
Status = EFI_SUCCESS;
@@ -156,19 +156,19 @@ UpdatePageEntries (
// Iterate for the number of 4KB pages to change
Offset = 0;
for(p = 0; p < NumPageEntries; p++) {
for (p = 0; p < NumPageEntries; p++) {
// Calculate index into first level translation table for page table value
FirstLevelIdx = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(BaseAddress + Offset) >> TT_DESCRIPTOR_SECTION_BASE_SHIFT;
FirstLevelIdx = TT_DESCRIPTOR_SECTION_BASE_ADDRESS (BaseAddress + Offset) >> TT_DESCRIPTOR_SECTION_BASE_SHIFT;
ASSERT (FirstLevelIdx < TRANSLATION_TABLE_SECTION_COUNT);
// Read the descriptor from the first level page table
Descriptor = FirstLevelTable[FirstLevelIdx];
// Does this descriptor need to be converted from section entry to 4K pages?
if (!TT_DESCRIPTOR_SECTION_TYPE_IS_PAGE_TABLE(Descriptor)) {
if (!TT_DESCRIPTOR_SECTION_TYPE_IS_PAGE_TABLE (Descriptor)) {
Status = ConvertSectionToPages (FirstLevelIdx << TT_DESCRIPTOR_SECTION_BASE_SHIFT);
if (EFI_ERROR(Status)) {
if (EFI_ERROR (Status)) {
// Exit for loop
break;
}
@@ -181,7 +181,7 @@ UpdatePageEntries (
}
// Obtain page table base address
PageTable = (ARM_PAGE_TABLE_ENTRY *)TT_DESCRIPTOR_PAGE_BASE_ADDRESS(Descriptor);
PageTable = (ARM_PAGE_TABLE_ENTRY *)TT_DESCRIPTOR_PAGE_BASE_ADDRESS (Descriptor);
// Calculate index into the page table
PageTableIndex = ((BaseAddress + Offset) & TT_DESCRIPTOR_PAGE_INDEX_MASK) >> TT_DESCRIPTOR_PAGE_BASE_SHIFT;
@@ -204,9 +204,8 @@ UpdatePageEntries (
ArmUpdateTranslationTableEntry ((VOID *)&PageTable[PageTableIndex], Mva);
}
Status = EFI_SUCCESS;
Status = EFI_SUCCESS;
Offset += TT_DESCRIPTOR_PAGE_SIZE;
} // End first level translation table loop
return Status;
@@ -215,21 +214,21 @@ UpdatePageEntries (
STATIC
EFI_STATUS
UpdateSectionEntries (
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length,
IN UINT64 Attributes
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length,
IN UINT64 Attributes
)
{
EFI_STATUS Status;
UINT32 EntryMask;
UINT32 EntryValue;
UINT32 FirstLevelIdx;
UINT32 NumSections;
UINT32 i;
UINT32 CurrentDescriptor;
UINT32 Descriptor;
VOID *Mva;
volatile ARM_FIRST_LEVEL_DESCRIPTOR *FirstLevelTable;
EFI_STATUS Status;
UINT32 EntryMask;
UINT32 EntryValue;
UINT32 FirstLevelIdx;
UINT32 NumSections;
UINT32 i;
UINT32 CurrentDescriptor;
UINT32 Descriptor;
VOID *Mva;
volatile ARM_FIRST_LEVEL_DESCRIPTOR *FirstLevelTable;
Status = EFI_SUCCESS;
@@ -286,24 +285,25 @@ UpdateSectionEntries (
FirstLevelTable = (ARM_FIRST_LEVEL_DESCRIPTOR *)ArmGetTTBR0BaseAddress ();
// calculate index into first level translation table for start of modification
FirstLevelIdx = TT_DESCRIPTOR_SECTION_BASE_ADDRESS(BaseAddress) >> TT_DESCRIPTOR_SECTION_BASE_SHIFT;
FirstLevelIdx = TT_DESCRIPTOR_SECTION_BASE_ADDRESS (BaseAddress) >> TT_DESCRIPTOR_SECTION_BASE_SHIFT;
ASSERT (FirstLevelIdx < TRANSLATION_TABLE_SECTION_COUNT);
// calculate number of 1MB first level entries this applies to
NumSections = (UINT32)(Length / TT_DESCRIPTOR_SECTION_SIZE);
// iterate through each descriptor
for(i=0; i<NumSections; i++) {
for (i = 0; i < NumSections; i++) {
CurrentDescriptor = FirstLevelTable[FirstLevelIdx + i];
// has this descriptor already been converted to pages?
if (TT_DESCRIPTOR_SECTION_TYPE_IS_PAGE_TABLE(CurrentDescriptor)) {
if (TT_DESCRIPTOR_SECTION_TYPE_IS_PAGE_TABLE (CurrentDescriptor)) {
// forward this 1MB range to page table function instead
Status = UpdatePageEntries (
(FirstLevelIdx + i) << TT_DESCRIPTOR_SECTION_BASE_SHIFT,
TT_DESCRIPTOR_SECTION_SIZE,
Attributes,
NULL);
NULL
);
} else {
// still a section entry
@@ -334,14 +334,14 @@ UpdateSectionEntries (
EFI_STATUS
ArmSetMemoryAttributes (
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length,
IN UINT64 Attributes
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length,
IN UINT64 Attributes
)
{
EFI_STATUS Status;
UINT64 ChunkLength;
BOOLEAN FlushTlbs;
EFI_STATUS Status;
UINT64 ChunkLength;
BOOLEAN FlushTlbs;
if (BaseAddress > (UINT64)MAX_ADDRESS) {
return EFI_UNSUPPORTED;
@@ -355,19 +355,22 @@ ArmSetMemoryAttributes (
FlushTlbs = FALSE;
while (Length > 0) {
if ((BaseAddress % TT_DESCRIPTOR_SECTION_SIZE == 0) &&
Length >= TT_DESCRIPTOR_SECTION_SIZE) {
(Length >= TT_DESCRIPTOR_SECTION_SIZE))
{
ChunkLength = Length - Length % TT_DESCRIPTOR_SECTION_SIZE;
DEBUG ((DEBUG_PAGE,
DEBUG ((
DEBUG_PAGE,
"SetMemoryAttributes(): MMU section 0x%lx length 0x%lx to %lx\n",
BaseAddress, ChunkLength, Attributes));
BaseAddress,
ChunkLength,
Attributes
));
Status = UpdateSectionEntries (BaseAddress, ChunkLength, Attributes);
FlushTlbs = TRUE;
} else {
//
// Process page by page until the next section boundary, but only if
// we have more than a section's worth of area to deal with after that.
@@ -378,12 +381,20 @@ ArmSetMemoryAttributes (
ChunkLength = Length;
}
DEBUG ((DEBUG_PAGE,
DEBUG ((
DEBUG_PAGE,
"SetMemoryAttributes(): MMU page 0x%lx length 0x%lx to %lx\n",
BaseAddress, ChunkLength, Attributes));
BaseAddress,
ChunkLength,
Attributes
));
Status = UpdatePageEntries (BaseAddress, ChunkLength, Attributes,
&FlushTlbs);
Status = UpdatePageEntries (
BaseAddress,
ChunkLength,
Attributes,
&FlushTlbs
);
}
if (EFI_ERROR (Status)) {
@@ -391,19 +402,20 @@ ArmSetMemoryAttributes (
}
BaseAddress += ChunkLength;
Length -= ChunkLength;
Length -= ChunkLength;
}
if (FlushTlbs) {
ArmInvalidateTlb ();
}
return Status;
}
EFI_STATUS
ArmSetMemoryRegionNoExec (
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length
)
{
return ArmSetMemoryAttributes (BaseAddress, Length, EFI_MEMORY_XP);
@@ -411,8 +423,8 @@ ArmSetMemoryRegionNoExec (
EFI_STATUS
ArmClearMemoryRegionNoExec (
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length
)
{
return ArmSetMemoryAttributes (BaseAddress, Length, __EFI_MEMORY_RWX);
@@ -420,8 +432,8 @@ ArmClearMemoryRegionNoExec (
EFI_STATUS
ArmSetMemoryRegionReadOnly (
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length
)
{
return ArmSetMemoryAttributes (BaseAddress, Length, EFI_MEMORY_RO);
@@ -429,8 +441,8 @@ ArmSetMemoryRegionReadOnly (
EFI_STATUS
ArmClearMemoryRegionReadOnly (
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length
)
{
return ArmSetMemoryAttributes (BaseAddress, Length, __EFI_MEMORY_RWX);