ArmPkg/AArch64Mmu: disable MMU during page table manipulations

On ARM, manipulating live page tables is cumbersome since the architecture
mandates the use of break-before-make, i.e., replacing a block entry with
a table entry requires an intermediate step via an invalid entry, or TLB
conflicts may occur.
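
As an aside (not part of this patch), a minimal C sketch of the break-before-make sequence the architecture expects when splitting a live block entry; TlbInvalidateEntry is a hypothetical stand-in for the appropriate TLBI operation, while ArmDataSynchronizationBarrier is the existing ArmLib barrier helper.

#include <Library/ArmLib.h>

//
// Hypothetical helper standing in for the appropriate TLBI operation for
// the translation regime in use.
//
VOID
TlbInvalidateEntry (
  IN  UINT64  *Entry
  );

VOID
SplitBlockEntry (
  IN  UINT64  *BlockEntry,       // live block entry to be replaced
  IN  UINT64  TableEntryValue    // descriptor of the new table entry
  )
{
  *BlockEntry = 0;                    // "break": install an invalid entry
  ArmDataSynchronizationBarrier ();   // make the write visible to the walker
  TlbInvalidateEntry (BlockEntry);    // drop any cached translations
  ArmDataSynchronizationBarrier ();   // wait for the invalidation to complete
  *BlockEntry = TableEntryValue;      // "make": install the table entry
  ArmDataSynchronizationBarrier ();
}

The problem this patch addresses is the window between "break" and "make": if the invalidated range happens to cover the code or stack of the routine doing the replacement, the sequence above cannot complete.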

Since it is not generally feasible to decide in the page table manipulation
routines whether such an invalid entry will result in those routines
themselves becoming unavailable, use a function that is callable with
the MMU off (i.e., a leaf function that does not access the stack) to
perform the change of a block entry into a table entry.
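
As a rough illustration of the intended call site (the actual C-side change is in one of the other files in this commit and is not shown here), a wrapper along these lines could decide whether the MMU-off helper is needed at all; the ReplaceLiveEntry name is assumed.

#include <Library/ArmLib.h>

//
// Provided by the assembly added in this patch (see the diff below).
//
VOID
ArmReplaceLiveTranslationEntry (
  IN  UINT64  *Entry,
  IN  UINT64  Value
  );

STATIC
VOID
ReplaceLiveEntry (
  IN  UINT64  *Entry,
  IN  UINT64  Value
  )
{
  if (!ArmMmuEnabled ()) {
    // No translations are live yet, so the entry can simply be written.
    *Entry = Value;
  } else {
    // Replace the entry via the leaf routine that runs with the MMU off.
    ArmReplaceLiveTranslationEntry (Entry, Value);
  }
}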

Note that the opposite should never occur, i.e., table entries are never
coalesced into block entries.

Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>

commit 61b02ba1f2
parent d354963f0d
Author: Ard Biesheuvel
Date:   2016-04-11 15:47:24 +02:00
5 changed files with 124 additions and 2 deletions

@@ -56,6 +56,8 @@ GCC_ASM_EXPORT (ArmReadIdPfr1)
GCC_ASM_EXPORT (ArmWriteHcr)
GCC_ASM_EXPORT (ArmReadHcr)
GCC_ASM_EXPORT (ArmReadCurrentEL)
GCC_ASM_EXPORT (ArmReplaceLiveTranslationEntry)
GCC_ASM_EXPORT (ArmReplaceLiveTranslationEntrySize)

.set CTRL_M_BIT, (1 << 0)
.set CTRL_A_BIT, (1 << 1)
@@ -481,4 +483,64 @@ ASM_PFX(ArmReadCurrentEL):
  mrs   x0, CurrentEL
  ret

.macro __replace_entry, el
  // disable the MMU
  mrs   x8, sctlr_el\el
  bic   x9, x8, #CTRL_M_BIT
  msr   sctlr_el\el, x9
  isb

  // write updated entry
  str   x1, [x0]

  // invalidate again to get rid of stale clean cachelines that may
  // have been filled speculatively since the last invalidate
  dmb   sy
  dc    ivac, x0

  // flush the TLBs
  .if   \el == 1
  tlbi  vmalle1
  .else
  tlbi  alle\el
  .endif
  dsb   sy

  // re-enable the MMU
  msr   sctlr_el\el, x8
  isb
.endm

//VOID
//ArmReplaceLiveTranslationEntry (
//  IN  UINT64  *Entry,
//  IN  UINT64  Value
//  )
ASM_PFX(ArmReplaceLiveTranslationEntry):
  // disable interrupts
  mrs   x2, daif
  msr   daifset, #0xf
  isb

  // clean and invalidate first so that we don't clobber
  // adjacent entries that are dirty in the caches
  dc    civac, x0
  dsb   ish

  // branch to the __replace_entry expansion that matches the current
  // exception level (label 1, 2 or 3)
  EL1_OR_EL2_OR_EL3(x3)
1:__replace_entry 1
  b     4f
2:__replace_entry 2
  b     4f
3:__replace_entry 3

4:msr   daif, x2
  ret

ASM_PFX(ArmReplaceLiveTranslationEntrySize):
  .long . - ArmReplaceLiveTranslationEntry

ASM_FUNCTION_REMOVE_IF_UNREFERENCED
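
For reference, a hedged sketch of the C-side declaration that would pair with the ArmReplaceLiveTranslationEntrySize export above; the suggested use of the value is an assumption, since its consumer is in one of the other changed files and not shown in this hunk.

//
// Value stored by the ".long . - ArmReplaceLiveTranslationEntry" directive
// above; a caller could read it, for instance, to verify that the MMU-off
// routine fits within a single contiguous mapping (assumption: the actual
// consumer is not shown here).
//
extern UINT32  ArmReplaceLiveTranslationEntrySize;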