The helper that updates live page table entries writes a zero entry, invalidates the covered address range from the TLBs, and finally writes the actual entry. This ensures that no TLB conflicts can occur.

Writing the final entry needs to complete before any translations can be performed, as otherwise, the zero entry, which describes an invalid translation, may be observed by the page table walker, resulting in a translation fault. For this reason, the final write is followed by a DSB barrier instruction.

However, this barrier will not stall the pipeline, and instruction fetches may still hit this invalid translation, as has been observed and reported by Oliver. To ensure that the new translation is fully active before returning from this helper, we have to insert an ISB barrier as well.

Reported-by: Oliver Steffen <osteffen@redhat.com>
Tested-by: Oliver Steffen <osteffen@redhat.com>
Reviewed-by: Leif Lindholm <quic_llindhol@quicinc.com>
Acked-by: Michael D Kinney <michael.d.kinney@intel.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
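The fix amounts to one instruction at the end of the MMU-on path; a minimal sketch of the resulting tail sequence (the full routine follows below):

  str   x1, [x0]    // write the final entry
  dsb   nshst       // complete the write before new translations are performed
  isb               // keep instruction fetches from hitting the stale zero entry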
#------------------------------------------------------------------------------
#
# Copyright (c) 2016, Linaro Limited. All rights reserved.
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#------------------------------------------------------------------------------

#include <AsmMacroIoLibV8.h>

  .set CTRL_M_BIT,      (1 << 0)
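  // Bit 0 of SCTLR_ELx is the M bit: the stage 1 MMU enable for the
  // exception level at hand.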
  .macro __replace_entry, el
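  // Per AAPCS64, the arguments arrive as x0 = pointer to the entry,
  // x1 = the new entry value, x2 = the virtual address covered by the
  // entry, and x3 = nonzero when the MMU should be disabled around the
  // update.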
  // check whether we should disable the MMU
  cbz   x3, .L1_\@
  // clean and invalidate first so that we don't clobber
  // adjacent entries that are dirty in the caches
  dc    civac, x0
  dsb   nsh

  // disable the MMU
  mrs   x8, sctlr_el\el
  bic   x9, x8, #CTRL_M_BIT
  msr   sctlr_el\el, x9
  isb
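  // With the M bit clear, data accesses at this EL are made with
  // translation off (Device-nGnRnE, uncached), so the store below reaches
  // memory directly and cannot trigger a TLB conflict on the live entry.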
  // write updated entry
  str   x1, [x0]

  // invalidate again to get rid of stale clean cachelines that may
  // have been filled speculatively since the last invalidate
  dmb   sy
  dc    ivac, x0
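  // TLBI by VA expects VA[55:12] in bits [43:0] of its register operand,
  // hence the shift right by 12 below. At EL1, VAAE1 invalidates the
  // address for all ASIDs; EL2 and EL3 translations carry no ASID, so
  // VAE2/VAE3 are used there instead.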
  // flush translations for the target address from the TLBs
  lsr   x2, x2, #12
  .if   \el == 1
  tlbi  vaae1, x2
  .else
  tlbi  vae\el, x2
  .endif
  dsb   nsh
  // re-enable the MMU
  msr   sctlr_el\el, x8
  isb
  b     .L2_\@
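  // The path below replaces the entry with the MMU left enabled, using a
  // break-before-make sequence: write an invalid (zero) entry, flush the
  // old translation from the TLBs, then write the final entry.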
.L1_\@:
  // write invalid entry
  str   xzr, [x0]
  dsb   nshst
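  // NSHST: wait only for stores, and only out to the non-shareable
  // domain; that suffices to order the entry write before the TLB
  // maintenance that follows.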
  // flush translations for the target address from the TLBs
  lsr   x2, x2, #12
  .if   \el == 1
  tlbi  vaae1, x2
  .else
  tlbi  vae\el, x2
  .endif
  dsb   nsh
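  // The DSB below completes the entry write before any new translations
  // can be performed, and the trailing ISB prevents instruction fetches
  // from still hitting the old zero entry before the new translation is
  // fully active (the subject of this patch).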
  // write updated entry
  str   x1, [x0]
  dsb   nshst
  isb

.L2_\@:
  .endm
//VOID
//ArmReplaceLiveTranslationEntry (
//  IN  UINT64   *Entry,
//  IN  UINT64   Value,
//  IN  UINT64   Address,
//  IN  BOOLEAN  DisableMmu
//  )
//
// Align this routine to a log2 upper bound of its size, so that it is
// guaranteed not to cross a page or block boundary.
ASM_FUNC_ALIGN(ArmReplaceLiveTranslationEntry, 0x200)
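  // An exception taken while the entry is invalid, or while the MMU is
  // off, could fault on the very translation being replaced, so all DAIF
  // exceptions are masked for the duration of the update.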
  // disable interrupts
  mrs   x4, daif
  msr   daifset, #0xf
  isb
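  // EL1_OR_EL2_OR_EL3 (from AsmMacroIoLibV8.h) reads CurrentEL into x5
  // and branches to local label 1, 2 or 3 below, matching the exception
  // level this code runs at.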
  EL1_OR_EL2_OR_EL3(x5)
1:__replace_entry 1
  b     4f
2:__replace_entry 2
  b     4f
3:__replace_entry 3

4:msr   daif, x4
  ret
ASM_GLOBAL ASM_PFX(ArmReplaceLiveTranslationEntrySize)

ASM_PFX(ArmReplaceLiveTranslationEntrySize):
  .long   . - ArmReplaceLiveTranslationEntry
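  // The routine's size is exported as a data symbol so that C code can
  // reason about its extent (e.g. to check that it fits in the aligned
  // 0x200-byte window enforced below).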
  // Double check that we did not overrun the assumed maximum size or cross a
  // 0x200 boundary (and thus implicitly not any larger power of two, including
  // the page size).
  .balign 0x200
  .org    ArmReplaceLiveTranslationEntry + 0x200
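  // (.org cannot move the location counter backwards, so the build fails
  // here if the routine has grown past 0x200 bytes.)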