ArmPkg/ArmLib: don't invalidate entire I-cache on range operation
Instead of cleaning the data cache to the PoU by virtual address and subsequently invalidating the entire I-cache, invalidate only the range that we just cleaned. This way, we don't invalidate other cachelines unnecessarily.

Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Leif Lindholm <leif.lindholm@linaro.org>
This commit is contained in:
@@ -18,6 +18,7 @@
|
||||
|
||||
GCC_ASM_EXPORT (ArmInvalidateInstructionCache)
|
||||
GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryByMVA)
|
||||
GCC_ASM_EXPORT (ArmInvalidateInstructionCacheEntryToPoUByMVA)
|
||||
GCC_ASM_EXPORT (ArmCleanDataCacheEntryByMVA)
|
||||
GCC_ASM_EXPORT (ArmCleanDataCacheEntryToPoUByMVA)
|
||||
GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryByMVA)
|
||||
@@ -74,6 +75,10 @@ ASM_PFX(ArmCleanDataCacheEntryToPoUByMVA):
|
||||
mcr p15, 0, r0, c7, c11, 1 @clean single data cache line to PoU
|
||||
bx lr
|
||||
|
||||
ASM_PFX(ArmInvalidateInstructionCacheEntryToPoUByMVA):
|
||||
mcr p15, 0, r0, c7, c5, 1 @Invalidate single instruction cache line to PoU
|
||||
mcr p15, 0, r0, c7, c5, 7 @Invalidate branch predictor
|
||||
bx lr
|
||||
|
||||
ASM_PFX(ArmCleanInvalidateDataCacheEntryByMVA):
|
||||
mcr p15, 0, r0, c7, c14, 1 @clean and invalidate single data cache line
|
||||
|
@@ -34,6 +34,12 @@ CTRL_I_BIT EQU (1 << 12)
|
||||
bx lr
|
||||
|
||||
|
||||
RVCT_ASM_EXPORT ArmInvalidateInstructionCacheEntryToPoUByMVA
|
||||
mcr p15, 0, r0, c7, c5, 1 ; invalidate single instruction cache line to PoU
|
||||
mcr p15, 0, r0, c7, c5, 7 ; invalidate branch predictor
|
||||
bx lr
|
||||
|
||||
|
||||
RVCT_ASM_EXPORT ArmCleanDataCacheEntryToPoUByMVA
|
||||
mcr p15, 0, r0, c7, c11, 1 ; clean single data cache line to PoU
|
||||
bx lr
|
||||
|
Reference in New Issue
Block a user