ArmPkg/ArmLib: remove unused ArmCleanDataCacheToPoU()
The function ArmCleanDataCacheToPoU() has no users, and its purpose is
unclear, since it uses cache maintenance by set/way to perform the clean
to PoU, which is a dubious practice to begin with. So remove the
declaration and all definitions.

Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Leif Lindholm <leif.lindholm@linaro.org>

git-svn-id: https://svn.code.sf.net/p/edk2/code/trunk/edk2@18752 6f19259b-4bc3-4df7-8a09-765794883524
commit acdb6dc8b7
parent cf93a37859
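For context: cleaning to the PoU is normally done by virtual address over the
range that actually needs it (DC CVAU on AArch64, DCCMVAU on ARMv7) rather than
by set/way, which the architecture really only intends for a core's own
power-down/power-up sequences. The sketch below illustrates the VA-based
approach on AArch64; the helper name and range-based interface are hypothetical
and are not part of this patch or of ArmLib, and it assumes EDK2 base types and
a GCC-style toolchain.

// Illustrative sketch only (hypothetical helper, not part of this patch):
// clean a VA range to the Point of Unification using DC CVAU.
VOID
CleanDataCacheRangeToPoU (
  IN VOID   *Address,
  IN UINTN  Length
  )
{
  UINT64  Ctr;
  UINTN   LineLength;
  UINTN   Cursor;
  UINTN   End;

  // CTR_EL0.DminLine (bits [19:16]) is log2 of the smallest D-cache line
  // size in words, so the line size in bytes is 4 << DminLine.
  __asm__ volatile ("mrs %0, ctr_el0" : "=r" (Ctr));
  LineLength = (UINTN)4 << ((Ctr >> 16) & 0xF);

  Cursor = (UINTN)Address & ~(LineLength - 1);
  End    = (UINTN)Address + Length;

  while (Cursor < End) {
    // Clean this line by VA to the PoU
    __asm__ volatile ("dc cvau, %0" : : "r" (Cursor) : "memory");
    Cursor += LineLength;
  }

  // Ensure the maintenance completes before it is relied upon
  __asm__ volatile ("dsb ish" : : : "memory");
}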
@@ -241,12 +241,6 @@ ArmCleanDataCache (
   VOID
   );
 
-VOID
-EFIAPI
-ArmCleanDataCacheToPoU (
-  VOID
-  );
-
 VOID
 EFIAPI
 ArmInvalidateInstructionCache (
@@ -206,26 +206,6 @@ AArch64DataCacheOperation (
   }
 }
 
-
-VOID
-AArch64PoUDataCacheOperation (
-  IN  AARCH64_CACHE_OPERATION  DataCacheOperation
-  )
-{
-  UINTN  SavedInterruptState;
-
-  SavedInterruptState = ArmGetInterruptState ();
-  ArmDisableInterrupts ();
-
-  AArch64PerformPoUDataCacheOperation (DataCacheOperation);
-
-  ArmDrainWriteBuffer ();
-
-  if (SavedInterruptState) {
-    ArmEnableInterrupts ();
-  }
-}
-
 VOID
 EFIAPI
 ArmInvalidateDataCache (
@@ -255,13 +235,3 @@ ArmCleanDataCache (
   ArmDrainWriteBuffer ();
   AArch64DataCacheOperation (ArmCleanDataCacheEntryBySetWay);
 }
-
-VOID
-EFIAPI
-ArmCleanDataCacheToPoU (
-  VOID
-  )
-{
-  ArmDrainWriteBuffer ();
-  AArch64PoUDataCacheOperation (ArmCleanDataCacheEntryBySetWay);
-}
@@ -18,12 +18,6 @@
 
 typedef VOID (*AARCH64_CACHE_OPERATION)(UINTN);
 
-
-VOID
-AArch64PerformPoUDataCacheOperation (
-  IN  AARCH64_CACHE_OPERATION  DataCacheOperation
-  );
-
 VOID
 AArch64AllDataCachesOperation (
   IN  AARCH64_CACHE_OPERATION  DataCacheOperation
@@ -40,7 +40,6 @@ GCC_ASM_EXPORT (ArmEnableAlignmentCheck)
 GCC_ASM_EXPORT (ArmEnableBranchPrediction)
 GCC_ASM_EXPORT (ArmDisableBranchPrediction)
 GCC_ASM_EXPORT (AArch64AllDataCachesOperation)
-GCC_ASM_EXPORT (AArch64PerformPoUDataCacheOperation)
 GCC_ASM_EXPORT (ArmDataMemoryBarrier)
 GCC_ASM_EXPORT (ArmDataSynchronizationBarrier)
 GCC_ASM_EXPORT (ArmInstructionSynchronizationBarrier)
@@ -324,19 +323,6 @@ ASM_PFX(AArch64AllDataCachesOperation):
                              // right to ease the access to CSSELR and the Set/Way operation.
   cbz   x3, L_Finished       // No need to clean if LoC is 0
   mov   x10, #0              // Start clean at cache level 0
-  b     Loop1
-
-ASM_PFX(AArch64PerformPoUDataCacheOperation):
-// We can use regs 0-7 and 9-15 without having to save/restore.
-// Save our link register on the stack. - The stack must always be quad-word aligned
-  str   x30, [sp, #-16]!
-  mov   x1, x0               // Save Function call in x1
-  mrs   x6, clidr_el1        // Read EL1 CLIDR
-  and   x3, x6, #0x38000000  // Mask out all but Point of Unification (PoU)
-  lsr   x3, x3, #26          // Left align cache level value - the level is shifted by 1 to the
-                             // right to ease the access to CSSELR and the Set/Way operation.
-  cbz   x3, L_Finished       // No need to clean if LoC is 0
-  mov   x10, #0              // Start clean at cache level 0
 
 Loop1:
   add   x2, x10, x10, lsr #1 // Work out 3x cachelevel for cache info
@@ -208,26 +208,6 @@ ArmV7DataCacheOperation (
   }
 }
 
-
-VOID
-ArmV7PoUDataCacheOperation (
-  IN  ARM_V7_CACHE_OPERATION  DataCacheOperation
-  )
-{
-  UINTN  SavedInterruptState;
-
-  SavedInterruptState = ArmGetInterruptState ();
-  ArmDisableInterrupts ();
-
-  ArmV7PerformPoUDataCacheOperation (DataCacheOperation);
-
-  ArmDrainWriteBuffer ();
-
-  if (SavedInterruptState) {
-    ArmEnableInterrupts ();
-  }
-}
-
 VOID
 EFIAPI
 ArmInvalidateDataCache (
@@ -257,13 +237,3 @@ ArmCleanDataCache (
   ArmDrainWriteBuffer ();
   ArmV7DataCacheOperation (ArmCleanDataCacheEntryBySetWay);
 }
-
-VOID
-EFIAPI
-ArmCleanDataCacheToPoU (
-  VOID
-  )
-{
-  ArmDrainWriteBuffer ();
-  ArmV7PoUDataCacheOperation (ArmCleanDataCacheEntryBySetWay);
-}
@@ -17,12 +17,6 @@
 
 typedef VOID (*ARM_V7_CACHE_OPERATION)(UINT32);
 
-
-VOID
-ArmV7PerformPoUDataCacheOperation (
-  IN  ARM_V7_CACHE_OPERATION  DataCacheOperation
-  );
-
 VOID
 ArmV7AllDataCachesOperation (
   IN  ARM_V7_CACHE_OPERATION  DataCacheOperation
@@ -38,7 +38,6 @@ GCC_ASM_EXPORT (ArmDisableBranchPrediction)
 GCC_ASM_EXPORT (ArmSetLowVectors)
 GCC_ASM_EXPORT (ArmSetHighVectors)
 GCC_ASM_EXPORT (ArmV7AllDataCachesOperation)
-GCC_ASM_EXPORT (ArmV7PerformPoUDataCacheOperation)
 GCC_ASM_EXPORT (ArmDataMemoryBarrier)
 GCC_ASM_EXPORT (ArmDataSynchronizationBarrier)
 GCC_ASM_EXPORT (ArmInstructionSynchronizationBarrier)
@@ -268,55 +267,6 @@ L_Finished:
   ldmfd SP!, {r4-r12, lr}
   bx    LR
 
-ASM_PFX(ArmV7PerformPoUDataCacheOperation):
-  stmfd SP!,{r4-r12, LR}
-  mov   R1, R0                 @ Save Function call in R1
-  mrc   p15, 1, R6, c0, c0, 1  @ Read CLIDR
-  ands  R3, R6, #0x38000000    @ Mask out all but Level of Unification (LoU)
-  mov   R3, R3, LSR #26        @ Cache level value (naturally aligned)
-  beq   Finished2
-  mov   R10, #0
-
-Loop4:
-  add   R2, R10, R10, LSR #1   @ Work out 3xcachelevel
-  mov   R12, R6, LSR R2        @ bottom 3 bits are the Cache type for this level
-  and   R12, R12, #7           @ get those 3 bits alone
-  cmp   R12, #2
-  blt   Skip2                  @ no cache or only instruction cache at this level
-  mcr   p15, 2, R10, c0, c0, 0 @ write the Cache Size selection register (CSSELR) // OR in 1 for Instruction
-  isb                          @ isb to sync the change to the CacheSizeID reg
-  mrc   p15, 1, R12, c0, c0, 0 @ reads current Cache Size ID register (CCSIDR)
-  and   R2, R12, #0x7          @ extract the line length field
-  add   R2, R2, #4             @ add 4 for the line length offset (log2 16 bytes)
-  ldr   R4, =0x3FF
-  ands  R4, R4, R12, LSR #3    @ R4 is the max number on the way size (right aligned)
-  clz   R5, R4                 @ R5 is the bit position of the way size increment
-  ldr   R7, =0x00007FFF
-  ands  R7, R7, R12, LSR #13   @ R7 is the max number of the index size (right aligned)
-
-Loop5:
-  mov   R9, R4                 @ R9 working copy of the max way size (right aligned)
-
-Loop6:
-  orr   R0, R10, R9, LSL R5    @ factor in the way number and cache number into R11
-  orr   R0, R0, R7, LSL R2     @ factor in the index number
-
-  blx   R1
-
-  subs  R9, R9, #1             @ decrement the way number
-  bge   Loop6
-  subs  R7, R7, #1             @ decrement the index
-  bge   Loop5
-Skip2:
-  add   R10, R10, #2           @ increment the cache number
-  cmp   R3, R10
-  bgt   Loop4
-
-Finished2:
-  dsb
-  ldmfd SP!, {r4-r12, lr}
-  bx    LR
-
 ASM_PFX(ArmDataMemoryBarrier):
   dmb
   bx    LR
@@ -35,7 +35,6 @@
   EXPORT  ArmSetLowVectors
   EXPORT  ArmSetHighVectors
   EXPORT  ArmV7AllDataCachesOperation
-  EXPORT  ArmV7PerformPoUDataCacheOperation
   EXPORT  ArmDataMemoryBarrier
   EXPORT  ArmDataSynchronizationBarrier
   EXPORT  ArmInstructionSynchronizationBarrier
@@ -262,55 +261,6 @@ Finished
   ldmfd SP!, {r4-r12, lr}
   bx    LR
 
-ArmV7PerformPoUDataCacheOperation
-  stmfd SP!,{r4-r12, LR}
-  mov   R1, R0                 ; Save Function call in R1
-  mrc   p15, 1, R6, c0, c0, 1  ; Read CLIDR
-  ands  R3, R6, #&38000000     ; Mask out all but Level of Unification (LoU)
-  mov   R3, R3, LSR #26        ; Cache level value (naturally aligned)
-  beq   Finished2
-  mov   R10, #0
-
-Loop4
-  add   R2, R10, R10, LSR #1   ; Work out 3xcachelevel
-  mov   R12, R6, LSR R2        ; bottom 3 bits are the Cache type for this level
-  and   R12, R12, #7           ; get those 3 bits alone
-  cmp   R12, #2
-  blt   Skip2                  ; no cache or only instruction cache at this level
-  mcr   p15, 2, R10, c0, c0, 0 ; write the Cache Size selection register (CSSELR) // OR in 1 for Instruction
-  isb                          ; isb to sync the change to the CacheSizeID reg
-  mrc   p15, 1, R12, c0, c0, 0 ; reads current Cache Size ID register (CCSIDR)
-  and   R2, R12, #&7           ; extract the line length field
-  add   R2, R2, #4             ; add 4 for the line length offset (log2 16 bytes)
-  ldr   R4, =0x3FF
-  ands  R4, R4, R12, LSR #3    ; R4 is the max number on the way size (right aligned)
-  clz   R5, R4                 ; R5 is the bit position of the way size increment
-  ldr   R7, =0x00007FFF
-  ands  R7, R7, R12, LSR #13   ; R7 is the max number of the index size (right aligned)
-
-Loop5
-  mov   R9, R4                 ; R9 working copy of the max way size (right aligned)
-
-Loop6
-  orr   R0, R10, R9, LSL R5    ; factor in the way number and cache number into R11
-  orr   R0, R0, R7, LSL R2     ; factor in the index number
-
-  blx   R1
-
-  subs  R9, R9, #1             ; decrement the way number
-  bge   Loop6
-  subs  R7, R7, #1             ; decrement the index
-  bge   Loop5
-Skip2
-  add   R10, R10, #2           ; increment the cache number
-  cmp   R3, R10
-  bgt   Loop4
-
-Finished2
-  dsb
-  ldmfd SP!, {r4-r12, lr}
-  bx    LR
-
 ArmDataMemoryBarrier
   dmb
   bx    LR