This patch introduces level-specific data cache maintenance operations
to cache_helpers.S. It is derived from the ARM Trusted Firmware
repository; for reference, see
https://github.com/ARM-software/arm-trusted-firmware/blob/master/lib/aarch64/cache_helpers.S

BRANCH=none
BUG=none
TEST=boot on smaug/foster

Change-Id: Ib58a6d6f95eb51ce5d80749ff51d9d389b0d1343
Signed-off-by: Patrick Georgi <pgeorgi@chromium.org>
Original-Commit-Id: b3d1a16bd0089740f1f2257146c771783beece82
Original-Change-Id: Ifcd1dbcd868331107d0d47af73545a3a159fdff6
Original-Signed-off-by: Joseph Lo <josephl@nvidia.com>
Original-Reviewed-on: https://chromium-review.googlesource.com/265826
Original-Reviewed-by: Aaron Durbin <adurbin@chromium.org>
Reviewed-on: http://review.coreboot.org/9979
Tested-by: build bot (Jenkins)
Reviewed-by: Marc Jones <marc.jones@se-eng.com>
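For illustration only, a minimal caller sketch of the new helpers. It assumes, as the file below shows, that flush_dcache_all takes the set/way operation type in x0 and that DCCISW is provided by <arch/cache_helpers.h>; any companion clean-only/invalidate-only constants are assumed to follow the ARM Trusted Firmware naming and are not guaranteed here.

	mov	x0, #DCCISW		/* clean + invalidate dcache by set/way */
	bl	flush_dcache_all	/* walk every cache level reported by CLIDR_EL1 */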
/*
 * Based on arch/arm/include/asm/cacheflush.h
 *
 * Copyright (C) 1999-2002 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <arch/asm.h>
#include <arch/cache_helpers.h>

/*
 * Bring an ARMv8 processor we just gained control of (e.g. from IROM) into a
 * known state regarding caches/SCTLR. Completely cleans and invalidates
 * icache/dcache, disables MMU and dcache (if active), and enables unaligned
 * accesses, icache and branch prediction (if inactive). Clobbers x4 and x5.
 */
ENTRY(arm_init_caches)
	/* w4: SCTLR, return address: x8 (stay valid for the whole function) */
	mov	x8, x30
	/* XXX: Assume that we always start running at EL3 */
	mrs	x4, sctlr_el3

	/* FIXME: How to enable branch prediction on ARMv8? */

	/* Flush and invalidate dcache */
	mov	x0, #DCCISW
	bl	flush_dcache_all

	/* Deactivate MMU (0), Alignment Check (1) and DCache (2) */
	and	x4, x4, # ~(1 << 0) & ~(1 << 1) & ~(1 << 2)
	/* Activate ICache (12) already for speed */
	orr	x4, x4, #(1 << 12)
	msr	sctlr_el3, x4

	/* Invalidate icache and TLB for good measure */
	ic	iallu
	tlbi	alle3
	dsb	sy
	isb

	ret	x8
ENDPROC(arm_init_caches)

/* Based on u-boot transition.S */
ENTRY(switch_el3_to_el2)
	mov	x0, #0x5b1	/* Non-secure EL0/EL1 | HVC | 64bit EL2 */
	msr	scr_el3, x0
	msr	cptr_el3, xzr	/* Disable coprocessor traps to EL3 */
	mov	x0, #0x33ff
	msr	cptr_el2, x0	/* Disable coprocessor traps to EL2 */

	/* Return to the EL2_SP2 mode from EL3 */
	mov	x0, sp
	msr	sp_el2, x0	/* Migrate SP */
	mrs	x0, vbar_el3
	msr	vbar_el2, x0	/* Migrate VBAR */
	mrs	x0, sctlr_el3
	msr	sctlr_el2, x0	/* Migrate SCTLR */
	mov	x0, #0x3c9
	msr	spsr_el3, x0	/* EL2_SP2 | D | A | I | F */
	msr	elr_el3, x30
	eret
ENDPROC(switch_el3_to_el2)