arch/arm64: Add EL1/EL2/EL3 support for arm64
Currently, arch/arm64 requires coreboot to run on EL3 due to EL3 register access. This might be an issue when, for example, one boots into TF-A first and drops into EL2 for coreboot afterwards. This patch aims to make arch/arm64 more versatile by removing the EL3 constraint and allowing arm64 coreboot to run on EL1, EL2 and EL3.

The strategy here is to add a Kconfig option (ARM64_CURRENT_EL) which lets us specify coreboot's exception level upon entry. Based on that, we access the appropriate ELx registers. So, for example, when running coreboot on EL1, we would not access vbar_el3 or vbar_el2 but vbar_el1 instead. This way, we don't generate faults when accessing higher-EL registers.

Currently only tested on the qemu-aarch64 target. Exceptions were tested by enabling FATAL_ASSERTS.

Signed-off-by: David Milosevic <David.Milosevic@9elements.com>
Change-Id: Iae1c57f0846c8d0585384f7e54102a837e701e7e
Reviewed-on: https://review.coreboot.org/c/coreboot/+/74798
Reviewed-by: Werner Zeh <werner.zeh@siemens.com>
Reviewed-by: ron minnich <rminnich@gmail.com>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Julius Werner <jwerner@chromium.org>
commit 41ba11229a (parent 93cbbbfc7f)
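The essence of the change is that the exception level becomes a compile-time constant rather than something probed at run time. A minimal standalone sketch of the idea, assuming CONFIG_ARM64_CURRENT_EL is supplied by Kconfig as 1, 2 or 3 (identifiers here are illustrative, not coreboot's actual code, which follows in the diff below):

	/* Sketch: compile-time EL dispatch. CONFIG_ARM64_CURRENT_EL is
	   hard-coded here only for illustration; it normally comes from
	   Kconfig. */
	#include <stdint.h>

	#define CONFIG_ARM64_CURRENT_EL 3

	static inline uint64_t read_sctlr(void)
	{
		uint64_t v;
		/* The condition is a compile-time constant, so only one of
		   the three mrs instructions survives in the object code. */
		if (CONFIG_ARM64_CURRENT_EL == 1)
			__asm__ __volatile__("mrs %0, sctlr_el1" : "=r"(v));
		else if (CONFIG_ARM64_CURRENT_EL == 2)
			__asm__ __volatile__("mrs %0, sctlr_el2" : "=r"(v));
		else
			__asm__ __volatile__("mrs %0, sctlr_el3" : "=r"(v));
		return v;
	}

Because the dead branches are discarded at compile time, an EL1 or EL2 build never even contains an instruction touching a higher EL's registers, so no faults can be generated by them.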
@@ -23,6 +23,20 @@ source "src/arch/arm64/armv8/Kconfig"

 if ARCH_ARM64

+config ARM64_CURRENT_EL
+	int
+	default 3
+	range 1 3
+	help
+	  The exception level on which coreboot is started. Accepted
+	  values are: 1 (EL1), 2 (EL2) and 3 (EL3). This option can be
+	  used to restrict access to available control registers in case
+	  prior firmware already dropped to a lower exception level. By default,
+	  coreboot is the first firmware that runs on the system and should thus
+	  always run on EL3. This option is only provided for edge-case platforms
+	  that require running a different firmware before coreboot which drops
+	  to a lower exception level.
+
 config ARM64_USE_ARCH_TIMER
 	bool
 	default n
@@ -30,7 +44,7 @@ config ARM64_USE_ARCH_TIMER

 config ARM64_USE_ARM_TRUSTED_FIRMWARE
 	bool
 	default n
-	depends on ARCH_RAMSTAGE_ARM64
+	depends on ARCH_RAMSTAGE_ARM64 && ARM64_CURRENT_EL = 3

 config ARM64_BL31_EXTERNAL_FILE
 	string "Path to external BL31.ELF (leave empty to build from source)"
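A consequence of modelling this as a Kconfig int is that the value is visible to the C preprocessor (as CONFIG_ARM64_CURRENT_EL) in both C and assembly sources, so code touching registers that only exist at a single EL can be compiled out entirely, as the arm64_init_cpu hunk below does with scr_el3. A sketch of the pattern; unmask_external_aborts() is a hypothetical helper, and raw_write_scr_el3() is assumed to be one of the accessors generated by lib_helpers.h:

	/* Sketch: compile out EL3-only register accesses on EL1/EL2 builds.
	   unmask_external_aborts() is hypothetical, for illustration only. */
	#if CONFIG_ARM64_CURRENT_EL == EL3
	static void unmask_external_aborts(void)
	{
		/* scr_el3 is UNDEFINED below EL3, so this access must not
		   even be compiled into an EL1/EL2 coreboot. */
		raw_write_scr_el3(SCR_RES1 | SCR_IRQ | SCR_FIQ | SCR_EA);
	}
	#endif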
@@ -148,10 +148,12 @@ void dcache_invalidate_by_mva(void const *addr, size_t len)
 */
 void arch_segment_loaded(uintptr_t start, size_t size, int flags)
 {
-	uint32_t sctlr = raw_read_sctlr_el3();
+	uint32_t sctlr = raw_read_sctlr();

 	if (sctlr & SCTLR_C)
 		dcache_clean_by_mva((void *)start, size);
 	else if (sctlr & SCTLR_I)
 		dcache_clean_invalidate_by_mva((void *)start, size);

 	icache_invalidate_all();
 }
@@ -77,10 +77,10 @@ ENDPROC(dcache_clean_invalidate_all)
    memory (e.g. the stack) in between disabling and flushing the cache. */
 ENTRY(mmu_disable)
 	str x30, [sp, #-0x8]
-	mrs x0, sctlr_el3
+	mrs x0, CURRENT_EL(sctlr)
 	mov x1, #~(SCTLR_C | SCTLR_M)
 	and x0, x0, x1
-	msr sctlr_el3, x0
+	msr CURRENT_EL(sctlr), x0
 	isb
 	bl dcache_clean_invalidate_all
 	ldr x30, [sp, #-0x8]
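CURRENT_EL() in the two mrs/msr lines above is a token-pasting macro, defined in the asm.h hunk further down. A sketch of its effect on an EL3 build (the .S files are fed through the C preprocessor, which is what makes this work in assembly sources):

	/* With CONFIG_ARM64_CURRENT_EL == 3, asm.h defines: */
	#define CURRENT_EL(reg) reg##_el3

	/* so the assembler sees plain system-register names:
	 *   mrs x0, CURRENT_EL(sctlr)   ->   mrs x0, sctlr_el3
	 *   msr CURRENT_EL(sctlr), x0   ->   msr sctlr_el3, x0
	 */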
@@ -102,12 +102,11 @@ ENTRY(arm64_init_cpu)

 	/* x22: SCTLR, return address: x23 (callee-saved by subroutine) */
 	mov x23, x30
-	/* TODO: Assert that we always start running at EL3 */
-	mrs x22, sctlr_el3
+	mrs x22, CURRENT_EL(sctlr)

 	/* Activate ICache already for speed during cache flush below. */
 	orr x22, x22, #SCTLR_I
-	msr sctlr_el3, x22
+	msr CURRENT_EL(sctlr), x22
 	isb

 	/* Invalidate dcache */
@@ -116,13 +115,15 @@ ENTRY(arm64_init_cpu)
 	/* Reinitialize SCTLR from scratch to known-good state.
 	   This may disable MMU or DCache. */
 	ldr w22, =(SCTLR_RES1 | SCTLR_I | SCTLR_SA)
-	msr sctlr_el3, x22
+	msr CURRENT_EL(sctlr), x22

+#if CONFIG_ARM64_CURRENT_EL == EL3
 	/* Initialize SCR to unmask all interrupts (so that if we get a spurious
 	   IRQ/SError we'll see it when it happens, not hang in BL31). This will
 	   only have an effect after we DAIFClr in exception_init(). */
 	mov x22, #SCR_RES1 | SCR_IRQ | SCR_FIQ | SCR_EA
 	msr scr_el3, x22
+#endif

 	/* Invalidate icache and TLB for good measure */
 	ic iallu
@@ -51,9 +51,10 @@ static void print_regs(struct exc_state *exc_state)
 	struct regs *regs = &exc_state->regs;

 	printk(BIOS_DEBUG, "ELR = 0x%016llx ESR = 0x%08llx\n",
-	       elx->elr, raw_read_esr_el3());
+	       elx->elr, raw_read_esr());
 	printk(BIOS_DEBUG, "FAR = 0x%016llx SPSR = 0x%08llx\n",
-	       raw_read_far_el3(), raw_read_spsr_el3());
+	       raw_read_far(), raw_read_spsr());

 	for (i = 0; i < 30; i += 2) {
 		printk(BIOS_DEBUG,
 		       "X%02d = 0x%016llx X%02d = 0x%016llx\n",
@@ -173,7 +174,8 @@ static int test_exception_handler(struct exc_state *state, uint64_t vector_id)
 {
 	/* Update instruction pointer to next instruction. */
 	state->elx.elr += sizeof(uint32_t);
-	raw_write_elr_el3(state->elx.elr);
+	raw_write_elr(state->elx.elr);

 	return EXC_RET_HANDLED;
 }
@@ -224,7 +224,7 @@ void mmu_config_range(void *start, size_t size, uint64_t tag)

 	/* ARMv8 MMUs snoop L1 data cache, no need to flush it. */
 	dsb();
-	tlbiall_el3();
+	tlbiall();
 	dsb();
 	isb();
 }
@@ -245,15 +245,15 @@ void mmu_init(void)
 	assert((u8 *)root == _ttb);

 	/* Initialize TTBR */
-	raw_write_ttbr0_el3((uintptr_t)root);
+	raw_write_ttbr0((uintptr_t)root);

 	/* Initialize MAIR indices */
-	raw_write_mair_el3(MAIR_ATTRIBUTES);
+	raw_write_mair(MAIR_ATTRIBUTES);

 	/* Initialize TCR flags */
-	raw_write_tcr_el3(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
+	raw_write_tcr(TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
 		      TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_256TB |
 		      TCR_TBI_USED);
 }

 /* Func : mmu_save_context
@@ -264,10 +264,10 @@ void mmu_save_context(struct mmu_context *mmu_context)
 	assert(mmu_context);

 	/* Back-up MAIR_ATTRIBUTES */
-	mmu_context->mair = raw_read_mair_el3();
+	mmu_context->mair = raw_read_mair();

 	/* Back-up TCR value */
-	mmu_context->tcr = raw_read_tcr_el3();
+	mmu_context->tcr = raw_read_tcr();
 }

 /* Func : mmu_restore_context
@@ -278,13 +278,13 @@ void mmu_restore_context(const struct mmu_context *mmu_context)
 	assert(mmu_context);

 	/* Restore TTBR */
-	raw_write_ttbr0_el3((uintptr_t)_ttb);
+	raw_write_ttbr0((uintptr_t)_ttb);

 	/* Restore MAIR indices */
-	raw_write_mair_el3(mmu_context->mair);
+	raw_write_mair(mmu_context->mair);

 	/* Restore TCR flags */
-	raw_write_tcr_el3(mmu_context->tcr);
+	raw_write_tcr(mmu_context->tcr);

 	/* invalidate tlb since ttbr is updated. */
 	tlb_invalidate_all();
@@ -295,8 +295,8 @@ void mmu_enable(void)
 	assert_correct_ttb_mapping(_ttb);
 	assert_correct_ttb_mapping((void *)((uintptr_t)_ettb - 1));

-	uint32_t sctlr = raw_read_sctlr_el3();
-	sctlr |= SCTLR_C | SCTLR_M | SCTLR_I;
-	raw_write_sctlr_el3(sctlr);
+	uint32_t sctlr = raw_read_sctlr();
+	raw_write_sctlr(sctlr | SCTLR_C | SCTLR_M | SCTLR_I);
 	isb();
 }
@@ -18,8 +18,10 @@ static void run_payload(struct prog *prog)

 	if (CONFIG(ARM64_USE_ARM_TRUSTED_FIRMWARE))
 		run_bl31((u64)doit, (u64)arg, payload_spsr);
-	else
+	else if (CONFIG_ARM64_CURRENT_EL == EL3)
 		transition_to_el2(doit, arg, payload_spsr);
+	else
+		doit(arg);
 }

 void arch_prog_run(struct prog *prog)
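Read together, the payload handoff now distinguishes three cases. The following is the hunk above restated with annotations, not additional code:

	if (CONFIG(ARM64_USE_ARM_TRUSTED_FIRMWARE))
		/* TF-A's BL31 owns EL3; it performs the drop and enters the payload. */
		run_bl31((u64)doit, (u64)arg, payload_spsr);
	else if (CONFIG_ARM64_CURRENT_EL == EL3)
		/* No BL31, but coreboot holds EL3 itself: drop to EL2 first. */
		transition_to_el2(doit, arg, payload_spsr);
	else
		/* Started below EL3: nothing to drop from, call the payload directly. */
		doit(arg);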
@@ -28,4 +28,14 @@
 	ENTRY(name) \
 	.weak name \

+#if CONFIG_ARM64_CURRENT_EL == 1
+#define CURRENT_EL(reg) reg##_el1
+#elif CONFIG_ARM64_CURRENT_EL == 2
+#define CURRENT_EL(reg) reg##_el2
+#elif CONFIG_ARM64_CURRENT_EL == 3
+#define CURRENT_EL(reg) reg##_el3
+#else
+#error "Invalid setting for CONFIG_ARM64_CURRENT_EL!"
+#endif
+
 #endif /* __ARM_ARM64_ASM_H */
@@ -55,7 +55,8 @@ unsigned int dcache_line_bytes(void);
 static inline void tlb_invalidate_all(void)
 {
 	/* TLBIALL includes dTLB and iTLB on systems that have them. */
-	tlbiall_el3();
+	tlbiall();
 	dsb();
 	isb();
 }
@@ -113,10 +113,49 @@
 		: : "r" (value) : "memory"); \
 	}

+/*
+ * To allow easy access to the current EL's registers, we export the
+ * following two functions for each EL register that is passed to the
+ * MAKE_REGISTER_ACCESSORS_CURRENT_EL macro. Doing so eliminates, or at
+ * least hides, repetitive branching on the current EL across the arm64
+ * codebase.
+ *
+ * MAKE_REGISTER_ACCESSORS_CURRENT_EL is hooked into MAKE_REGISTER_ACCESSORS_EL123
+ * in order to automatically generate current-EL accessors only for registers
+ * which exist on EL1, EL2 and EL3.
+ *
+ * Note that we don't handle EL0 here, as most of the defined registers do not
+ * have an EL0 variant (see MAKE_REGISTER_ACCESSORS_EL123).
+ *
+ * Important:
+ *   - the target register should be specified without the '_elx' suffix
+ *   - only registers which exist in EL1, EL2 and EL3 should be passed
+ *     to the MAKE_REGISTER_ACCESSORS_CURRENT_EL macro
+ */
+#define MAKE_REGISTER_ACCESSORS_CURRENT_EL(reg) \
+	static inline uint64_t raw_read_##reg(void) \
+	{ \
+		if (CONFIG_ARM64_CURRENT_EL == EL1) \
+			return raw_read_##reg##_el1(); \
+		else if (CONFIG_ARM64_CURRENT_EL == EL2) \
+			return raw_read_##reg##_el2(); \
+		return raw_read_##reg##_el3(); \
+	} \
+	static inline void raw_write_##reg(uint64_t value) \
+	{ \
+		if (CONFIG_ARM64_CURRENT_EL == EL1) \
+			raw_write_##reg##_el1(value); \
+		else if (CONFIG_ARM64_CURRENT_EL == EL2) \
+			raw_write_##reg##_el2(value); \
+		else \
+			raw_write_##reg##_el3(value); \
+	}
+
 #define MAKE_REGISTER_ACCESSORS_EL123(reg) \
 	MAKE_REGISTER_ACCESSORS(reg##_el1) \
 	MAKE_REGISTER_ACCESSORS(reg##_el2) \
-	MAKE_REGISTER_ACCESSORS(reg##_el3)
+	MAKE_REGISTER_ACCESSORS(reg##_el3) \
+	MAKE_REGISTER_ACCESSORS_CURRENT_EL(reg)

 /* Architectural register accessors */
 MAKE_REGISTER_ACCESSORS_EL123(actlr)
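For each register passed to MAKE_REGISTER_ACCESSORS_EL123 (e.g. sctlr), the build now also gets suffix-less raw_read_sctlr()/raw_write_sctlr() wrappers next to the explicit _el1/_el2/_el3 variants. A usage sketch (icache_enable_current_el() is a hypothetical helper); since CONFIG_ARM64_CURRENT_EL is a compile-time constant, the if/else chain inside the generated accessor folds away to a single mrs/msr on the configured EL's register:

	/* Usage sketch: callers no longer hard-code an EL suffix. */
	static void icache_enable_current_el(void)
	{
		uint64_t sctlr = raw_read_sctlr();	/* e.g. sctlr_el2 on an EL2 build */
		raw_write_sctlr(sctlr | SCTLR_I);	/* set the I-cache enable bit */
	}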
@@ -318,6 +357,16 @@ static inline void tlbiall_el3(void)
 	__asm__ __volatile__("tlbi alle3\n\t" : : : "memory");
 }

+static inline void tlbiall(void)
+{
+	if (CONFIG_ARM64_CURRENT_EL == EL1)
+		tlbiall_el1();
+	else if (CONFIG_ARM64_CURRENT_EL == EL2)
+		tlbiall_el2();
+	else
+		tlbiall_el3();
+}
+
 static inline void tlbiallis_el1(void)
 {
 	__asm__ __volatile__("tlbi alle1is\n\t" : : : "memory");
@@ -13,12 +13,13 @@ static enum {

 static int abort_checker(struct exc_state *state, uint64_t vector_id)
 {
-	if (raw_read_esr_el3() >> 26 != 0x25)
+	if (raw_read_esr() >> 26 != 0x25)
 		return EXC_RET_IGNORED; /* Not a data abort. */

 	abort_state = ABORT_CHECKER_TRIGGERED;
 	state->elx.elr += sizeof(uint32_t); /* Jump over faulting instruction. */
-	raw_write_elr_el3(state->elx.elr);
+	raw_write_elr(state->elx.elr);

 	return EXC_RET_HANDLED;
 }
@@ -17,7 +17,9 @@ void exc_entry(struct exc_state *exc_state, uint64_t id)
 	struct regs *regs = &exc_state->regs;
 	uint8_t elx_mode;

-	elx->spsr = raw_read_spsr_el3();
+	elx->spsr = raw_read_spsr();
+	elx->elr = raw_read_elr();

 	elx_mode = get_mode_from_spsr(elx->spsr);

 	if (elx_mode == SPSR_USE_H)

@@ -25,8 +27,6 @@ void exc_entry(struct exc_state *exc_state, uint64_t id)
 	else
 		regs->sp = raw_read_sp_el0();

-	elx->elr = raw_read_elr_el3();
-
 	exc_dispatch(exc_state, id);
 }
@@ -142,19 +142,16 @@ ENTRY(exc_exit)
 ENDPROC(exc_exit)

 /*
- * exception_init_asm: Initialize VBAR and point SP_EL3 to exception stack.
+ * exception_init_asm: Initialize VBAR and point SP_ELx to exception stack.
  * Also unmask aborts now that we can report them. x0 = end of exception stack
  */
 ENTRY(exception_init_asm)
 	msr SPSel, #SPSR_USE_H
 	mov sp, x0
 	msr SPSel, #SPSR_USE_L

 	adr x0, exc_vectors
-	msr vbar_el3, x0
+	msr CURRENT_EL(vbar), x0

 	msr DAIFClr, #0xf

 	dsb sy
 	isb
 	ret
|
Loading…
x
Reference in New Issue
Block a user