diff --git a/ArmPkg/ArmPkg.dsc b/ArmPkg/ArmPkg.dsc index 688244bd7c..df5be6e21f 100644 --- a/ArmPkg/ArmPkg.dsc +++ b/ArmPkg/ArmPkg.dsc @@ -63,6 +63,7 @@ UncachedMemoryAllocationLib|ArmPkg/Library/UncachedMemoryAllocationLib/UncachedMemoryAllocationLib.inf DxeServicesTableLib|MdePkg/Library/DxeServicesTableLib/DxeServicesTableLib.inf DefaultExceptionHandlerLib|ArmPkg/Library/DefaultExceptionHandlerLib/DefaultExceptionHandlerLib.inf + CpuExceptionHandlerLib|ArmPkg/Library/ArmExceptionLib/ArmExceptionLib.inf CpuLib|MdePkg/Library/BaseCpuLib/BaseCpuLib.inf ArmGicLib|ArmPkg/Drivers/ArmGic/ArmGicLib.inf @@ -122,6 +123,8 @@ ArmPkg/Library/SemihostLib/SemihostLib.inf ArmPkg/Library/UncachedMemoryAllocationLib/UncachedMemoryAllocationLib.inf ArmPkg/Library/ArmPsciResetSystemLib/ArmPsciResetSystemLib.inf + ArmPkg/Library/ArmExceptionLib/ArmExceptionLib.inf + ArmPkg/Library/ArmExceptionLib/ArmRelocateExceptionLib.inf ArmPkg/Drivers/CpuDxe/CpuDxe.inf ArmPkg/Drivers/CpuPei/CpuPei.inf diff --git a/ArmPkg/Library/ArmExceptionLib/AArch64/AArch64Exception.c b/ArmPkg/Library/ArmExceptionLib/AArch64/AArch64Exception.c new file mode 100644 index 0000000000..3d6eb4974d --- /dev/null +++ b/ArmPkg/Library/ArmExceptionLib/AArch64/AArch64Exception.c @@ -0,0 +1,44 @@ +/** @file +* Exception Handling support specific for AArch64 +* +* Copyright (c) 2016 HP Development Company, L.P. +* +* This program and the accompanying materials +* are licensed and made available under the terms and conditions of the BSD License +* which accompanies this distribution. The full text of the license may be found at +* http://opensource.org/licenses/bsd-license.php +* +* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. +* +**/ + +#include + +#include + +#include // for MAX_AARCH64_EXCEPTION + +UINTN gMaxExceptionNumber = MAX_AARCH64_EXCEPTION; +EFI_EXCEPTION_CALLBACK gExceptionHandlers[MAX_AARCH64_EXCEPTION + 1] = { 0 }; +EFI_EXCEPTION_CALLBACK gDebuggerExceptionHandlers[MAX_AARCH64_EXCEPTION + 1] = { 0 }; +PHYSICAL_ADDRESS gExceptionVectorAlignmentMask = ARM_VECTOR_TABLE_ALIGNMENT; +UINTN gDebuggerNoHandlerValue = 0; // todo: define for AArch64 + +RETURN_STATUS ArchVectorConfig( + IN UINTN VectorBaseAddress + ) +{ + UINTN HcrReg; + + if (ArmReadCurrentEL() == AARCH64_EL2) { + HcrReg = ArmReadHcr(); + + // Trap General Exceptions. All exceptions that would be routed to EL1 are routed to EL2 + HcrReg |= ARM_HCR_TGE; + + ArmWriteHcr(HcrReg); + } + + return RETURN_SUCCESS; +} diff --git a/ArmPkg/Library/ArmExceptionLib/AArch64/ExceptionSupport.S b/ArmPkg/Library/ArmExceptionLib/AArch64/ExceptionSupport.S new file mode 100644 index 0000000000..790ce009b8 --- /dev/null +++ b/ArmPkg/Library/ArmExceptionLib/AArch64/ExceptionSupport.S @@ -0,0 +1,425 @@ +// +// Copyright (c) 2011 - 2014 ARM LTD. All rights reserved.
+// Portion of Copyright (c) 2014 NVIDIA Corporation. All rights reserved.
+// Copyright (c) 2016 HP Development Company, L.P. +// +// This program and the accompanying materials +// are licensed and made available under the terms and conditions of the BSD License +// which accompanies this distribution. The full text of the license may be found at +// http://opensource.org/licenses/bsd-license.php +// +// THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +// WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. +// +//------------------------------------------------------------------------------ + +#include +#include +#include +#include // for exception type definitions + +/* + This is the stack constructed by the exception handler (low address to high address). + X0 to FAR makes up the EFI_SYSTEM_CONTEXT for AArch64. + + UINT64 X0; 0x000 + UINT64 X1; 0x008 + UINT64 X2; 0x010 + UINT64 X3; 0x018 + UINT64 X4; 0x020 + UINT64 X5; 0x028 + UINT64 X6; 0x030 + UINT64 X7; 0x038 + UINT64 X8; 0x040 + UINT64 X9; 0x048 + UINT64 X10; 0x050 + UINT64 X11; 0x058 + UINT64 X12; 0x060 + UINT64 X13; 0x068 + UINT64 X14; 0x070 + UINT64 X15; 0x078 + UINT64 X16; 0x080 + UINT64 X17; 0x088 + UINT64 X18; 0x090 + UINT64 X19; 0x098 + UINT64 X20; 0x0a0 + UINT64 X21; 0x0a8 + UINT64 X22; 0x0b0 + UINT64 X23; 0x0b8 + UINT64 X24; 0x0c0 + UINT64 X25; 0x0c8 + UINT64 X26; 0x0d0 + UINT64 X27; 0x0d8 + UINT64 X28; 0x0e0 + UINT64 FP; 0x0e8 // x29 - Frame Pointer + UINT64 LR; 0x0f0 // x30 - Link Register + UINT64 SP; 0x0f8 // x31 - Stack Pointer + + // FP/SIMD Registers. 128bit if used as Q-regs. + UINT64 V0[2]; 0x100 + UINT64 V1[2]; 0x110 + UINT64 V2[2]; 0x120 + UINT64 V3[2]; 0x130 + UINT64 V4[2]; 0x140 + UINT64 V5[2]; 0x150 + UINT64 V6[2]; 0x160 + UINT64 V7[2]; 0x170 + UINT64 V8[2]; 0x180 + UINT64 V9[2]; 0x190 + UINT64 V10[2]; 0x1a0 + UINT64 V11[2]; 0x1b0 + UINT64 V12[2]; 0x1c0 + UINT64 V13[2]; 0x1d0 + UINT64 V14[2]; 0x1e0 + UINT64 V15[2]; 0x1f0 + UINT64 V16[2]; 0x200 + UINT64 V17[2]; 0x210 + UINT64 V18[2]; 0x220 + UINT64 V19[2]; 0x230 + UINT64 V20[2]; 0x240 + UINT64 V21[2]; 0x250 + UINT64 V22[2]; 0x260 + UINT64 V23[2]; 0x270 + UINT64 V24[2]; 0x280 + UINT64 V25[2]; 0x290 + UINT64 V26[2]; 0x2a0 + UINT64 V27[2]; 0x2b0 + UINT64 V28[2]; 0x2c0 + UINT64 V29[2]; 0x2d0 + UINT64 V30[2]; 0x2e0 + UINT64 V31[2]; 0x2f0 + + // System Context + UINT64 ELR; 0x300 // Exception Link Register + UINT64 SPSR; 0x308 // Saved Processor Status Register + UINT64 FPSR; 0x310 // Floating Point Status Register + UINT64 ESR; 0x318 // Exception syndrome register + UINT64 FAR; 0x320 // Fault Address Register + UINT64 Padding;0x328 // Required for stack alignment +*/ + +GCC_ASM_EXPORT(ExceptionHandlersEnd) +GCC_ASM_EXPORT(CommonExceptionEntry) +GCC_ASM_EXPORT(AsmCommonExceptionEntry) +GCC_ASM_EXPORT(CommonCExceptionHandler) + +.text + +#define GP_CONTEXT_SIZE (32 * 8) +#define FP_CONTEXT_SIZE (32 * 16) +#define SYS_CONTEXT_SIZE ( 6 * 8) // 5 SYS regs + Alignment requirement (ie: the stack must be aligned on 0x10) + +// Cannot str x31 directly +#define ALL_GP_REGS \ + REG_PAIR (x0, x1, 0x000, GP_CONTEXT_SIZE); \ + REG_PAIR (x2, x3, 0x010, GP_CONTEXT_SIZE); \ + REG_PAIR (x4, x5, 0x020, GP_CONTEXT_SIZE); \ + REG_PAIR (x6, x7, 0x030, GP_CONTEXT_SIZE); \ + REG_PAIR (x8, x9, 0x040, GP_CONTEXT_SIZE); \ + REG_PAIR (x10, x11, 0x050, GP_CONTEXT_SIZE); \ + REG_PAIR (x12, x13, 0x060, GP_CONTEXT_SIZE); \ + REG_PAIR (x14, x15, 0x070, GP_CONTEXT_SIZE); \ + REG_PAIR (x16, x17, 0x080, GP_CONTEXT_SIZE); \ + REG_PAIR (x18, x19, 0x090, GP_CONTEXT_SIZE); \ + REG_PAIR (x20, x21, 0x0a0, GP_CONTEXT_SIZE); \ + 
REG_PAIR (x22, x23, 0x0b0, GP_CONTEXT_SIZE); \ + REG_PAIR (x24, x25, 0x0c0, GP_CONTEXT_SIZE); \ + REG_PAIR (x26, x27, 0x0d0, GP_CONTEXT_SIZE); \ + REG_PAIR (x28, x29, 0x0e0, GP_CONTEXT_SIZE); \ + REG_ONE (x30, 0x0f0, GP_CONTEXT_SIZE); + +// In order to save the SP we need to put it somewhere else first. +// STR only works with XZR/WZR directly +#define SAVE_SP \ + add x1, sp, #(FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE); \ + REG_ONE (x1, 0x0f8, GP_CONTEXT_SIZE); + +#define ALL_FP_REGS \ + REG_PAIR (q0, q1, 0x000, FP_CONTEXT_SIZE); \ + REG_PAIR (q2, q3, 0x020, FP_CONTEXT_SIZE); \ + REG_PAIR (q4, q5, 0x040, FP_CONTEXT_SIZE); \ + REG_PAIR (q6, q7, 0x060, FP_CONTEXT_SIZE); \ + REG_PAIR (q8, q9, 0x080, FP_CONTEXT_SIZE); \ + REG_PAIR (q10, q11, 0x0a0, FP_CONTEXT_SIZE); \ + REG_PAIR (q12, q13, 0x0c0, FP_CONTEXT_SIZE); \ + REG_PAIR (q14, q15, 0x0e0, FP_CONTEXT_SIZE); \ + REG_PAIR (q16, q17, 0x100, FP_CONTEXT_SIZE); \ + REG_PAIR (q18, q19, 0x120, FP_CONTEXT_SIZE); \ + REG_PAIR (q20, q21, 0x140, FP_CONTEXT_SIZE); \ + REG_PAIR (q22, q23, 0x160, FP_CONTEXT_SIZE); \ + REG_PAIR (q24, q25, 0x180, FP_CONTEXT_SIZE); \ + REG_PAIR (q26, q27, 0x1a0, FP_CONTEXT_SIZE); \ + REG_PAIR (q28, q29, 0x1c0, FP_CONTEXT_SIZE); \ + REG_PAIR (q30, q31, 0x1e0, FP_CONTEXT_SIZE); + +#define ALL_SYS_REGS \ + REG_PAIR (x1, x2, 0x000, SYS_CONTEXT_SIZE); \ + REG_PAIR (x3, x4, 0x010, SYS_CONTEXT_SIZE); \ + REG_ONE (x5, 0x020, SYS_CONTEXT_SIZE); + +// +// There are two methods for installing AArch64 exception vectors: +// 1. Install a copy of the vectors to a location specified by a PCD +// 2. Write VBAR directly, requiring that vectors have proper alignment (2K) +// The conditional below adjusts the alignment requirement based on which +// exception vector initialization method is used. +// + +#if defined(ARM_RELOCATE_VECTORS) +GCC_ASM_EXPORT(ExceptionHandlersStart) +ASM_PFX(ExceptionHandlersStart): +#else +VECTOR_BASE(ExceptionHandlersStart) +#endif + +// +// Current EL with SP0 : 0x0 - 0x180 +// +VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SP0_SYNC) +ASM_PFX(SynchronousExceptionSP0): + b ASM_PFX(SynchronousExceptionEntry) + +VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SP0_IRQ) +ASM_PFX(IrqSP0): + b ASM_PFX(IrqEntry) + +VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SP0_FIQ) +ASM_PFX(FiqSP0): + b ASM_PFX(FiqEntry) + +VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SP0_SERR) +ASM_PFX(SErrorSP0): + b ASM_PFX(SErrorEntry) + +// +// Current EL with SPx: 0x200 - 0x380 +// +VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SPx_SYNC) +ASM_PFX(SynchronousExceptionSPx): + b ASM_PFX(SynchronousExceptionEntry) + +VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SPx_IRQ) +ASM_PFX(IrqSPx): + b ASM_PFX(IrqEntry) + +VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SPx_FIQ) +ASM_PFX(FiqSPx): + b ASM_PFX(FiqEntry) + +VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_CUR_SPx_SERR) +ASM_PFX(SErrorSPx): + b ASM_PFX(SErrorEntry) + +// +// Lower EL using AArch64 : 0x400 - 0x580 +// +VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A64_SYNC) +ASM_PFX(SynchronousExceptionA64): + b ASM_PFX(SynchronousExceptionEntry) + +VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A64_IRQ) +ASM_PFX(IrqA64): + b ASM_PFX(IrqEntry) + +VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A64_FIQ) +ASM_PFX(FiqA64): + b ASM_PFX(FiqEntry) + +VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A64_SERR) +ASM_PFX(SErrorA64): + b ASM_PFX(SErrorEntry) + +// +// Lower EL using AArch32 : 0x600 - 0x780 +// +VECTOR_ENTRY(ExceptionHandlersStart, 
ARM_VECTOR_LOW_A32_SYNC) +ASM_PFX(SynchronousExceptionA32): + b ASM_PFX(SynchronousExceptionEntry) + +VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A32_IRQ) +ASM_PFX(IrqA32): + b ASM_PFX(IrqEntry) + +VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A32_FIQ) +ASM_PFX(FiqA32): + b ASM_PFX(FiqEntry) + +VECTOR_ENTRY(ExceptionHandlersStart, ARM_VECTOR_LOW_A32_SERR) +ASM_PFX(SErrorA32): + b ASM_PFX(SErrorEntry) + +VECTOR_END(ExceptionHandlersStart) + +#undef REG_PAIR +#undef REG_ONE +#define REG_PAIR(REG1, REG2, OFFSET, CONTEXT_SIZE) stp REG1, REG2, [sp, #(OFFSET-CONTEXT_SIZE)] +#define REG_ONE(REG1, OFFSET, CONTEXT_SIZE) stur REG1, [sp, #(OFFSET-CONTEXT_SIZE)] + +ASM_PFX(SynchronousExceptionEntry): + // Move the stackpointer so we can reach our structure with the str instruction. + sub sp, sp, #(FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE) + + // Save all the General regs before touching x0 and x1. + // This does not save r31(SP) as it is special. We do that later. + ALL_GP_REGS + + // Record the type of exception that occurred. + mov x0, #EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS + + // Jump to our general handler to deal with all the common parts and process the exception. + ldr x1, ASM_PFX(CommonExceptionEntry) + br x1 + +ASM_PFX(IrqEntry): + sub sp, sp, #(FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE) + ALL_GP_REGS + mov x0, #EXCEPT_AARCH64_IRQ + ldr x1, ASM_PFX(CommonExceptionEntry) + br x1 + +ASM_PFX(FiqEntry): + sub sp, sp, #(FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE) + ALL_GP_REGS + mov x0, #EXCEPT_AARCH64_FIQ + ldr x1, ASM_PFX(CommonExceptionEntry) + br x1 + +ASM_PFX(SErrorEntry): + sub sp, sp, #(FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE) + ALL_GP_REGS + mov x0, #EXCEPT_AARCH64_SERROR + ldr x1, ASM_PFX(CommonExceptionEntry) + br x1 + + +// +// This gets patched by the C code that patches in the vector table +// +.align 3 +ASM_PFX(CommonExceptionEntry): + .8byte ASM_PFX(AsmCommonExceptionEntry) + +ASM_PFX(ExceptionHandlersEnd): + + + +// +// This code runs from CpuDxe driver loaded address. It is patched into +// CommonExceptionEntry. +// +ASM_PFX(AsmCommonExceptionEntry): + /* NOTE: + We have to break up the save code because the immediate value to be used + with the SP is too big to do it all in one step so we need to shuffle the SP + along as we go. (we only have 9bits of immediate to work with) */ + + // Save the current Stack pointer before we start modifying it. + SAVE_SP + + // Preserve the stack pointer we came in with before we modify it + EL1_OR_EL2_OR_EL3(x1) +1:mrs x1, elr_el1 // Exception Link Register + mrs x2, spsr_el1 // Saved Processor Status Register 32bit + mrs x3, fpsr // Floating point Status Register 32bit + mrs x4, esr_el1 // EL1 Exception syndrome register 32bit + mrs x5, far_el1 // EL1 Fault Address Register + b 4f + +2:mrs x1, elr_el2 // Exception Link Register + mrs x2, spsr_el2 // Saved Processor Status Register 32bit + mrs x3, fpsr // Floating point Status Register 32bit + mrs x4, esr_el2 // EL2 Exception syndrome register 32bit + mrs x5, far_el2 // EL2 Fault Address Register + b 4f + +3:mrs x1, elr_el3 // Exception Link Register + mrs x2, spsr_el3 // Saved Processor Status Register 32bit + mrs x3, fpsr // Floating point Status Register 32bit + mrs x4, esr_el3 // EL3 Exception syndrome register 32bit + mrs x5, far_el3 // EL3 Fault Address Register + + // Adjust SP to save next set +4:add sp, sp, #FP_CONTEXT_SIZE + + // Push FP regs to Stack. 
+ ALL_FP_REGS + + // Adjust SP to save next set + add sp, sp, #SYS_CONTEXT_SIZE + + // Save the SYS regs + ALL_SYS_REGS + + // Point to top of struct after all regs saved + sub sp, sp, #(GP_CONTEXT_SIZE + FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE) + + // x0 still holds the exception type. + // Set x1 to point to the top of our struct on the Stack + mov x1, sp + +// CommonCExceptionHandler ( +// IN EFI_EXCEPTION_TYPE ExceptionType, R0 +// IN OUT EFI_SYSTEM_CONTEXT SystemContext R1 +// ) + + // Call the handler as defined above + + // For now we spin in the handler if we received an abort of some kind. + // We do not try to recover. + bl ASM_PFX(CommonCExceptionHandler) // Call exception handler + + +// Defines for popping from stack + +#undef REG_PAIR +#undef REG_ONE +#define REG_PAIR(REG1, REG2, OFFSET, CONTEXT_SIZE) ldp REG1, REG2, [sp, #(OFFSET-CONTEXT_SIZE)] +#define REG_ONE(REG1, OFFSET, CONTEXT_SIZE) ldur REG1, [sp, #(OFFSET-CONTEXT_SIZE)] + + // + // Disable interrupt(IRQ and FIQ) before restoring context, + // or else the context will be corrupted by interrupt reentrance. + // Interrupt mask will be restored from spsr by hardware when we call eret + // + msr daifset, #3 + isb + + // Adjust SP to pop system registers + add sp, sp, #(GP_CONTEXT_SIZE + FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE) + ALL_SYS_REGS + + EL1_OR_EL2_OR_EL3(x6) +1:msr elr_el1, x1 // Exception Link Register + msr spsr_el1,x2 // Saved Processor Status Register 32bit + msr fpsr, x3 // Floating point Status Register 32bit + msr esr_el1, x4 // EL1 Exception syndrome register 32bit + msr far_el1, x5 // EL1 Fault Address Register + b 4f +2:msr elr_el2, x1 // Exception Link Register + msr spsr_el2,x2 // Saved Processor Status Register 32bit + msr fpsr, x3 // Floating point Status Register 32bit + msr esr_el2, x4 // EL2 Exception syndrome register 32bit + msr far_el2, x5 // EL2 Fault Address Register + b 4f +3:msr elr_el3, x1 // Exception Link Register + msr spsr_el3,x2 // Saved Processor Status Register 32bit + msr fpsr, x3 // Floating point Status Register 32bit + msr esr_el3, x4 // EL3 Exception syndrome register 32bit + msr far_el3, x5 // EL3 Fault Address Register +4:// pop all regs and return from exception. + sub sp, sp, #(FP_CONTEXT_SIZE + SYS_CONTEXT_SIZE) + ALL_GP_REGS + + // Adjust SP to pop next set + add sp, sp, #FP_CONTEXT_SIZE + // Pop FP regs to Stack. + ALL_FP_REGS + + // Adjust SP to be where we started from when we came into the handler. + // The handler can not change the SP. + add sp, sp, #SYS_CONTEXT_SIZE + + eret + +#undef REG_PAIR +#undef REG_ONE diff --git a/ArmPkg/Library/ArmExceptionLib/Arm/ArmException.c b/ArmPkg/Library/ArmExceptionLib/Arm/ArmException.c new file mode 100644 index 0000000000..0a0c2a22d2 --- /dev/null +++ b/ArmPkg/Library/ArmExceptionLib/Arm/ArmException.c @@ -0,0 +1,50 @@ +/** @file +* Exception handling support specific for ARM +* +* Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.
+* Copyright (c) 2014, ARM Limited. All rights reserved.
+* Copyright (c) 2016 HP Development Company, L.P.
+* +* This program and the accompanying materials +* are licensed and made available under the terms and conditions of the BSD License +* which accompanies this distribution. The full text of the license may be found at +* http://opensource.org/licenses/bsd-license.php +* +* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. +* +**/ + +#include + +#include + +#include + +#include // for MAX_ARM_EXCEPTION + +UINTN gMaxExceptionNumber = MAX_ARM_EXCEPTION; +EFI_EXCEPTION_CALLBACK gExceptionHandlers[MAX_ARM_EXCEPTION + 1] = { 0 }; +EFI_EXCEPTION_CALLBACK gDebuggerExceptionHandlers[MAX_ARM_EXCEPTION + 1] = { 0 }; +PHYSICAL_ADDRESS gExceptionVectorAlignmentMask = ARM_VECTOR_TABLE_ALIGNMENT; + +// Exception handler contains branch to vector location (jmp $) so no handler +// NOTE: This code assumes vectors are ARM and not Thumb code +UINTN gDebuggerNoHandlerValue = 0xEAFFFFFE; + +RETURN_STATUS ArchVectorConfig( + IN UINTN VectorBaseAddress + ) +{ + // if the vector address corresponds to high vectors + if (VectorBaseAddress == 0xFFFF0000) { + // set SCTLR.V to enable high vectors + ArmSetHighVectors(); + } + else { + // Set SCTLR.V to 0 to enable VBAR to be used + ArmSetLowVectors(); + } + + return RETURN_SUCCESS; +} diff --git a/ArmPkg/Library/ArmExceptionLib/Arm/ExceptionSupport.S b/ArmPkg/Library/ArmExceptionLib/Arm/ExceptionSupport.S new file mode 100644 index 0000000000..fa4087cfab --- /dev/null +++ b/ArmPkg/Library/ArmExceptionLib/Arm/ExceptionSupport.S @@ -0,0 +1,305 @@ +#------------------------------------------------------------------------------ +# +# Use ARMv6 instruction to operate on a single stack +# +# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.
+# Copyright (c) 2014, ARM Limited. All rights reserved.
+# Copyright (c) 2016 HP Development Company, L.P.
+# +# This program and the accompanying materials +# are licensed and made available under the terms and conditions of the BSD License +# which accompanies this distribution. The full text of the license may be found at +# http://opensource.org/licenses/bsd-license.php +# +# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. +# +#------------------------------------------------------------------------------ + +#include + +/* + +This is the stack constructed by the exception handler (low address to high address) + # R0 - IFAR is EFI_SYSTEM_CONTEXT for ARM + Reg Offset + === ====== + R0 0x00 # stmfd SP!,{R0-R12} + R1 0x04 + R2 0x08 + R3 0x0c + R4 0x10 + R5 0x14 + R6 0x18 + R7 0x1c + R8 0x20 + R9 0x24 + R10 0x28 + R11 0x2c + R12 0x30 + SP 0x34 # reserved via subtraction 0x20 (32) from SP + LR 0x38 + PC 0x3c + CPSR 0x40 + DFSR 0x44 + DFAR 0x48 + IFSR 0x4c + IFAR 0x50 + + LR 0x54 # SVC Link register (we need to restore it) + + LR 0x58 # pushed by srsfd + CPSR 0x5c + + */ + + +GCC_ASM_EXPORT(ExceptionHandlersStart) +GCC_ASM_EXPORT(ExceptionHandlersEnd) +GCC_ASM_EXPORT(CommonExceptionEntry) +GCC_ASM_EXPORT(AsmCommonExceptionEntry) +GCC_ASM_EXPORT(CommonCExceptionHandler) + +.text +.syntax unified +#if !defined(__APPLE__) +.fpu neon @ makes vpush/vpop assemble +#endif +.align 5 + + +// +// This code gets copied to the ARM vector table +// ExceptionHandlersStart - ExceptionHandlersEnd gets copied +// +ASM_PFX(ExceptionHandlersStart): + +ASM_PFX(Reset): + b ASM_PFX(ResetEntry) + +ASM_PFX(UndefinedInstruction): + b ASM_PFX(UndefinedInstructionEntry) + +ASM_PFX(SoftwareInterrupt): + b ASM_PFX(SoftwareInterruptEntry) + +ASM_PFX(PrefetchAbort): + b ASM_PFX(PrefetchAbortEntry) + +ASM_PFX(DataAbort): + b ASM_PFX(DataAbortEntry) + +ASM_PFX(ReservedException): + b ASM_PFX(ReservedExceptionEntry) + +ASM_PFX(Irq): + b ASM_PFX(IrqEntry) + +ASM_PFX(Fiq): + b ASM_PFX(FiqEntry) + +ASM_PFX(ResetEntry): + srsdb #0x13! @ Store return state on SVC stack + @ We are already in SVC mode + + stmfd SP!,{LR} @ Store the link register for the current mode + sub SP,SP,#0x20 @ Save space for SP, LR, PC, IFAR - CPSR + stmfd SP!,{R0-R12} @ Store the register state + + mov R0,#0 @ ExceptionType + ldr R1,ASM_PFX(CommonExceptionEntry) + bx R1 + +ASM_PFX(UndefinedInstructionEntry): + sub LR, LR, #4 @ Only -2 for Thumb, adjust in CommonExceptionEntry + srsdb #0x13! @ Store return state on SVC stack + cps #0x13 @ Switch to SVC for common stack + stmfd SP!,{LR} @ Store the link register for the current mode + sub SP,SP,#0x20 @ Save space for SP, LR, PC, IFAR - CPSR + stmfd SP!,{R0-R12} @ Store the register state + + mov R0,#1 @ ExceptionType + ldr R1,ASM_PFX(CommonExceptionEntry) + bx R1 + +ASM_PFX(SoftwareInterruptEntry): + srsdb #0x13! @ Store return state on SVC stack + @ We are already in SVC mode + stmfd SP!,{LR} @ Store the link register for the current mode + sub SP,SP,#0x20 @ Save space for SP, LR, PC, IFAR - CPSR + stmfd SP!,{R0-R12} @ Store the register state + + mov R0,#2 @ ExceptionType + ldr R1,ASM_PFX(CommonExceptionEntry) + bx R1 + +ASM_PFX(PrefetchAbortEntry): + sub LR,LR,#4 + srsdb #0x13! 
@ Store return state on SVC stack + cps #0x13 @ Switch to SVC for common stack + stmfd SP!,{LR} @ Store the link register for the current mode + sub SP,SP,#0x20 @ Save space for SP, LR, PC, IFAR - CPSR + stmfd SP!,{R0-R12} @ Store the register state + + mov R0,#3 @ ExceptionType + ldr R1,ASM_PFX(CommonExceptionEntry) + bx R1 + +ASM_PFX(DataAbortEntry): + sub LR,LR,#8 + srsdb #0x13! @ Store return state on SVC stack + cps #0x13 @ Switch to SVC for common stack + stmfd SP!,{LR} @ Store the link register for the current mode + sub SP,SP,#0x20 @ Save space for SP, LR, PC, IFAR - CPSR + stmfd SP!,{R0-R12} @ Store the register state + + mov R0,#4 + ldr R1,ASM_PFX(CommonExceptionEntry) + bx R1 + +ASM_PFX(ReservedExceptionEntry): + srsdb #0x13! @ Store return state on SVC stack + cps #0x13 @ Switch to SVC for common stack + stmfd SP!,{LR} @ Store the link register for the current mode + sub SP,SP,#0x20 @ Save space for SP, LR, PC, IFAR - CPSR + stmfd SP!,{R0-R12} @ Store the register state + + mov R0,#5 + ldr R1,ASM_PFX(CommonExceptionEntry) + bx R1 + +ASM_PFX(IrqEntry): + sub LR,LR,#4 + srsdb #0x13! @ Store return state on SVC stack + cps #0x13 @ Switch to SVC for common stack + stmfd SP!,{LR} @ Store the link register for the current mode + sub SP,SP,#0x20 @ Save space for SP, LR, PC, IFAR - CPSR + stmfd SP!,{R0-R12} @ Store the register state + + mov R0,#6 @ ExceptionType + ldr R1,ASM_PFX(CommonExceptionEntry) + bx R1 + +ASM_PFX(FiqEntry): + sub LR,LR,#4 + srsdb #0x13! @ Store return state on SVC stack + cps #0x13 @ Switch to SVC for common stack + stmfd SP!,{LR} @ Store the link register for the current mode + sub SP,SP,#0x20 @ Save space for SP, LR, PC, IFAR - CPSR + stmfd SP!,{R0-R12} @ Store the register state + @ Since we have already switch to SVC R8_fiq - R12_fiq + @ never get used or saved + mov R0,#7 @ ExceptionType + ldr R1,ASM_PFX(CommonExceptionEntry) + bx R1 + +// +// This gets patched by the C code that patches in the vector table +// +ASM_PFX(CommonExceptionEntry): + .word ASM_PFX(AsmCommonExceptionEntry) + +ASM_PFX(ExceptionHandlersEnd): + +// +// This code runs from CpuDxe driver loaded address. It is patched into +// CommonExceptionEntry. 
+// +ASM_PFX(AsmCommonExceptionEntry): + mrc p15, 0, R1, c6, c0, 2 @ Read IFAR + str R1, [SP, #0x50] @ Store it in EFI_SYSTEM_CONTEXT_ARM.IFAR + + mrc p15, 0, R1, c5, c0, 1 @ Read IFSR + str R1, [SP, #0x4c] @ Store it in EFI_SYSTEM_CONTEXT_ARM.IFSR + + mrc p15, 0, R1, c6, c0, 0 @ Read DFAR + str R1, [SP, #0x48] @ Store it in EFI_SYSTEM_CONTEXT_ARM.DFAR + + mrc p15, 0, R1, c5, c0, 0 @ Read DFSR + str R1, [SP, #0x44] @ Store it in EFI_SYSTEM_CONTEXT_ARM.DFSR + + ldr R1, [SP, #0x5c] @ srsdb saved pre-exception CPSR on the stack + str R1, [SP, #0x40] @ Store it in EFI_SYSTEM_CONTEXT_ARM.CPSR + + add R2, SP, #0x38 @ Make R2 point to EFI_SYSTEM_CONTEXT_ARM.LR + and R3, R1, #0x1f @ Check CPSR to see if User or System Mode + cmp R3, #0x1f @ if ((CPSR == 0x10) || (CPSR == 0x1f)) + cmpne R3, #0x10 @ + stmdaeq R2, {lr}^ @ save unbanked lr + @ else + stmdane R2, {lr} @ save SVC lr + + + ldr R5, [SP, #0x58] @ PC is the LR pushed by srsfd + @ Check to see if we have to adjust for Thumb entry + sub r4, r0, #1 @ if (ExceptionType == 1 || ExceptionType == 2)) { + cmp r4, #1 @ // UND & SVC have differnt LR adjust for Thumb + bhi NoAdjustNeeded + + tst r1, #0x20 @ if ((CPSR & T)) == T) { // Thumb Mode on entry + addne R5, R5, #2 @ PC += 2; + strne R5,[SP,#0x58] @ Update LR value pushed by srsfd + +NoAdjustNeeded: + + str R5, [SP, #0x3c] @ Store it in EFI_SYSTEM_CONTEXT_ARM.PC + + add R1, SP, #0x60 @ We pushed 0x60 bytes on the stack + str R1, [SP, #0x34] @ Store it in EFI_SYSTEM_CONTEXT_ARM.SP + + @ R0 is ExceptionType + mov R1,SP @ R1 is SystemContext + +#if (FixedPcdGet32(PcdVFPEnabled)) + vpush {d0-d15} @ save vstm registers in case they are used in optimizations +#endif + + mov R4, SP @ Save current SP + tst R4, #4 + subne SP, SP, #4 @ Adjust SP if not 8-byte aligned + +/* +VOID +EFIAPI +CommonCExceptionHandler ( + IN EFI_EXCEPTION_TYPE ExceptionType, R0 + IN OUT EFI_SYSTEM_CONTEXT SystemContext R1 + ) + +*/ + blx ASM_PFX(CommonCExceptionHandler) @ Call exception handler + + mov SP, R4 @ Restore SP + +#if (FixedPcdGet32(PcdVFPEnabled)) + vpop {d0-d15} +#endif + + ldr R1, [SP, #0x4c] @ Restore EFI_SYSTEM_CONTEXT_ARM.IFSR + mcr p15, 0, R1, c5, c0, 1 @ Write IFSR + + ldr R1, [SP, #0x44] @ Restore EFI_SYSTEM_CONTEXT_ARM.DFSR + mcr p15, 0, R1, c5, c0, 0 @ Write DFSR + + ldr R1,[SP,#0x3c] @ EFI_SYSTEM_CONTEXT_ARM.PC + str R1,[SP,#0x58] @ Store it back to srsfd stack slot so it can be restored + + ldr R1,[SP,#0x40] @ EFI_SYSTEM_CONTEXT_ARM.CPSR + str R1,[SP,#0x5c] @ Store it back to srsfd stack slot so it can be restored + + add R3, SP, #0x54 @ Make R3 point to SVC LR saved on entry + add R2, SP, #0x38 @ Make R2 point to EFI_SYSTEM_CONTEXT_ARM.LR + and R1, R1, #0x1f @ Check to see if User or System Mode + cmp R1, #0x1f @ if ((CPSR == 0x10) || (CPSR == 0x1f)) + cmpne R1, #0x10 @ + ldmibeq R2, {lr}^ @ restore unbanked lr + @ else + ldmibne R3, {lr} @ restore SVC lr, via ldmfd SP!, {LR} + + ldmfd SP!,{R0-R12} @ Restore general purpose registers + @ Exception handler can not change SP + + add SP,SP,#0x20 @ Clear out the remaining stack space + ldmfd SP!,{LR} @ restore the link register for this context + rfefd SP! 
@ return from exception via srsfd stack slot + diff --git a/ArmPkg/Library/ArmExceptionLib/Arm/ExceptionSupport.asm b/ArmPkg/Library/ArmExceptionLib/Arm/ExceptionSupport.asm new file mode 100644 index 0000000000..848a54babf --- /dev/null +++ b/ArmPkg/Library/ArmExceptionLib/Arm/ExceptionSupport.asm @@ -0,0 +1,302 @@ +//------------------------------------------------------------------------------ +// +// Use ARMv6 instruction to operate on a single stack +// +// Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.
+// Copyright (c) 2014, ARM Limited. All rights reserved.
+// Copyright (c) 2016 HP Development Company, L.P.
+// +// This program and the accompanying materials +// are licensed and made available under the terms and conditions of the BSD License +// which accompanies this distribution. The full text of the license may be found at +// http://opensource.org/licenses/bsd-license.php +// +// THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +// WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. +// +//------------------------------------------------------------------------------ + +#include + +/* + +This is the stack constructed by the exception handler (low address to high address) + # R0 - IFAR is EFI_SYSTEM_CONTEXT for ARM + Reg Offset + === ====== + R0 0x00 # stmfd SP!,{R0-R12} + R1 0x04 + R2 0x08 + R3 0x0c + R4 0x10 + R5 0x14 + R6 0x18 + R7 0x1c + R8 0x20 + R9 0x24 + R10 0x28 + R11 0x2c + R12 0x30 + SP 0x34 # reserved via subtraction 0x20 (32) from SP + LR 0x38 + PC 0x3c + CPSR 0x40 + DFSR 0x44 + DFAR 0x48 + IFSR 0x4c + IFAR 0x50 + + LR 0x54 # SVC Link register (we need to restore it) + + LR 0x58 # pushed by srsfd + CPSR 0x5c + + */ + + + EXPORT ExceptionHandlersStart + EXPORT ExceptionHandlersEnd + EXPORT CommonExceptionEntry + EXPORT AsmCommonExceptionEntry + IMPORT CommonCExceptionHandler + + PRESERVE8 + AREA DxeExceptionHandlers, CODE, READONLY, CODEALIGN, ALIGN=5 + +// +// This code gets copied to the ARM vector table +// ExceptionHandlersStart - ExceptionHandlersEnd gets copied +// +ExceptionHandlersStart + +Reset + b ResetEntry + +UndefinedInstruction + b UndefinedInstructionEntry + +SoftwareInterrupt + b SoftwareInterruptEntry + +PrefetchAbort + b PrefetchAbortEntry + +DataAbort + b DataAbortEntry + +ReservedException + b ReservedExceptionEntry + +Irq + b IrqEntry + +Fiq + b FiqEntry + +ResetEntry + srsfd #0x13! ; Store return state on SVC stack + ; We are already in SVC mode + stmfd SP!,{LR} ; Store the link register for the current mode + sub SP,SP,#0x20 ; Save space for SP, LR, PC, IFAR - CPSR + stmfd SP!,{R0-R12} ; Store the register state + + mov R0,#0 ; ExceptionType + ldr R1,CommonExceptionEntry + bx R1 + +UndefinedInstructionEntry + sub LR, LR, #4 ; Only -2 for Thumb, adjust in CommonExceptionEntry + srsfd #0x13! ; Store return state on SVC stack + cps #0x13 ; Switch to SVC for common stack + stmfd SP!,{LR} ; Store the link register for the current mode + sub SP,SP,#0x20 ; Save space for SP, LR, PC, IFAR - CPSR + stmfd SP!,{R0-R12} ; Store the register state + + mov R0,#1 ; ExceptionType + ldr R1,CommonExceptionEntry; + bx R1 + +SoftwareInterruptEntry + srsfd #0x13! ; Store return state on SVC stack + ; We are already in SVC mode + stmfd SP!,{LR} ; Store the link register for the current mode + sub SP,SP,#0x20 ; Save space for SP, LR, PC, IFAR - CPSR + stmfd SP!,{R0-R12} ; Store the register state + + mov R0,#2 ; ExceptionType + ldr R1,CommonExceptionEntry + bx R1 + +PrefetchAbortEntry + sub LR,LR,#4 + srsfd #0x13! ; Store return state on SVC stack + cps #0x13 ; Switch to SVC for common stack + stmfd SP!,{LR} ; Store the link register for the current mode + sub SP,SP,#0x20 ; Save space for SP, LR, PC, IFAR - CPSR + stmfd SP!,{R0-R12} ; Store the register state + + mov R0,#3 ; ExceptionType + ldr R1,CommonExceptionEntry + bx R1 + +DataAbortEntry + sub LR,LR,#8 + srsfd #0x13! 
; Store return state on SVC stack + cps #0x13 ; Switch to SVC for common stack + stmfd SP!,{LR} ; Store the link register for the current mode + sub SP,SP,#0x20 ; Save space for SP, LR, PC, IFAR - CPSR + stmfd SP!,{R0-R12} ; Store the register state + + mov R0,#4 ; ExceptionType + ldr R1,CommonExceptionEntry + bx R1 + +ReservedExceptionEntry + srsfd #0x13! ; Store return state on SVC stack + cps #0x13 ; Switch to SVC for common stack + stmfd SP!,{LR} ; Store the link register for the current mode + sub SP,SP,#0x20 ; Save space for SP, LR, PC, IFAR - CPSR + stmfd SP!,{R0-R12} ; Store the register state + + mov R0,#5 ; ExceptionType + ldr R1,CommonExceptionEntry + bx R1 + +IrqEntry + sub LR,LR,#4 + srsfd #0x13! ; Store return state on SVC stack + cps #0x13 ; Switch to SVC for common stack + stmfd SP!,{LR} ; Store the link register for the current mode + sub SP,SP,#0x20 ; Save space for SP, LR, PC, IFAR - CPSR + stmfd SP!,{R0-R12} ; Store the register state + + mov R0,#6 ; ExceptionType + ldr R1,CommonExceptionEntry + bx R1 + +FiqEntry + sub LR,LR,#4 + srsfd #0x13! ; Store return state on SVC stack + cps #0x13 ; Switch to SVC for common stack + stmfd SP!,{LR} ; Store the link register for the current mode + sub SP,SP,#0x20 ; Save space for SP, LR, PC, IFAR - CPSR + stmfd SP!,{R0-R12} ; Store the register state + ; Since we have already switch to SVC R8_fiq - R12_fiq + ; never get used or saved + mov R0,#7 ; ExceptionType + ldr R1,CommonExceptionEntry + bx R1 + +// +// This gets patched by the C code that patches in the vector table +// +CommonExceptionEntry + dcd AsmCommonExceptionEntry + +ExceptionHandlersEnd + +// +// This code runs from CpuDxe driver loaded address. It is patched into +// CommonExceptionEntry. +// +AsmCommonExceptionEntry + mrc p15, 0, R1, c6, c0, 2 ; Read IFAR + str R1, [SP, #0x50] ; Store it in EFI_SYSTEM_CONTEXT_ARM.IFAR + + mrc p15, 0, R1, c5, c0, 1 ; Read IFSR + str R1, [SP, #0x4c] ; Store it in EFI_SYSTEM_CONTEXT_ARM.IFSR + + mrc p15, 0, R1, c6, c0, 0 ; Read DFAR + str R1, [SP, #0x48] ; Store it in EFI_SYSTEM_CONTEXT_ARM.DFAR + + mrc p15, 0, R1, c5, c0, 0 ; Read DFSR + str R1, [SP, #0x44] ; Store it in EFI_SYSTEM_CONTEXT_ARM.DFSR + + ldr R1, [SP, #0x5c] ; srsfd saved pre-exception CPSR on the stack + str R1, [SP, #0x40] ; Store it in EFI_SYSTEM_CONTEXT_ARM.CPSR + + add R2, SP, #0x38 ; Make R2 point to EFI_SYSTEM_CONTEXT_ARM.LR + and R3, R1, #0x1f ; Check CPSR to see if User or System Mode + cmp R3, #0x1f ; if ((CPSR == 0x10) || (CPSR == 0x1f)) + cmpne R3, #0x10 ; + stmeqed R2, {lr}^ ; save unbanked lr + ; else + stmneed R2, {lr} ; save SVC lr + + + ldr R5, [SP, #0x58] ; PC is the LR pushed by srsfd + ; Check to see if we have to adjust for Thumb entry + sub r4, r0, #1 ; if (ExceptionType == 1 || ExceptionType == 2)) { + cmp r4, #1 ; // UND & SVC have differnt LR adjust for Thumb + bhi NoAdjustNeeded + + tst r1, #0x20 ; if ((CPSR & T)) == T) { // Thumb Mode on entry + addne R5, R5, #2 ; PC += 2; + strne R5,[SP,#0x58] ; Update LR value pushed by srsfd + +NoAdjustNeeded + + str R5, [SP, #0x3c] ; Store it in EFI_SYSTEM_CONTEXT_ARM.PC + + add R1, SP, #0x60 ; We pushed 0x60 bytes on the stack + str R1, [SP, #0x34] ; Store it in EFI_SYSTEM_CONTEXT_ARM.SP + + ; R0 is ExceptionType + mov R1,SP ; R1 is SystemContext + +#if (FixedPcdGet32(PcdVFPEnabled)) + vpush {d0-d15} ; save vstm registers in case they are used in optimizations +#endif + + mov R4, SP ; Save current SP + tst R4, #4 + subne SP, SP, #4 ; Adjust SP if not 8-byte aligned + +/* +VOID +EFIAPI 
+CommonCExceptionHandler ( + IN EFI_EXCEPTION_TYPE ExceptionType, R0 + IN OUT EFI_SYSTEM_CONTEXT SystemContext R1 + ) + +*/ + blx CommonCExceptionHandler ; Call exception handler + + mov SP, R4 ; Restore SP + +#if (FixedPcdGet32(PcdVFPEnabled)) + vpop {d0-d15} +#endif + + ldr R1, [SP, #0x4c] ; Restore EFI_SYSTEM_CONTEXT_ARM.IFSR + mcr p15, 0, R1, c5, c0, 1 ; Write IFSR + + ldr R1, [SP, #0x44] ; Restore EFI_SYSTEM_CONTEXT_ARM.DFSR + mcr p15, 0, R1, c5, c0, 0 ; Write DFSR + + ldr R1,[SP,#0x3c] ; EFI_SYSTEM_CONTEXT_ARM.PC + str R1,[SP,#0x58] ; Store it back to srsfd stack slot so it can be restored + + ldr R1,[SP,#0x40] ; EFI_SYSTEM_CONTEXT_ARM.CPSR + str R1,[SP,#0x5c] ; Store it back to srsfd stack slot so it can be restored + + add R3, SP, #0x54 ; Make R3 point to SVC LR saved on entry + add R2, SP, #0x38 ; Make R2 point to EFI_SYSTEM_CONTEXT_ARM.LR + and R1, R1, #0x1f ; Check to see if User or System Mode + cmp R1, #0x1f ; if ((CPSR == 0x10) || (CPSR == 0x1f)) + cmpne R1, #0x10 ; + ldmeqed R2, {lr}^ ; restore unbanked lr + ; else + ldmneed R3, {lr} ; restore SVC lr, via ldmfd SP!, {LR} + + ldmfd SP!,{R0-R12} ; Restore general purpose registers + ; Exception handler can not change SP + + add SP,SP,#0x20 ; Clear out the remaining stack space + ldmfd SP!,{LR} ; restore the link register for this context + rfefd SP! ; return from exception via srsfd stack slot + + END + + diff --git a/ArmPkg/Library/ArmExceptionLib/ArmExceptionLib.c b/ArmPkg/Library/ArmExceptionLib/ArmExceptionLib.c new file mode 100644 index 0000000000..5977a3e8fa --- /dev/null +++ b/ArmPkg/Library/ArmExceptionLib/ArmExceptionLib.c @@ -0,0 +1,320 @@ +/* @file +* Main file supporting the SEC Phase for Versatile Express +* +* Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.
+* Copyright (c) 2011-2014, ARM Limited. All rights reserved. +* Copyright (c) 2016 HP Development Company, L.P. +* +* This program and the accompanying materials +* are licensed and made available under the terms and conditions of the BSD License +* which accompanies this distribution. The full text of the license may be found at +* http://opensource.org/licenses/bsd-license.php +* +* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. +* +**/ + +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +RETURN_STATUS +CopyExceptionHandlers( + IN PHYSICAL_ADDRESS BaseAddress + ); + +EFI_STATUS +EFIAPI +RegisterExceptionHandler( + IN EFI_EXCEPTION_TYPE ExceptionType, + IN EFI_CPU_INTERRUPT_HANDLER InterruptHandler + ); + +VOID +ExceptionHandlersStart( + VOID + ); + +VOID +ExceptionHandlersEnd( + VOID + ); + +RETURN_STATUS ArchVectorConfig( + IN UINTN VectorBaseAddress + ); + +// these globals are provided by the architecture specific source (Arm or AArch64) +extern UINTN gMaxExceptionNumber; +extern EFI_EXCEPTION_CALLBACK gExceptionHandlers[]; +extern EFI_EXCEPTION_CALLBACK gDebuggerExceptionHandlers[]; +extern PHYSICAL_ADDRESS gExceptionVectorAlignmentMask; +extern UINTN gDebuggerNoHandlerValue; + +// A compiler flag adjusts the compilation of this library to a variant where +// the vectors are relocated (copied) to another location versus using the +// vectors in-place. Since this effects an assembly .align directive we must +// address this at library build time. Since this affects the build of the +// library we cannot represent this in a PCD since PCDs are evaluated on +// a per-module basis. +#if defined(ARM_RELOCATE_VECTORS) +BOOLEAN gArmRelocateVectorTable = TRUE; +#else +BOOLEAN gArmRelocateVectorTable = FALSE; +#endif + + +/** +Initializes all CPU exceptions entries and provides the default exception handlers. + +Caller should try to get an array of interrupt and/or exception vectors that are in use and need to +persist by EFI_VECTOR_HANDOFF_INFO defined in PI 1.3 specification. +If caller cannot get reserved vector list or it does not exists, set VectorInfo to NULL. +If VectorInfo is not NULL, the exception vectors will be initialized per vector attribute accordingly. + +@param[in] VectorInfo Pointer to reserved vector list. + +@retval EFI_SUCCESS CPU Exception Entries have been successfully initialized +with default exception handlers. +@retval EFI_INVALID_PARAMETER VectorInfo includes the invalid content if VectorInfo is not NULL. +@retval EFI_UNSUPPORTED This function is not supported. + +**/ +EFI_STATUS +EFIAPI +InitializeCpuExceptionHandlers( + IN EFI_VECTOR_HANDOFF_INFO *VectorInfo OPTIONAL + ) +{ + RETURN_STATUS Status; + UINTN VectorBase; + + Status = EFI_SUCCESS; + + // if we are requested to copy exceptin handlers to another location + if (gArmRelocateVectorTable) { + + VectorBase = PcdGet32(PcdCpuVectorBaseAddress); + Status = CopyExceptionHandlers(VectorBase); + + } + else { // use VBAR to point to where our exception handlers are + + // The vector table must be aligned for the architecture. If this + // assertion fails ensure the appropriate FFS alignment is in effect, + // which can be accomplished by ensuring the proper Align=X statement + // in the platform packaging rules. For ARM Align=32 is required and + // for AArch64 Align=4K is required. 
Align=Auto can be used but this + // is known to cause an issue with populating the reset vector area + // for encapsulated FVs. + ASSERT(((UINTN)ExceptionHandlersStart & gExceptionVectorAlignmentMask) == 0); + + // We do not copy the Exception Table at PcdGet32(PcdCpuVectorBaseAddress). We just set Vector + // Base Address to point into CpuDxe code. + VectorBase = (UINTN)ExceptionHandlersStart; + + Status = RETURN_SUCCESS; + } + + if (!RETURN_ERROR(Status)) { + // call the architecture-specific routine to prepare for the new vector + // configuration to take effect + ArchVectorConfig(VectorBase); + + ArmWriteVBar(VectorBase); + } + + return RETURN_SUCCESS; +} + +/** +Copies exception handlers to the speciifed address. + +Caller should try to get an array of interrupt and/or exception vectors that are in use and need to +persist by EFI_VECTOR_HANDOFF_INFO defined in PI 1.3 specification. +If caller cannot get reserved vector list or it does not exists, set VectorInfo to NULL. +If VectorInfo is not NULL, the exception vectors will be initialized per vector attribute accordingly. + +@param[in] VectorInfo Pointer to reserved vector list. + +@retval EFI_SUCCESS CPU Exception Entries have been successfully initialized +with default exception handlers. +@retval EFI_INVALID_PARAMETER VectorInfo includes the invalid content if VectorInfo is not NULL. +@retval EFI_UNSUPPORTED This function is not supported. + +**/ +RETURN_STATUS +CopyExceptionHandlers( + IN PHYSICAL_ADDRESS BaseAddress + ) +{ + RETURN_STATUS Status; + UINTN Length; + UINTN Index; + UINT32 *VectorBase; + + // ensure that the destination value specifies an address meeting the vector alignment requirements + ASSERT ((BaseAddress & gExceptionVectorAlignmentMask) == 0); + + // + // Copy an implementation of the exception vectors to PcdCpuVectorBaseAddress. + // + Length = (UINTN)ExceptionHandlersEnd - (UINTN)ExceptionHandlersStart; + + VectorBase = (UINT32 *)(UINTN)BaseAddress; + + if (FeaturePcdGet(PcdDebuggerExceptionSupport) == TRUE) { + // Save existing vector table, in case debugger is already hooked in + CopyMem((VOID *)gDebuggerExceptionHandlers, (VOID *)VectorBase, sizeof (EFI_EXCEPTION_CALLBACK)* (gMaxExceptionNumber+1)); + } + + // Copy our assembly code into the page that contains the exception vectors. + CopyMem((VOID *)VectorBase, (VOID *)ExceptionHandlersStart, Length); + + // + // Initialize the C entry points for interrupts + // + for (Index = 0; Index <= gMaxExceptionNumber; Index++) { + if (!FeaturePcdGet(PcdDebuggerExceptionSupport) || + (gDebuggerExceptionHandlers[Index] == 0) || (gDebuggerExceptionHandlers[Index] == (VOID *)gDebuggerNoHandlerValue)) { + + Status = RegisterExceptionHandler(Index, NULL); + ASSERT_EFI_ERROR(Status); + } + else { + // If the debugger has already hooked put its vector back + VectorBase[Index] = (UINT32)(UINTN)gDebuggerExceptionHandlers[Index]; + } + } + + // Flush Caches since we updated executable stuff + InvalidateInstructionCacheRange((VOID *)(UINTN)BaseAddress, Length); + + return RETURN_SUCCESS; +} + + +/** +Initializes all CPU interrupt/exceptions entries and provides the default interrupt/exception handlers. + +Caller should try to get an array of interrupt and/or exception vectors that are in use and need to +persist by EFI_VECTOR_HANDOFF_INFO defined in PI 1.3 specification. +If caller cannot get reserved vector list or it does not exists, set VectorInfo to NULL. +If VectorInfo is not NULL, the exception vectors will be initialized per vector attribute accordingly. 
+ +@param[in] VectorInfo Pointer to reserved vector list. + +@retval EFI_SUCCESS All CPU interrupt/exception entries have been successfully initialized +with default interrupt/exception handlers. +@retval EFI_INVALID_PARAMETER VectorInfo includes the invalid content if VectorInfo is not NULL. +@retval EFI_UNSUPPORTED This function is not supported. + +**/ +EFI_STATUS +EFIAPI +InitializeCpuInterruptHandlers( +IN EFI_VECTOR_HANDOFF_INFO *VectorInfo OPTIONAL +) +{ + // not needed, this is what the CPU driver is for + return EFI_UNSUPPORTED; +} + +/** +Registers a function to be called from the processor exception handler. (On ARM/AArch64 this only +provides exception handlers, not interrupt handling which is provided through the Hardware Interrupt +Protocol.) + +This function registers and enables the handler specified by ExceptionHandler for a processor +interrupt or exception type specified by ExceptionType. If ExceptionHandler is NULL, then the +handler for the processor interrupt or exception type specified by ExceptionType is uninstalled. +The installed handler is called once for each processor interrupt or exception. +NOTE: This function should be invoked after InitializeCpuExceptionHandlers() or +InitializeCpuInterruptHandlers() invoked, otherwise EFI_UNSUPPORTED returned. + +@param[in] ExceptionType Defines which interrupt or exception to hook. +@param[in] ExceptionHandler A pointer to a function of type EFI_CPU_INTERRUPT_HANDLER that is called +when a processor interrupt occurs. If this parameter is NULL, then the handler +will be uninstalled. + +@retval EFI_SUCCESS The handler for the processor interrupt was successfully installed or uninstalled. +@retval EFI_ALREADY_STARTED ExceptionHandler is not NULL, and a handler for ExceptionType was +previously installed. +@retval EFI_INVALID_PARAMETER ExceptionHandler is NULL, and a handler for ExceptionType was not +previously installed. +@retval EFI_UNSUPPORTED The interrupt specified by ExceptionType is not supported, +or this function is not supported. +**/ +RETURN_STATUS +RegisterCpuInterruptHandler( + IN EFI_EXCEPTION_TYPE ExceptionType, + IN EFI_CPU_INTERRUPT_HANDLER ExceptionHandler + ) { + if (ExceptionType > gMaxExceptionNumber) { + return RETURN_UNSUPPORTED; + } + + if ((ExceptionHandler != NULL) && (gExceptionHandlers[ExceptionType] != NULL)) { + return RETURN_ALREADY_STARTED; + } + + gExceptionHandlers[ExceptionType] = ExceptionHandler; + + return RETURN_SUCCESS; +} + +/** +Register exception handler. + +@param This A pointer to the SMM_CPU_SERVICE_PROTOCOL instance. +@param ExceptionType Defines which interrupt or exception to hook. Type EFI_EXCEPTION_TYPE and +the valid values for this parameter are defined in EFI_DEBUG_SUPPORT_PROTOCOL +of the UEFI 2.0 specification. +@param InterruptHandler A pointer to a function of type EFI_CPU_INTERRUPT_HANDLER +that is called when a processor interrupt occurs. +If this parameter is NULL, then the handler will be uninstalled. + +@retval EFI_SUCCESS The handler for the processor interrupt was successfully installed or uninstalled. +@retval EFI_ALREADY_STARTED InterruptHandler is not NULL, and a handler for InterruptType was previously installed. +@retval EFI_INVALID_PARAMETER InterruptHandler is NULL, and a handler for InterruptType was not previously installed. +@retval EFI_UNSUPPORTED The interrupt specified by InterruptType is not supported. 
+ +**/ +EFI_STATUS +EFIAPI +RegisterExceptionHandler( + IN EFI_EXCEPTION_TYPE ExceptionType, + IN EFI_CPU_INTERRUPT_HANDLER InterruptHandler + ) +{ + return RegisterCpuInterruptHandler(ExceptionType, InterruptHandler); +} + +VOID +EFIAPI +CommonCExceptionHandler( + IN EFI_EXCEPTION_TYPE ExceptionType, + IN OUT EFI_SYSTEM_CONTEXT SystemContext + ) +{ + if (ExceptionType <= gMaxExceptionNumber) { + if (gExceptionHandlers[ExceptionType]) { + gExceptionHandlers[ExceptionType](ExceptionType, SystemContext); + return; + } + } + else { + DEBUG((EFI_D_ERROR, "Unknown exception type %d\n", ExceptionType)); + ASSERT(FALSE); + } + + DefaultExceptionHandler(ExceptionType, SystemContext); +} diff --git a/ArmPkg/Library/ArmExceptionLib/ArmExceptionLib.inf b/ArmPkg/Library/ArmExceptionLib/ArmExceptionLib.inf new file mode 100644 index 0000000000..10d9ae0f4a --- /dev/null +++ b/ArmPkg/Library/ArmExceptionLib/ArmExceptionLib.inf @@ -0,0 +1,63 @@ +## @file +# Instance of CpuExceptionHandlerLib Library for ARM/AArch64 architectures +# +# This library instance is used for modules that will implement exception +# handlers in-place (by programming VBAR). The exception handlers will be +# generated with alignment as required by the processor architecture. The +# alignment must be propagated into the parent FFS/FV through FDF build rules +# for the relevant module types (i.e. Align=Auto). +# +# Note that using this library instance can cause growth to the size of the FV +# due to the padding added by the build tools to meet the vector alignment +# requirements and may not be desirable for space-sensitive FVs (uncompressed / +# XIP components). The alternative library instance, ArmRelocateExceptionLib +# should be considered for these applications. +# +# Copyright (c) 2011-2012, ARM Limited. All rights reserved. +# Copyright (c) 2016 HP Development Company, L.P. +# +# This program and the accompanying materials +# are licensed and made available under the terms and conditions of the BSD License +# which accompanies this distribution. The full text of the license may be found at +# http://opensource.org/licenses/bsd-license.php +# +# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. +# +# + +[Defines] + INF_VERSION = 0x00010005 + BASE_NAME = ArmExceptionLib + FILE_GUID = A9796991-4E88-47F0-87C5-D96A1D270539 + MODULE_TYPE = BASE + VERSION_STRING = 1.0 + LIBRARY_CLASS = CpuExceptionHandlerLib + +[Sources.common] + ArmExceptionLib.c + +[Sources.Arm] + Arm/ArmException.c + Arm/ExceptionSupport.asm | RVCT + Arm/ExceptionSupport.S | GCC + +[Sources.AARCH64] + AArch64/AArch64Exception.c + AArch64/ExceptionSupport.S + +[Packages] + MdePkg/MdePkg.dec + MdeModulePkg/MdeModulePkg.dec + ArmPkg/ArmPkg.dec + +[LibraryClasses] + ArmLib + DebugLib + DefaultExceptionHandlerLib + BaseMemoryLib + CacheMaintenanceLib + +[Pcd] + gArmTokenSpaceGuid.PcdDebuggerExceptionSupport + gArmTokenSpaceGuid.PcdCpuVectorBaseAddress diff --git a/ArmPkg/Library/ArmExceptionLib/ArmRelocateExceptionLib.inf b/ArmPkg/Library/ArmExceptionLib/ArmRelocateExceptionLib.inf new file mode 100644 index 0000000000..340812be25 --- /dev/null +++ b/ArmPkg/Library/ArmExceptionLib/ArmRelocateExceptionLib.inf @@ -0,0 +1,65 @@ +## @file +# Instance of CpuExceptionHandlerLib Library for ARM/AArch64 architectures +# +# This library instance is used when exception vectors must be relocated to +# a specific address. The address is specified by PcdCpuVectorBaseAddress. 
+# Since the alignment requirement for in-place exception handlers causes +# image size to increase, this instance is useful for modules that need to +# minimize space used in their FV (like XIP modules). See ArmExceptionLib.inf +# for the in-place exception handler alternative. +# +# Copyright (c) 2011-2012, ARM Limited. All rights reserved. +# Copyright (c) 2016 HP Development Company, L.P. +# +# This program and the accompanying materials +# are licensed and made available under the terms and conditions of the BSD License +# which accompanies this distribution. The full text of the license may be found at +# http://opensource.org/licenses/bsd-license.php +# +# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, +# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. +# +# + +[Defines] + INF_VERSION = 0x00010005 + BASE_NAME = ArmRelocateExceptionLib + FILE_GUID = 62AA447A-1FBA-429E-9E0D-CE0D2D8DCF58 + MODULE_TYPE = BASE + VERSION_STRING = 1.0 + LIBRARY_CLASS = CpuExceptionHandlerLib + +[Sources.common] + ArmExceptionLib.c + +[Sources.Arm] + Arm/ArmException.c + Arm/ExceptionSupport.asm | RVCT + Arm/ExceptionSupport.S | GCC + +[Sources.AARCH64] + AArch64/AArch64Exception.c + AArch64/ExceptionSupport.S + +[Packages] + MdePkg/MdePkg.dec + MdeModulePkg/MdeModulePkg.dec + ArmPkg/ArmPkg.dec + +[LibraryClasses] + ArmLib + DebugLib + DefaultExceptionHandlerLib + BaseMemoryLib + CacheMaintenanceLib + +[Pcd] + gArmTokenSpaceGuid.PcdDebuggerExceptionSupport + gArmTokenSpaceGuid.PcdCpuVectorBaseAddress + +[BuildOptions] + # We must pass a define to specify that we are relocating vectors so the + # vector alignment is relaxed (space savings); note that this must be done + # as a define and not a PCD since it affects assembly directives. + *_*_*_PP_FLAGS = -DARM_RELOCATE_VECTORS + *_*_*_CC_FLAGS = -DARM_RELOCATE_VECTORS
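
For context, the choice between the two library instances is made per platform in the DSC library-class mappings; the hunk at the top of this patch wires ArmPkg.dsc to the in-place instance. A hedged sketch of how a platform DSC might select either instance follows (the section header is illustrative; only the two INF paths come from this patch):

  [LibraryClasses.common]
    # In-place vectors: VBAR points into the module image, so the FFS/FV
    # alignment rules described in ArmExceptionLib.inf must be honored.
    CpuExceptionHandlerLib|ArmPkg/Library/ArmExceptionLib/ArmExceptionLib.inf

    # Alternative for space-sensitive / XIP modules: copy the vectors to
    # PcdCpuVectorBaseAddress instead of aligning them in place.
    #CpuExceptionHandlerLib|ArmPkg/Library/ArmExceptionLib/ArmRelocateExceptionLib.inf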
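
A minimal C sketch of how a module linked against this library might install a handler. It assumes only the entry points defined above (InitializeCpuExceptionHandlers, RegisterCpuInterruptHandler) plus standard MdePkg/MdeModulePkg headers; the function names, the DEBUG text, and the AArch64-only ELR access are illustrative and not part of the patch:

#include <Uefi.h>
#include <Library/CpuExceptionHandlerLib.h>
#include <Library/DebugLib.h>
#include <Protocol/DebugSupport.h>   // EFI_SYSTEM_CONTEXT, EXCEPT_AARCH64_* types

// Illustrative handler; the prototype follows EFI_CPU_INTERRUPT_HANDLER.
STATIC
VOID
EFIAPI
PlatformSyncExceptionHandler (
  IN     EFI_EXCEPTION_TYPE  ExceptionType,
  IN OUT EFI_SYSTEM_CONTEXT  SystemContext
  )
{
  // Log the exception return address (AArch64 build shown), then stop.
  DEBUG ((EFI_D_ERROR, "Sync exception, ELR=0x%lx\n",
    SystemContext.SystemContextAArch64->ELR));
  ASSERT (FALSE);
}

EFI_STATUS
PlatformInstallExceptionHandlers (
  VOID
  )
{
  EFI_STATUS  Status;

  // Sets up the vector table (in place or relocated, depending on the
  // library instance) and writes VBAR; VectorInfo is unused here.
  Status = InitializeCpuExceptionHandlers (NULL);
  if (EFI_ERROR (Status)) {
    return Status;
  }

  // Hook the AArch64 synchronous exception vector; passing NULL instead
  // of a handler would uninstall it again.
  return RegisterCpuInterruptHandler (
           EXCEPT_AARCH64_SYNCHRONOUS_EXCEPTIONS,
           PlatformSyncExceptionHandler
           );
}

Per CommonCExceptionHandler above, a vector with a registered handler calls that handler and returns; vectors without one fall through to DefaultExceptionHandler.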