ArmPkg: Added AArch64 support

Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Harry Liebel <Harry.Liebel@arm.com>
Signed-off-by: Olivier Martin <olivier.martin@arm.com>



git-svn-id: https://svn.code.sf.net/p/edk2/code/trunk/edk2@14486 6f19259b-4bc3-4df7-8a09-765794883524
Harry Liebel
2013-07-18 18:07:46 +00:00
committed by oliviermartin
parent 8477cb6e15
commit 25402f5d06
58 changed files with 4786 additions and 43 deletions

View File

@@ -19,7 +19,7 @@
#include <Library/TimerLib.h>
#include <Library/DebugLib.h>
#include <Library/PcdLib.h>
#include <Library/ArmV7ArchTimerLib.h>
#include <Library/ArmArchTimerLib.h>
#include <Chipset/ArmV7.h>
#define TICKS_PER_MICRO_SEC (PcdGet32 (PcdArmArchTimerFreqInHz)/1000000U)
@@ -43,15 +43,18 @@ TimerConstructor (
// manual lower bound of the frequency is in the range of 1-10MHz
ASSERT (TICKS_PER_MICRO_SEC);
#ifdef MDE_CPU_ARM
// Only set the frequency for ARMv7. We expect the secure firmware to have already done it
// If the security extensions are not implemented set Timer Frequency
if ((ArmReadIdPfr1 () & 0xF0) == 0x0) {
ArmArchTimerSetTimerFreq (PcdGet32 (PcdArmArchTimerFreqInHz));
}
#endif
// Architectural Timer Frequency must be set in Secure privileged mode (if Security Extensions are supported).
// If the reset value (0) is returned just ASSERT.
TimerFreq = ArmArchTimerGetTimerFreq ();
ASSERT (TimerFreq);
ASSERT (TimerFreq != 0);
} else {
DEBUG ((EFI_D_ERROR, "ARM Architectural Timer is not available in the CPU, hence this library can not be used.\n"));

View File

@@ -0,0 +1,275 @@
/** @file
*
* Copyright (c) 2011-2013, ARM Limited. All rights reserved.
*
* This program and the accompanying materials
* are licensed and made available under the terms and conditions of the BSD License
* which accompanies this distribution. The full text of the license may be found at
* http://opensource.org/licenses/bsd-license.php
*
* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
*
**/
#include <Uefi.h>
#include <Chipset/AArch64.h>
#include <Library/BaseMemoryLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/ArmLib.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>
#include "AArch64Lib.h"
#include "ArmLibPrivate.h"
#include <Library/ArmArchTimerLib.h>
VOID
EFIAPI
ArmArchTimerReadReg (
IN ARM_ARCH_TIMER_REGS Reg,
OUT VOID *DstBuf
)
{
// Check if the Generic/Architecture timer is implemented
if (ArmIsArchTimerImplemented ()) {
switch (Reg) {
case CntFrq:
*((UINTN *)DstBuf) = ArmReadCntFrq ();
break;
case CntPct:
*((UINT64 *)DstBuf) = ArmReadCntPct ();
break;
case CntkCtl:
*((UINTN *)DstBuf) = ArmReadCntkCtl();
break;
case CntpTval:
*((UINTN *)DstBuf) = ArmReadCntpTval ();
break;
case CntpCtl:
*((UINTN *)DstBuf) = ArmReadCntpCtl ();
break;
case CntvTval:
*((UINTN *)DstBuf) = ArmReadCntvTval ();
break;
case CntvCtl:
*((UINTN *)DstBuf) = ArmReadCntvCtl ();
break;
case CntvCt:
*((UINT64 *)DstBuf) = ArmReadCntvCt ();
break;
case CntpCval:
*((UINT64 *)DstBuf) = ArmReadCntpCval ();
break;
case CntvCval:
*((UINT64 *)DstBuf) = ArmReadCntvCval ();
break;
case CntvOff:
*((UINT64 *)DstBuf) = ArmReadCntvOff ();
break;
case CnthCtl:
case CnthpTval:
case CnthpCtl:
case CnthpCval:
DEBUG ((EFI_D_ERROR, "The register is related to Hypervisor Mode. Can't perform requested operation\n "));
break;
default:
DEBUG ((EFI_D_ERROR, "Unknown ARM Generic Timer register %x. \n ", Reg));
}
} else {
DEBUG ((EFI_D_ERROR, "Attempt to read ARM Generic Timer registers. But ARM Generic Timer extension is not implemented \n "));
ASSERT (0);
}
}
VOID
EFIAPI
ArmArchTimerWriteReg (
IN ARM_ARCH_TIMER_REGS Reg,
IN VOID *SrcBuf
)
{
// Check if the Generic/Architecture timer is implemented
if (ArmIsArchTimerImplemented ()) {
switch (Reg) {
case CntFrq:
ArmWriteCntFrq (*((UINTN *)SrcBuf));
break;
case CntPct:
DEBUG ((EFI_D_ERROR, "Can't write to Read Only Register: CNTPCT \n"));
break;
case CntkCtl:
ArmWriteCntkCtl (*((UINTN *)SrcBuf));
break;
case CntpTval:
ArmWriteCntpTval (*((UINTN *)SrcBuf));
break;
case CntpCtl:
ArmWriteCntpCtl (*((UINTN *)SrcBuf));
break;
case CntvTval:
ArmWriteCntvTval (*((UINTN *)SrcBuf));
break;
case CntvCtl:
ArmWriteCntvCtl (*((UINTN *)SrcBuf));
break;
case CntvCt:
DEBUG ((EFI_D_ERROR, "Can't write to Read Only Register: CNTVCT \n"));
break;
case CntpCval:
ArmWriteCntpCval (*((UINT64 *)SrcBuf) );
break;
case CntvCval:
ArmWriteCntvCval (*((UINT64 *)SrcBuf) );
break;
case CntvOff:
ArmWriteCntvOff (*((UINT64 *)SrcBuf));
break;
case CnthCtl:
case CnthpTval:
case CnthpCtl:
case CnthpCval:
DEBUG ((EFI_D_ERROR, "The register is related to Hypervisor Mode. Can't perform requested operation\n "));
break;
default:
DEBUG ((EFI_D_ERROR, "Unknown ARM Generic Timer register %x. \n ", Reg));
}
} else {
DEBUG ((EFI_D_ERROR, "Attempt to write to ARM Generic Timer registers. But ARM Generic Timer extension is not implemented \n "));
ASSERT (0);
}
}
VOID
EFIAPI
ArmArchTimerEnableTimer (
VOID
)
{
UINTN TimerCtrlReg;
ArmArchTimerReadReg (CntpCtl, (VOID *)&TimerCtrlReg);
TimerCtrlReg |= ARM_ARCH_TIMER_ENABLE;
ArmArchTimerWriteReg (CntpCtl, (VOID *)&TimerCtrlReg);
}
VOID
EFIAPI
ArmArchTimerDisableTimer (
VOID
)
{
UINTN TimerCtrlReg;
ArmArchTimerReadReg (CntpCtl, (VOID *)&TimerCtrlReg);
TimerCtrlReg &= ~ARM_ARCH_TIMER_ENABLE;
ArmArchTimerWriteReg (CntpCtl, (VOID *)&TimerCtrlReg);
}
VOID
EFIAPI
ArmArchTimerSetTimerFreq (
IN UINTN FreqInHz
)
{
ArmArchTimerWriteReg (CntFrq, (VOID *)&FreqInHz);
}
UINTN
EFIAPI
ArmArchTimerGetTimerFreq (
VOID
)
{
UINTN ArchTimerFreq = 0;
ArmArchTimerReadReg (CntFrq, (VOID *)&ArchTimerFreq);
return ArchTimerFreq;
}
UINTN
EFIAPI
ArmArchTimerGetTimerVal (
VOID
)
{
UINTN ArchTimerVal;
ArmArchTimerReadReg (CntpTval, (VOID *)&ArchTimerVal);
return ArchTimerVal;
}
VOID
EFIAPI
ArmArchTimerSetTimerVal (
IN UINTN Val
)
{
ArmArchTimerWriteReg (CntpTval, (VOID *)&Val);
}
UINT64
EFIAPI
ArmArchTimerGetSystemCount (
VOID
)
{
UINT64 SystemCount;
ArmArchTimerReadReg (CntPct, (VOID *)&SystemCount);
return SystemCount;
}
UINTN
EFIAPI
ArmArchTimerGetTimerCtrlReg (
VOID
)
{
UINTN Val;
ArmArchTimerReadReg (CntpCtl, (VOID *)&Val);
return Val;
}
VOID
EFIAPI
ArmArchTimerSetTimerCtrlReg (
UINTN Val
)
{
ArmArchTimerWriteReg (CntpCtl, (VOID *)&Val);
}
VOID
EFIAPI
ArmArchTimerSetCompareVal (
IN UINT64 Val
)
{
ArmArchTimerWriteReg (CntpCval, (VOID *)&Val);
}
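For illustration only (not part of this change): a consumer such as a timer driver could use the accessors above to arm a one-shot physical timer event. The helper name and the 10 ms period below are hypothetical.

  #include <Library/ArmArchTimerLib.h>

  VOID
  ExampleArmPhysicalTimer (
    VOID
    )
  {
    UINTN   TimerFreq;
    UINT64  CompareValue;

    // CNTFRQ holds the counter frequency in Hz
    TimerFreq = ArmArchTimerGetTimerFreq ();

    // Fire roughly 10 ms from now: current CNTPCT plus TimerFreq/100 ticks
    CompareValue = ArmArchTimerGetSystemCount () + (TimerFreq / 100);
    ArmArchTimerSetCompareVal (CompareValue);   // programs CNTP_CVAL
    ArmArchTimerEnableTimer ();                 // sets the enable bit in CNTP_CTL
  }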

View File

@@ -0,0 +1,140 @@
#------------------------------------------------------------------------------
#
# Copyright (c) 2011 - 2013, ARM Limited. All rights reserved.
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
#------------------------------------------------------------------------------
.text
.align 2
ASM_GLOBAL ASM_PFX(ArmReadCntFrq)
ASM_GLOBAL ASM_PFX(ArmWriteCntFrq)
ASM_GLOBAL ASM_PFX(ArmReadCntPct)
ASM_GLOBAL ASM_PFX(ArmReadCntkCtl)
ASM_GLOBAL ASM_PFX(ArmWriteCntkCtl)
ASM_GLOBAL ASM_PFX(ArmReadCntpTval)
ASM_GLOBAL ASM_PFX(ArmWriteCntpTval)
ASM_GLOBAL ASM_PFX(ArmReadCntpCtl)
ASM_GLOBAL ASM_PFX(ArmWriteCntpCtl)
ASM_GLOBAL ASM_PFX(ArmReadCntvTval)
ASM_GLOBAL ASM_PFX(ArmWriteCntvTval)
ASM_GLOBAL ASM_PFX(ArmReadCntvCtl)
ASM_GLOBAL ASM_PFX(ArmWriteCntvCtl)
ASM_GLOBAL ASM_PFX(ArmReadCntvCt)
ASM_GLOBAL ASM_PFX(ArmReadCntpCval)
ASM_GLOBAL ASM_PFX(ArmWriteCntpCval)
ASM_GLOBAL ASM_PFX(ArmReadCntvCval)
ASM_GLOBAL ASM_PFX(ArmWriteCntvCval)
ASM_GLOBAL ASM_PFX(ArmReadCntvOff)
ASM_GLOBAL ASM_PFX(ArmWriteCntvOff)
ASM_PFX(ArmReadCntFrq):
mrs x0, cntfrq_el0 // Read CNTFRQ
ret
# NOTE - Can only write while at highest implemented EL level (EL3 on model). Else ReadOnly (EL2, EL1, EL0)
ASM_PFX(ArmWriteCntFrq):
msr cntfrq_el0, x0 // Write to CNTFRQ
ret
ASM_PFX(ArmReadCntPct):
mrs x0, cntpct_el0 // Read CNTPCT (Physical counter register)
ret
ASM_PFX(ArmReadCntkCtl):
mrs x0, cntkctl_el1 // Read CNTK_CTL (Timer PL1 Control Register)
ret
ASM_PFX(ArmWriteCntkCtl):
msr cntkctl_el1, x0 // Write to CNTK_CTL (Timer PL1 Control Register)
ret
ASM_PFX(ArmReadCntpTval):
mrs x0, cntp_tval_el0 // Read CNTP_TVAL (PL1 physical timer value register)
ret
ASM_PFX(ArmWriteCntpTval):
msr cntp_tval_el0, x0 // Write to CNTP_TVAL (PL1 physical timer value register)
ret
ASM_PFX(ArmReadCntpCtl):
mrs x0, cntp_ctl_el0 // Read CNTP_CTL (PL1 Physical Timer Control Register)
ret
ASM_PFX(ArmWriteCntpCtl):
msr cntp_ctl_el0, x0 // Write to CNTP_CTL (PL1 Physical Timer Control Register)
ret
ASM_PFX(ArmReadCntvTval):
mrs x0, cntv_tval_el0 // Read CNTV_TVAL (Virtual Timer Value register)
ret
ASM_PFX(ArmWriteCntvTval):
msr cntv_tval_el0, x0 // Write to CNTV_TVAL (Virtual Timer Value register)
ret
ASM_PFX(ArmReadCntvCtl):
mrs x0, cntv_ctl_el0 // Read CNTV_CTL (Virtual Timer Control Register)
ret
ASM_PFX(ArmWriteCntvCtl):
msr cntv_ctl_el0, x0 // Write to CNTV_CTL (Virtual Timer Control Register)
ret
ASM_PFX(ArmReadCntvCt):
mrs x0, cntvct_el0 // Read CNTVCT (Virtual Count Register)
ret
ASM_PFX(ArmReadCntpCval):
mrs x0, cntp_cval_el0 // Read CNTP_CVAL (Physical Timer Compare Value Register)
ret
ASM_PFX(ArmWriteCntpCval):
msr cntp_cval_el0, x0 // Write to CNTP_CVAL (Physical Timer Compare Value Register)
ret
ASM_PFX(ArmReadCntvCval):
mrs x0, cntv_cval_el0 // Read CNTV_CVAL (Virtual Timer Compare Value Register)
ret
ASM_PFX(ArmWriteCntvCval):
msr cntv_cval_el0, x0 // Write to CNTV_CVAL (Virtual Timer Compare Value Register)
ret
ASM_PFX(ArmReadCntvOff):
mrs x0, cntvoff_el2 // Read CNTVOFF (virtual Offset register)
ret
ASM_PFX(ArmWriteCntvOff):
msr cntvoff_el2, x0 // Write to CNTVOFF (Virtual Offset register)
ret
ASM_FUNCTION_REMOVE_IF_UNREFERENCED

View File

@@ -0,0 +1,263 @@
/** @file
Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
Portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include <Uefi.h>
#include <Chipset/AArch64.h>
#include <Library/ArmLib.h>
#include <Library/BaseLib.h>
#include <Library/IoLib.h>
#include "AArch64Lib.h"
#include "ArmLibPrivate.h"
ARM_CACHE_TYPE
EFIAPI
ArmCacheType (
VOID
)
{
return ARM_CACHE_TYPE_WRITE_BACK;
}
ARM_CACHE_ARCHITECTURE
EFIAPI
ArmCacheArchitecture (
VOID
)
{
UINT32 CLIDR = ReadCLIDR ();
return (ARM_CACHE_ARCHITECTURE)CLIDR; // BugBug Fix Me
}
BOOLEAN
EFIAPI
ArmDataCachePresent (
VOID
)
{
UINT32 CLIDR = ReadCLIDR ();
if ((CLIDR & 0x2) == 0x2) {
// Data cache exists
return TRUE;
}
if ((CLIDR & 0x7) == 0x4) {
// Unified cache
return TRUE;
}
return FALSE;
}
UINTN
EFIAPI
ArmDataCacheSize (
VOID
)
{
UINT32 NumSets;
UINT32 Associativity;
UINT32 LineSize;
UINT32 CCSIDR = ReadCCSIDR (0);
LineSize = (1 << ((CCSIDR & 0x7) + 2));
Associativity = ((CCSIDR >> 3) & 0x3ff) + 1;
NumSets = ((CCSIDR >> 13) & 0x7fff) + 1;
// LineSize is in words (4 byte chunks)
return NumSets * Associativity * LineSize * 4;
}
UINTN
EFIAPI
ArmDataCacheAssociativity (
VOID
)
{
UINT32 CCSIDR = ReadCCSIDR (0);
return ((CCSIDR >> 3) & 0x3ff) + 1;
}
UINTN
ArmDataCacheSets (
VOID
)
{
UINT32 CCSIDR = ReadCCSIDR (0);
return ((CCSIDR >> 13) & 0x7fff) + 1;
}
UINTN
EFIAPI
ArmDataCacheLineLength (
VOID
)
{
UINT32 CCSIDR = ReadCCSIDR (0) & 7;
// * 4 converts to bytes
return (1 << (CCSIDR + 2)) * 4;
}
BOOLEAN
EFIAPI
ArmInstructionCachePresent (
VOID
)
{
UINT32 CLIDR = ReadCLIDR ();
if ((CLIDR & 1) == 1) {
// Instruction cache exists
return TRUE;
}
if ((CLIDR & 0x7) == 0x4) {
// Unified cache
return TRUE;
}
return FALSE;
}
UINTN
EFIAPI
ArmInstructionCacheSize (
VOID
)
{
UINT32 NumSets;
UINT32 Associativity;
UINT32 LineSize;
UINT32 CCSIDR = ReadCCSIDR (1);
LineSize = (1 << ((CCSIDR & 0x7) + 2));
Associativity = ((CCSIDR >> 3) & 0x3ff) + 1;
NumSets = ((CCSIDR >> 13) & 0x7fff) + 1;
// LineSize is in words (4 byte chunks)
return NumSets * Associativity * LineSize * 4;
}
UINTN
EFIAPI
ArmInstructionCacheAssociativity (
VOID
)
{
UINT32 CCSIDR = ReadCCSIDR (1);
return ((CCSIDR >> 3) & 0x3ff) + 1;
}
UINTN
EFIAPI
ArmInstructionCacheSets (
VOID
)
{
UINT32 CCSIDR = ReadCCSIDR (1);
return ((CCSIDR >> 13) & 0x7fff) + 1;
}
UINTN
EFIAPI
ArmInstructionCacheLineLength (
VOID
)
{
UINT32 CCSIDR = ReadCCSIDR (1) & 7;
// * 4 converts to bytes
return (1 << (CCSIDR + 2)) * 4;
}
VOID
AArch64DataCacheOperation (
IN AARCH64_CACHE_OPERATION DataCacheOperation
)
{
UINTN SavedInterruptState;
SavedInterruptState = ArmGetInterruptState ();
ArmDisableInterrupts();
AArch64AllDataCachesOperation (DataCacheOperation);
ArmDrainWriteBuffer ();
if (SavedInterruptState) {
ArmEnableInterrupts ();
}
}
VOID
AArch64PoUDataCacheOperation (
IN AARCH64_CACHE_OPERATION DataCacheOperation
)
{
UINTN SavedInterruptState;
SavedInterruptState = ArmGetInterruptState ();
ArmDisableInterrupts ();
AArch64PerformPoUDataCacheOperation (DataCacheOperation);
ArmDrainWriteBuffer ();
if (SavedInterruptState) {
ArmEnableInterrupts ();
}
}
VOID
EFIAPI
ArmInvalidateDataCache (
VOID
)
{
AArch64DataCacheOperation (ArmInvalidateDataCacheEntryBySetWay);
}
VOID
EFIAPI
ArmCleanInvalidateDataCache (
VOID
)
{
AArch64DataCacheOperation (ArmCleanInvalidateDataCacheEntryBySetWay);
}
VOID
EFIAPI
ArmCleanDataCache (
VOID
)
{
AArch64DataCacheOperation (ArmCleanDataCacheEntryBySetWay);
}
VOID
EFIAPI
ArmCleanDataCacheToPoU (
VOID
)
{
AArch64PoUDataCacheOperation (ArmCleanDataCacheEntryBySetWay);
}
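For illustration only (not part of this change): decoding a hypothetical CCSIDR value of 0xFE01A with the arithmetic used by ArmDataCacheSize() above gives:

  LineSize      = 1 << ((0xFE01A & 0x7) + 2)      = 16 words = 64 bytes
  Associativity = ((0xFE01A >> 3) & 0x3FF) + 1    = 4 ways
  NumSets       = ((0xFE01A >> 13) & 0x7FFF) + 1  = 128 sets
  Cache size    = 128 sets * 4 ways * 64 bytes    = 32 KB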

View File

@@ -0,0 +1,98 @@
/** @file
Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
Portions Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#ifndef __AARCH64_LIB_H__
#define __AARCH64_LIB_H__
typedef VOID (*AARCH64_CACHE_OPERATION)(UINTN);
VOID
EFIAPI
ArmDrainWriteBuffer (
VOID
);
VOID
EFIAPI
ArmInvalidateDataCacheEntryBySetWay (
IN UINTN SetWayFormat
);
VOID
EFIAPI
ArmCleanDataCacheEntryBySetWay (
IN UINTN SetWayFormat
);
VOID
EFIAPI
ArmCleanDataCacheToPoUEntryBySetWay (
IN UINTN SetWayFormat
);
VOID
EFIAPI
ArmCleanInvalidateDataCacheEntryBySetWay (
IN UINTN SetWayFormat
);
VOID
EFIAPI
ArmEnableAsynchronousAbort (
VOID
);
UINTN
EFIAPI
ArmDisableAsynchronousAbort (
VOID
);
VOID
EFIAPI
ArmEnableIrq (
VOID
);
UINTN
EFIAPI
ArmDisableIrq (
VOID
);
VOID
EFIAPI
ArmEnableFiq (
VOID
);
UINTN
EFIAPI
ArmDisableFiq (
VOID
);
VOID
AArch64PerformPoUDataCacheOperation (
IN AARCH64_CACHE_OPERATION DataCacheOperation
);
VOID
AArch64AllDataCachesOperation (
IN AARCH64_CACHE_OPERATION DataCacheOperation
);
#endif // __AARCH64_LIB_H__

View File

@@ -0,0 +1,44 @@
#/** @file
#
# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
# Portions copyright (c) 2011-2013, ARM Limited. All rights reserved.
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
#
#**/
[Defines]
INF_VERSION = 0x00010005
BASE_NAME = AArch64Lib
FILE_GUID = ef20ddf5-b334-47b3-94cf-52ff44c29138
MODULE_TYPE = DXE_DRIVER
VERSION_STRING = 1.0
LIBRARY_CLASS = ArmLib
[Sources.AARCH64]
AArch64Lib.c
AArch64Mmu.c
AArch64ArchTimer.c
ArmLibSupportV8.S | GCC
../Common/AArch64/ArmLibSupport.S | GCC
AArch64Support.S | GCC
AArch64ArchTimerSupport.S | GCC
[Packages]
ArmPkg/ArmPkg.dec
MdePkg/MdePkg.dec
[LibraryClasses]
MemoryAllocationLib
[Protocols]
gEfiCpuArchProtocolGuid
[FixedPcd]
gArmTokenSpaceGuid.PcdArmCacheOperationThreshold

View File

@@ -0,0 +1,48 @@
#/** @file
#
# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
# Portions copyright (c) 2011-2013, ARM Ltd. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
#
#**/
[Defines]
INF_VERSION = 0x00010005
BASE_NAME = AArch64LibPrePi
FILE_GUID = fd72688d-dbd8-4cf2-91a3-15171dea7816
MODULE_TYPE = BASE
VERSION_STRING = 1.0
LIBRARY_CLASS = ArmLib
[Sources.common]
ArmLibSupportV8.S | GCC
AArch64Support.S | GCC
../Common/AArch64/ArmLibSupport.S | GCC
../Common/ArmLib.c
AArch64Lib.c
AArch64Mmu.c
AArch64ArchTimer.c
AArch64ArchTimerSupport.S | GCC
[Packages]
ArmPkg/ArmPkg.dec
MdePkg/MdePkg.dec
[LibraryClasses]
PrePiLib
[Protocols]
gEfiCpuArchProtocolGuid
[FixedPcd]
gArmTokenSpaceGuid.PcdArmCacheOperationThreshold

View File

@@ -0,0 +1,43 @@
#/* @file
#
# Copyright (c) 2011-2013, ARM Limited. All rights reserved.
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
#*/
[Defines]
INF_VERSION = 0x00010005
BASE_NAME = AArch64Lib
FILE_GUID = eb7441e4-3ddf-48b8-a009-14f428b19e49
MODULE_TYPE = BASE
VERSION_STRING = 1.0
LIBRARY_CLASS = ArmLib
[Sources.common]
ArmLibSupportV8.S | GCC
AArch64Support.S | GCC
ArmLib.c
../Common/AArch64/ArmLibSupport.S | GCC
AArch64Lib.c
AArch64ArchTimer.c
AArch64ArchTimerSupport.S | GCC
[Packages]
ArmPkg/ArmPkg.dec
MdePkg/MdePkg.dec
[Protocols]
gEfiCpuArchProtocolGuid
[FixedPcd]
gArmTokenSpaceGuid.PcdArmCacheOperationThreshold

View File

@@ -0,0 +1,644 @@
/** @file
* File managing the MMU for ARMv8 architecture
*
* Copyright (c) 2011-2013, ARM Limited. All rights reserved.
*
* This program and the accompanying materials
* are licensed and made available under the terms and conditions of the BSD License
* which accompanies this distribution. The full text of the license may be found at
* http://opensource.org/licenses/bsd-license.php
*
* THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
* WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
*
**/
#include <Uefi.h>
#include <Chipset/AArch64.h>
#include <Library/BaseMemoryLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/ArmLib.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>
#include "AArch64Lib.h"
#include "ArmLibPrivate.h"
// We use this index definition to define an invalid block entry
#define TT_ATTR_INDX_INVALID ((UINT32)~0)
STATIC
UINT64
ArmMemoryAttributeToPageAttribute (
IN ARM_MEMORY_REGION_ATTRIBUTES Attributes
)
{
switch (Attributes) {
case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK:
return TT_ATTR_INDX_MEMORY_WRITE_BACK;
case ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH:
return TT_ATTR_INDX_MEMORY_WRITE_THROUGH;
case ARM_MEMORY_REGION_ATTRIBUTE_DEVICE:
return TT_ATTR_INDX_DEVICE_MEMORY;
case ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED:
return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK:
return TT_ATTR_INDX_MEMORY_WRITE_BACK;
case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH:
return TT_ATTR_INDX_MEMORY_WRITE_THROUGH;
case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_DEVICE:
return TT_ATTR_INDX_DEVICE_MEMORY;
case ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED:
return TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
default:
ASSERT(0);
return TT_ATTR_INDX_DEVICE_MEMORY;
}
}
UINT64
PageAttributeToGcdAttribute (
IN UINT64 PageAttributes
)
{
UINT64 GcdAttributes;
switch (PageAttributes & TT_ATTR_INDX_MASK) {
case TT_ATTR_INDX_DEVICE_MEMORY:
GcdAttributes = EFI_MEMORY_UC;
break;
case TT_ATTR_INDX_MEMORY_NON_CACHEABLE:
GcdAttributes = EFI_MEMORY_WC;
break;
case TT_ATTR_INDX_MEMORY_WRITE_THROUGH:
GcdAttributes = EFI_MEMORY_WT;
break;
case TT_ATTR_INDX_MEMORY_WRITE_BACK:
GcdAttributes = EFI_MEMORY_WB;
break;
default:
DEBUG ((EFI_D_ERROR, "PageAttributeToGcdAttribute: PageAttributes:0x%lX not supported.\n", PageAttributes));
ASSERT (0);
// The Global Coherency Domain (GCD) value is defined as a bit set.
// Returning 0 means no attribute has been set.
GcdAttributes = 0;
}
// Determine protection attributes
if (((PageAttributes & TT_AP_MASK) == TT_AP_NO_RO) || ((PageAttributes & TT_AP_MASK) == TT_AP_RO_RO)) {
// Read only cases map to write-protect
GcdAttributes |= EFI_MEMORY_WP;
}
// Process eXecute Never attribute
if ((PageAttributes & (TT_PXN_MASK | TT_UXN_MASK)) != 0 ) {
GcdAttributes |= EFI_MEMORY_XP;
}
return GcdAttributes;
}
UINT64
GcdAttributeToPageAttribute (
IN UINT64 GcdAttributes
)
{
UINT64 PageAttributes;
switch (GcdAttributes & 0xFF) {
case EFI_MEMORY_UC:
PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
break;
case EFI_MEMORY_WC:
PageAttributes = TT_ATTR_INDX_MEMORY_NON_CACHEABLE;
break;
case EFI_MEMORY_WT:
PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_THROUGH;
break;
case EFI_MEMORY_WB:
PageAttributes = TT_ATTR_INDX_MEMORY_WRITE_BACK;
break;
default:
DEBUG ((EFI_D_ERROR, "GcdAttributeToPageAttribute: 0x%X attributes is not supported.\n", GcdAttributes));
ASSERT (0);
// If no match has been found then we mark the memory as device memory.
// The only side effect of using device memory should be a slowdown in performance.
PageAttributes = TT_ATTR_INDX_DEVICE_MEMORY;
}
// Determine protection attributes
if (GcdAttributes & EFI_MEMORY_WP) {
// Read only cases map to write-protect
PageAttributes |= TT_AP_RO_RO;
}
// Process eXecute Never attribute
if (GcdAttributes & EFI_MEMORY_XP) {
PageAttributes |= (TT_PXN_MASK | TT_UXN_MASK);
}
return PageAttributes;
}
ARM_MEMORY_REGION_ATTRIBUTES
GcdAttributeToArmAttribute (
IN UINT64 GcdAttributes
)
{
switch (GcdAttributes & 0xFF) {
case EFI_MEMORY_UC:
return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;
case EFI_MEMORY_WC:
return ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED;
case EFI_MEMORY_WT:
return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH;
case EFI_MEMORY_WB:
return ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK;
default:
DEBUG ((EFI_D_ERROR, "GcdAttributeToArmAttribute: 0x%lX attributes is not supported.\n", GcdAttributes));
ASSERT (0);
return ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;
}
}
// Describe the T0SZ values for each translation table level
typedef struct {
UINTN MinT0SZ;
UINTN MaxT0SZ;
UINTN LargestT0SZ; // Generally (MaxT0SZ == LargestT0SZ) but at the Level3 Table
// the MaxT0SZ is not at the boundary of the table
} T0SZ_DESCRIPTION_PER_LEVEL;
// Map table for the corresponding Level of Table
STATIC CONST T0SZ_DESCRIPTION_PER_LEVEL T0SZPerTableLevel[] = {
{ 16, 24, 24 }, // Table Level 0
{ 25, 33, 33 }, // Table Level 1
{ 34, 39, 42 } // Table Level 2
};
VOID
GetRootTranslationTableInfo (
IN UINTN T0SZ,
OUT UINTN *TableLevel,
OUT UINTN *TableEntryCount
)
{
UINTN Index;
// Identify the level of the root table from the given T0SZ
for (Index = 0; Index < sizeof (T0SZPerTableLevel) / sizeof (T0SZ_DESCRIPTION_PER_LEVEL); Index++) {
if (T0SZ <= T0SZPerTableLevel[Index].MaxT0SZ) {
break;
}
}
// If we have not found the corresponding maximum T0SZ then we use the last one
if (Index == sizeof (T0SZPerTableLevel) / sizeof (T0SZ_DESCRIPTION_PER_LEVEL)) {
Index--;
}
// Get the level of the root table
if (TableLevel) {
*TableLevel = Index;
}
// The number of entries in the root table is 2^(LargestT0SZ - T0SZ + 1)
if (TableEntryCount) {
*TableEntryCount = 1 << (T0SZPerTableLevel[Index].LargestT0SZ - T0SZ + 1);
}
}
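For illustration only (not part of this change): with a 39-bit address space, T0SZ is 25 and matches the second row of T0SZPerTableLevel above, so the lookup resolves to a Level 1 root table of 1 << (33 - 25 + 1) = 512 entries, i.e. one full 4 KB table:

  UINTN  RootLevel;
  UINTN  RootEntryCount;

  GetRootTranslationTableInfo (25, &RootLevel, &RootEntryCount);
  // RootLevel == 1, RootEntryCount == 512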
STATIC
VOID
LookupAddresstoRootTable (
IN UINT64 MaxAddress,
OUT UINTN *T0SZ,
OUT UINTN *TableEntryCount
)
{
UINTN TopBit;
// Check the parameters are not NULL
ASSERT ((T0SZ != NULL) && (TableEntryCount != NULL));
// Look for the highest bit set in MaxAddress
for (TopBit = 63; TopBit != 0; TopBit--) {
if ((1ULL << TopBit) & MaxAddress) {
// MaxAddress top bit is found
TopBit = TopBit + 1;
break;
}
}
ASSERT (TopBit != 0);
// Calculate T0SZ from the top bit of the MaxAddress
*T0SZ = 64 - TopBit;
// Get the Table info from T0SZ
GetRootTranslationTableInfo (*T0SZ, NULL, TableEntryCount);
}
STATIC
UINT64*
GetBlockEntryListFromAddress (
IN UINT64 *RootTable,
IN UINT64 RegionStart,
OUT UINTN *TableLevel,
IN OUT UINT64 *BlockEntrySize,
IN OUT UINT64 **LastBlockEntry
)
{
UINTN RootTableLevel;
UINTN RootTableEntryCount;
UINT64 *TranslationTable;
UINT64 *BlockEntry;
UINT64 BlockEntryAddress;
UINTN BaseAddressAlignment;
UINTN PageLevel;
UINTN Index;
UINTN IndexLevel;
UINTN T0SZ;
UINT64 Attributes;
UINT64 TableAttributes;
// Initialize variable
BlockEntry = NULL;
// Ensure the parameters are valid
ASSERT (TableLevel && BlockEntrySize && LastBlockEntry);
// Ensure the Region is aligned on 4KB boundary
ASSERT ((RegionStart & (SIZE_4KB - 1)) == 0);
// Ensure the required size is aligned on 4KB boundary
ASSERT ((*BlockEntrySize & (SIZE_4KB - 1)) == 0);
//
// Calculate LastBlockEntry from T0SZ
//
T0SZ = ArmGetTCR () & TCR_T0SZ_MASK;
// Get the Table info from T0SZ
GetRootTranslationTableInfo (T0SZ, &RootTableLevel, &RootTableEntryCount);
// The last block of the root table depends on the number of entry in this table
*LastBlockEntry = (UINT64*)((UINTN)RootTable + (RootTableEntryCount * sizeof(UINT64)));
// If the start address is 0x0 then we use the size of the region to identify the alignment
if (RegionStart == 0) {
// Identify the highest possible alignment for the Region Size
for (BaseAddressAlignment = 0; BaseAddressAlignment < 64; BaseAddressAlignment++) {
if ((1 << BaseAddressAlignment) & *BlockEntrySize) {
break;
}
}
} else {
// Identify the highest possible alignment for the Base Address
for (BaseAddressAlignment = 0; BaseAddressAlignment < 64; BaseAddressAlignment++) {
if ((1 << BaseAddressAlignment) & RegionStart) {
break;
}
}
}
// Identify the Page Level the RegionStart must belong to
PageLevel = 3 - ((BaseAddressAlignment - 12) / 9);
// If the required size is smaller than the current block size then we need to go to the page below.
if (*BlockEntrySize < TT_ADDRESS_AT_LEVEL(PageLevel)) {
// It does not fit so we need to go a page level above
PageLevel++;
}
// Expose the found PageLevel to the caller
*TableLevel = PageLevel;
// Now, we have the Table Level we can get the Block Size associated to this table
*BlockEntrySize = TT_ADDRESS_AT_LEVEL(PageLevel);
//
// Get the Table Descriptor for the corresponding PageLevel. We need to decompose RegionStart to get appropriate entries
//
TranslationTable = RootTable;
for (IndexLevel = RootTableLevel; IndexLevel <= PageLevel; IndexLevel++) {
BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel, RegionStart);
if ((IndexLevel != 3) && ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_TABLE_ENTRY)) {
// Go to the next table
TranslationTable = (UINT64*)(*BlockEntry & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
// If we are at the last level then update the output
if (IndexLevel == PageLevel) {
// And get the appropriate BlockEntry at the next level
BlockEntry = (UINT64*)TT_GET_ENTRY_FOR_ADDRESS (TranslationTable, IndexLevel + 1, RegionStart);
// Set the last block for this new table
*LastBlockEntry = (UINT64*)((UINTN)TranslationTable + (TT_ENTRY_COUNT * sizeof(UINT64)));
}
} else if ((*BlockEntry & TT_TYPE_MASK) == TT_TYPE_BLOCK_ENTRY) {
// If we are not at the last level then we need to split this BlockEntry
if (IndexLevel != PageLevel) {
// Retrieve the attributes from the block entry
Attributes = *BlockEntry & TT_ATTRIBUTES_MASK;
// Convert the block entry attributes into Table descriptor attributes
TableAttributes = TT_TABLE_AP_NO_PERMISSION;
if (Attributes & TT_PXN_MASK) {
TableAttributes = TT_TABLE_PXN;
}
if (Attributes & TT_UXN_MASK) {
TableAttributes = TT_TABLE_XN;
}
if (Attributes & TT_NS) {
TableAttributes = TT_TABLE_NS;
}
// Get the address corresponding at this entry
BlockEntryAddress = RegionStart;
BlockEntryAddress = BlockEntryAddress >> TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
// Shift back to right to set zero before the effective address
BlockEntryAddress = BlockEntryAddress << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel);
// Set the correct entry type
if (IndexLevel + 1 == 3) {
Attributes |= TT_TYPE_BLOCK_ENTRY_LEVEL3;
} else {
Attributes |= TT_TYPE_BLOCK_ENTRY;
}
// Create a new translation table
TranslationTable = (UINT64*)AllocatePages (EFI_SIZE_TO_PAGES((TT_ENTRY_COUNT * sizeof(UINT64)) + TT_ALIGNMENT_DESCRIPTION_TABLE));
if (TranslationTable == NULL) {
return NULL;
}
TranslationTable = (UINT64*)((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
// Fill the new BlockEntry with the TranslationTable
*BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TableAttributes | TT_TYPE_TABLE_ENTRY;
// Populate the newly created lower level table
BlockEntry = TranslationTable;
for (Index = 0; Index < TT_ENTRY_COUNT; Index++) {
*BlockEntry = Attributes | (BlockEntryAddress + (Index << TT_ADDRESS_OFFSET_AT_LEVEL(IndexLevel + 1)));
BlockEntry++;
}
// Block Entry points at the beginning of the Translation Table
BlockEntry = TranslationTable;
}
} else {
// Case of an invalid entry while we are at a page level above the one targeted.
if (IndexLevel != PageLevel) {
// Create a new translation table
TranslationTable = (UINT64*)AllocatePages (EFI_SIZE_TO_PAGES((TT_ENTRY_COUNT * sizeof(UINT64)) + TT_ALIGNMENT_DESCRIPTION_TABLE));
if (TranslationTable == NULL) {
return NULL;
}
TranslationTable = (UINT64*)((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
ZeroMem (TranslationTable, TT_ENTRY_COUNT * sizeof(UINT64));
// Fill the new BlockEntry with the TranslationTable
*BlockEntry = ((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE) | TT_TYPE_TABLE_ENTRY;
}
}
}
return BlockEntry;
}
STATIC
RETURN_STATUS
FillTranslationTable (
IN UINT64 *RootTable,
IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryRegion
)
{
UINT64 Attributes;
UINT32 Type;
UINT64 RegionStart;
UINT64 RemainingRegionLength;
UINT64 *BlockEntry;
UINT64 *LastBlockEntry;
UINT64 BlockEntrySize;
UINTN TableLevel;
// Ensure the Length is aligned on 4KB boundary
ASSERT ((MemoryRegion->Length > 0) && ((MemoryRegion->Length & (SIZE_4KB - 1)) == 0));
// Variable initialization
Attributes = ArmMemoryAttributeToPageAttribute (MemoryRegion->Attributes) | TT_AF;
RemainingRegionLength = MemoryRegion->Length;
RegionStart = MemoryRegion->VirtualBase;
do {
// Get the first Block Entry that matches the Virtual Address and also the information on the Table Descriptor
// such as the size of the Block Entry and the address of the last BlockEntry of the Table Descriptor
BlockEntrySize = RemainingRegionLength;
BlockEntry = GetBlockEntryListFromAddress (RootTable, RegionStart, &TableLevel, &BlockEntrySize, &LastBlockEntry);
if (BlockEntry == NULL) {
// GetBlockEntryListFromAddress() returns NULL when it fails to allocate new pages for the Translation Tables
return RETURN_OUT_OF_RESOURCES;
}
if (TableLevel != 3) {
Type = TT_TYPE_BLOCK_ENTRY;
} else {
Type = TT_TYPE_BLOCK_ENTRY_LEVEL3;
}
do {
// Fill the Block Entry with attribute and output block address
*BlockEntry = (RegionStart & TT_ADDRESS_MASK_BLOCK_ENTRY) | Attributes | Type;
// Go to the next BlockEntry
RegionStart += BlockEntrySize;
RemainingRegionLength -= BlockEntrySize;
BlockEntry++;
} while ((RemainingRegionLength >= BlockEntrySize) && (BlockEntry <= LastBlockEntry));
} while (RemainingRegionLength != 0);
return RETURN_SUCCESS;
}
RETURN_STATUS
SetMemoryAttributes (
IN EFI_PHYSICAL_ADDRESS BaseAddress,
IN UINT64 Length,
IN UINT64 Attributes,
IN EFI_PHYSICAL_ADDRESS VirtualMask
)
{
RETURN_STATUS Status;
ARM_MEMORY_REGION_DESCRIPTOR MemoryRegion;
UINT64 *TranslationTable;
MemoryRegion.PhysicalBase = BaseAddress;
MemoryRegion.VirtualBase = BaseAddress;
MemoryRegion.Length = Length;
MemoryRegion.Attributes = GcdAttributeToArmAttribute (Attributes);
TranslationTable = ArmGetTTBR0BaseAddress ();
Status = FillTranslationTable (TranslationTable, &MemoryRegion);
if (RETURN_ERROR (Status)) {
return Status;
}
// Flush d-cache so descriptors make it back to uncached memory for subsequent table walks
// flush and invalidate pages
ArmCleanInvalidateDataCache ();
ArmInvalidateInstructionCache ();
// Invalidate all TLB entries so changes are synced
ArmInvalidateTlb ();
return RETURN_SUCCESS;
}
RETURN_STATUS
EFIAPI
ArmConfigureMmu (
IN ARM_MEMORY_REGION_DESCRIPTOR *MemoryTable,
OUT VOID **TranslationTableBase OPTIONAL,
OUT UINTN *TranslationTableSize OPTIONAL
)
{
VOID* TranslationTable;
UINTN TranslationTablePageCount;
UINT32 TranslationTableAttribute;
ARM_MEMORY_REGION_DESCRIPTOR *MemoryTableEntry;
UINT64 MaxAddress;
UINT64 TopAddress;
UINTN T0SZ;
UINTN RootTableEntryCount;
UINT64 TCR;
RETURN_STATUS Status;
ASSERT (MemoryTable != NULL);
// Identify the highest address of the memory table
MaxAddress = MemoryTable->PhysicalBase + MemoryTable->Length - 1;
MemoryTableEntry = MemoryTable;
while (MemoryTableEntry->Length != 0) {
TopAddress = MemoryTableEntry->PhysicalBase + MemoryTableEntry->Length - 1;
if (TopAddress > MaxAddress) {
MaxAddress = TopAddress;
}
MemoryTableEntry++;
}
// Lookup the Table Level to get the information
LookupAddresstoRootTable (MaxAddress, &T0SZ, &RootTableEntryCount);
//
// Set TCR that allows us to retrieve T0SZ in the subsequent functions
//
if ((ArmReadCurrentEL () == AARCH64_EL2) || (ArmReadCurrentEL () == AARCH64_EL3)) {
//Note: Bits 23 and 31 are reserved bits in TCR_EL2 and TCR_EL3
TCR = T0SZ | (1UL << 31) | (1UL << 23) | TCR_TG0_4KB;
// Set the Physical Address Size using MaxAddress
if (MaxAddress < SIZE_4GB) {
TCR |= TCR_PS_4GB;
} else if (MaxAddress < SIZE_64GB) {
TCR |= TCR_PS_64GB;
} else if (MaxAddress < SIZE_1TB) {
TCR |= TCR_PS_1TB;
} else if (MaxAddress < SIZE_4TB) {
TCR |= TCR_PS_4TB;
} else if (MaxAddress < SIZE_16TB) {
TCR |= TCR_PS_16TB;
} else if (MaxAddress < SIZE_256TB) {
TCR |= TCR_PS_256TB;
} else {
DEBUG ((EFI_D_ERROR, "ArmConfigureMmu: The MaxAddress 0x%lX is not supported by this MMU support.\n", MaxAddress));
ASSERT (0); // Memory spaces wider than 48 bits are not supported
return RETURN_UNSUPPORTED;
}
} else {
ASSERT (0); // Memory spaces wider than 48 bits are not supported
return RETURN_UNSUPPORTED;
}
// Set TCR
ArmSetTCR (TCR);
// Allocate pages for translation table
TranslationTablePageCount = EFI_SIZE_TO_PAGES((RootTableEntryCount * sizeof(UINT64)) + TT_ALIGNMENT_DESCRIPTION_TABLE);
TranslationTable = AllocatePages (TranslationTablePageCount);
if (TranslationTable == NULL) {
return RETURN_OUT_OF_RESOURCES;
}
TranslationTable = (VOID*)((UINTN)TranslationTable & TT_ADDRESS_MASK_DESCRIPTION_TABLE);
// We set TTBR0 just after allocating the table to retrieve its location from the subsequent
// functions without needing to pass this value across the functions. The MMU is only enabled
// after the translation tables are populated.
ArmSetTTBR0 (TranslationTable);
if (TranslationTableBase != NULL) {
*TranslationTableBase = TranslationTable;
}
if (TranslationTableSize != NULL) {
*TranslationTableSize = RootTableEntryCount * sizeof(UINT64);
}
ZeroMem (TranslationTable, RootTableEntryCount * sizeof(UINT64));
// Disable MMU and caches. ArmDisableMmu() also invalidates the TLBs
ArmDisableMmu ();
ArmDisableDataCache ();
ArmDisableInstructionCache ();
// Make sure nothing sneaked into the cache
ArmCleanInvalidateDataCache ();
ArmInvalidateInstructionCache ();
TranslationTableAttribute = TT_ATTR_INDX_INVALID;
while (MemoryTable->Length != 0) {
// Find the memory attribute for the Translation Table
if (((UINTN)TranslationTable >= MemoryTable->PhysicalBase) &&
((UINTN)TranslationTable <= MemoryTable->PhysicalBase - 1 + MemoryTable->Length)) {
TranslationTableAttribute = MemoryTable->Attributes;
}
Status = FillTranslationTable (TranslationTable, MemoryTable);
if (RETURN_ERROR (Status)) {
goto FREE_TRANSLATION_TABLE;
}
MemoryTable++;
}
// Translate the Memory Attributes into Translation Table Register Attributes
if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_UNCACHED_UNBUFFERED) ||
(TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_UNCACHED_UNBUFFERED)) {
TCR |= TCR_SH_NON_SHAREABLE | TCR_RGN_OUTER_NON_CACHEABLE | TCR_RGN_INNER_NON_CACHEABLE;
} else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK) ||
(TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_BACK)) {
TCR |= TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WRITE_BACK_ALLOC | TCR_RGN_INNER_WRITE_BACK_ALLOC;
} else if ((TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_WRITE_THROUGH) ||
(TranslationTableAttribute == ARM_MEMORY_REGION_ATTRIBUTE_NONSECURE_WRITE_THROUGH)) {
TCR |= TCR_SH_NON_SHAREABLE | TCR_RGN_OUTER_WRITE_THROUGH | TCR_RGN_INNER_WRITE_THROUGH;
} else {
// If we failed to find a mapping that contains the root translation table then it probably means the translation table
// is not mapped in the given memory map.
ASSERT (0);
Status = RETURN_UNSUPPORTED;
goto FREE_TRANSLATION_TABLE;
}
ArmSetMAIR (MAIR_ATTR(TT_ATTR_INDX_DEVICE_MEMORY, MAIR_ATTR_DEVICE_MEMORY) | // mapped to EFI_MEMORY_UC
MAIR_ATTR(TT_ATTR_INDX_MEMORY_NON_CACHEABLE, MAIR_ATTR_NORMAL_MEMORY_NON_CACHEABLE) | // mapped to EFI_MEMORY_WC
MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_THROUGH, MAIR_ATTR_NORMAL_MEMORY_WRITE_THROUGH) | // mapped to EFI_MEMORY_WT
MAIR_ATTR(TT_ATTR_INDX_MEMORY_WRITE_BACK, MAIR_ATTR_NORMAL_MEMORY_WRITE_BACK)); // mapped to EFI_MEMORY_WB
ArmDisableAlignmentCheck ();
ArmEnableInstructionCache ();
ArmEnableDataCache ();
ArmEnableMmu ();
return RETURN_SUCCESS;
FREE_TRANSLATION_TABLE:
FreePages (TranslationTable, TranslationTablePageCount);
return Status;
}
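For illustration only (not part of this change): a platform memory-initialization library would typically describe its memory map with a zero-terminated array of ARM_MEMORY_REGION_DESCRIPTOR entries and pass it to ArmConfigureMmu(). The helper name, base addresses and sizes below are hypothetical.

  #include <Library/ArmLib.h>
  #include <Library/BaseMemoryLib.h>

  RETURN_STATUS
  ExampleEnableMmu (
    VOID
    )
  {
    ARM_MEMORY_REGION_DESCRIPTOR  MemoryTable[3];
    VOID                          *TranslationTableBase;
    UINTN                         TranslationTableSize;

    // 1 GB of cacheable DRAM (hypothetical base address)
    MemoryTable[0].PhysicalBase = 0x80000000;
    MemoryTable[0].VirtualBase  = 0x80000000;
    MemoryTable[0].Length       = SIZE_1GB;
    MemoryTable[0].Attributes   = ARM_MEMORY_REGION_ATTRIBUTE_WRITE_BACK;

    // Memory-mapped peripherals (hypothetical base address)
    MemoryTable[1].PhysicalBase = 0x1C000000;
    MemoryTable[1].VirtualBase  = 0x1C000000;
    MemoryTable[1].Length       = SIZE_16MB;
    MemoryTable[1].Attributes   = ARM_MEMORY_REGION_ATTRIBUTE_DEVICE;

    // A zero-length entry terminates the table
    ZeroMem (&MemoryTable[2], sizeof (MemoryTable[2]));

    return ArmConfigureMmu (MemoryTable, &TranslationTableBase, &TranslationTableSize);
  }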

View File

@@ -0,0 +1,503 @@
#------------------------------------------------------------------------------
#
# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
# Copyright (c) 2011 - 2013, ARM Limited. All rights reserved.
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
#------------------------------------------------------------------------------
#include <Chipset/AArch64.h>
#include <AsmMacroIoLibV8.h>
.text
.align 3
GCC_ASM_EXPORT (ArmInvalidateInstructionCache)
GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryByMVA)
GCC_ASM_EXPORT (ArmCleanDataCacheEntryByMVA)
GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryByMVA)
GCC_ASM_EXPORT (ArmInvalidateDataCacheEntryBySetWay)
GCC_ASM_EXPORT (ArmCleanDataCacheEntryBySetWay)
GCC_ASM_EXPORT (ArmCleanInvalidateDataCacheEntryBySetWay)
GCC_ASM_EXPORT (ArmDrainWriteBuffer)
GCC_ASM_EXPORT (ArmEnableMmu)
GCC_ASM_EXPORT (ArmDisableMmu)
GCC_ASM_EXPORT (ArmDisableCachesAndMmu)
GCC_ASM_EXPORT (ArmMmuEnabled)
GCC_ASM_EXPORT (ArmEnableDataCache)
GCC_ASM_EXPORT (ArmDisableDataCache)
GCC_ASM_EXPORT (ArmEnableInstructionCache)
GCC_ASM_EXPORT (ArmDisableInstructionCache)
GCC_ASM_EXPORT (ArmDisableAlignmentCheck)
GCC_ASM_EXPORT (ArmEnableAlignmentCheck)
GCC_ASM_EXPORT (ArmEnableBranchPrediction)
GCC_ASM_EXPORT (ArmDisableBranchPrediction)
GCC_ASM_EXPORT (AArch64AllDataCachesOperation)
GCC_ASM_EXPORT (AArch64PerformPoUDataCacheOperation)
GCC_ASM_EXPORT (ArmDataMemoryBarrier)
GCC_ASM_EXPORT (ArmDataSyncronizationBarrier)
GCC_ASM_EXPORT (ArmInstructionSynchronizationBarrier)
GCC_ASM_EXPORT (ArmWriteVBar)
GCC_ASM_EXPORT (ArmVFPImplemented)
GCC_ASM_EXPORT (ArmEnableVFP)
GCC_ASM_EXPORT (ArmCallWFI)
GCC_ASM_EXPORT (ArmInvalidateInstructionAndDataTlb)
GCC_ASM_EXPORT (ArmReadMpidr)
GCC_ASM_EXPORT (ArmReadTpidrurw)
GCC_ASM_EXPORT (ArmWriteTpidrurw)
GCC_ASM_EXPORT (ArmIsArchTimerImplemented)
GCC_ASM_EXPORT (ArmReadIdPfr0)
GCC_ASM_EXPORT (ArmReadIdPfr1)
GCC_ASM_EXPORT (ArmWriteHcr)
GCC_ASM_EXPORT (ArmReadCurrentEL)
.set CTRL_M_BIT, (1 << 0)
.set CTRL_A_BIT, (1 << 1)
.set CTRL_C_BIT, (1 << 2)
.set CTRL_I_BIT, (1 << 12)
.set CTRL_V_BIT, (1 << 12)
.set CPACR_VFP_BITS, (3 << 20)
ASM_PFX(ArmInvalidateDataCacheEntryByMVA):
dc ivac, x0 // Invalidate single data cache line
dsb sy
isb
ret
ASM_PFX(ArmCleanDataCacheEntryByMVA):
dc cvac, x0 // Clean single data cache line
dsb sy
isb
ret
ASM_PFX(ArmCleanInvalidateDataCacheEntryByMVA):
dc civac, x0 // Clean and invalidate single data cache line
dsb sy
isb
ret
ASM_PFX(ArmInvalidateDataCacheEntryBySetWay):
dc isw, x0 // Invalidate this line
dsb sy
isb
ret
ASM_PFX(ArmCleanInvalidateDataCacheEntryBySetWay):
dc cisw, x0 // Clean and Invalidate this line
dsb sy
isb
ret
ASM_PFX(ArmCleanDataCacheEntryBySetWay):
dc csw, x0 // Clean this line
dsb sy
isb
ret
ASM_PFX(ArmInvalidateInstructionCache):
ic iallu // Invalidate entire instruction cache
dsb sy
isb
ret
ASM_PFX(ArmEnableMmu):
EL1_OR_EL2_OR_EL3(x1)
1: mrs x0, sctlr_el1 // Read System control register EL1
b 4f
2: mrs x0, sctlr_el2 // Read System control register EL2
b 4f
3: mrs x0, sctlr_el3 // Read System control register EL3
4: orr x0, x0, #CTRL_M_BIT // Set MMU enable bit
EL1_OR_EL2_OR_EL3(x1)
1: tlbi alle1
isb
msr sctlr_el1, x0 // Write back
b 4f
2: tlbi alle2
isb
msr sctlr_el2, x0 // Write back
b 4f
3: tlbi alle3
isb
msr sctlr_el3, x0 // Write back
4: dsb sy
isb
ret
ASM_PFX(ArmDisableMmu):
EL1_OR_EL2_OR_EL3(x1)
1: mrs x0, sctlr_el1 // Read System Control Register EL1
b 4f
2: mrs x0, sctlr_el2 // Read System Control Register EL2
b 4f
3: mrs x0, sctlr_el3 // Read System Control Register EL3
4: bic x0, x0, #CTRL_M_BIT // Clear MMU enable bit
EL1_OR_EL2_OR_EL3(x1)
1: msr sctlr_el1, x0 // Write back
tlbi alle1
b 4f
2: msr sctlr_el2, x0 // Write back
tlbi alle2
b 4f
3: msr sctlr_el3, x0 // Write back
tlbi alle3
4: dsb sy
isb
ret
ASM_PFX(ArmDisableCachesAndMmu):
EL1_OR_EL2_OR_EL3(x1)
1: mrs x0, sctlr_el1 // Get control register EL1
b 4f
2: mrs x0, sctlr_el2 // Get control register EL2
b 4f
3: mrs x0, sctlr_el3 // Get control register EL3
4: bic x0, x0, #CTRL_M_BIT // Disable MMU
bic x0, x0, #CTRL_C_BIT // Disable D Cache
bic x0, x0, #CTRL_I_BIT // Disable I Cache
EL1_OR_EL2_OR_EL3(x1)
1: msr sctlr_el1, x0 // Write back control register
b 4f
2: msr sctlr_el2, x0 // Write back control register
b 4f
3: msr sctlr_el3, x0 // Write back control register
4: dsb sy
isb
ret
ASM_PFX(ArmMmuEnabled):
EL1_OR_EL2_OR_EL3(x1)
1: mrs x0, sctlr_el1 // Get control register EL1
b 4f
2: mrs x0, sctlr_el2 // Get control register EL2
b 4f
3: mrs x0, sctlr_el3 // Get control register EL3
4: and x0, x0, #CTRL_M_BIT
ret
ASM_PFX(ArmEnableDataCache):
EL1_OR_EL2_OR_EL3(x1)
1: mrs x0, sctlr_el1 // Get control register EL1
b 4f
2: mrs x0, sctlr_el2 // Get control register EL2
b 4f
3: mrs x0, sctlr_el3 // Get control register EL3
4: orr x0, x0, #CTRL_C_BIT // Set C bit
EL1_OR_EL2_OR_EL3(x1)
1: msr sctlr_el1, x0 // Write back control register
b 4f
2: msr sctlr_el2, x0 // Write back control register
b 4f
3: msr sctlr_el3, x0 // Write back control register
4: dsb sy
isb
ret
ASM_PFX(ArmDisableDataCache):
EL1_OR_EL2_OR_EL3(x1)
1: mrs x0, sctlr_el1 // Get control register EL1
b 4f
2: mrs x0, sctlr_el2 // Get control register EL2
b 4f
3: mrs x0, sctlr_el3 // Get control register EL3
4: bic x0, x0, #CTRL_C_BIT // Clear C bit
EL1_OR_EL2_OR_EL3(x1)
1: msr sctlr_el1, x0 // Write back control register
b 4f
2: msr sctlr_el2, x0 // Write back control register
b 4f
3: msr sctlr_el3, x0 // Write back control register
4: dsb sy
isb
ret
ASM_PFX(ArmEnableInstructionCache):
EL1_OR_EL2_OR_EL3(x1)
1: mrs x0, sctlr_el1 // Get control register EL1
b 4f
2: mrs x0, sctlr_el2 // Get control register EL2
b 4f
3: mrs x0, sctlr_el3 // Get control register EL3
4: orr x0, x0, #CTRL_I_BIT // Set I bit
EL1_OR_EL2_OR_EL3(x1)
1: msr sctlr_el1, x0 // Write back control register
b 4f
2: msr sctlr_el2, x0 // Write back control register
b 4f
3: msr sctlr_el3, x0 // Write back control register
4: dsb sy
isb
ret
ASM_PFX(ArmDisableInstructionCache):
EL1_OR_EL2_OR_EL3(x1)
1: mrs x0, sctlr_el1 // Get control register EL1
b 4f
2: mrs x0, sctlr_el2 // Get control register EL2
b 4f
3: mrs x0, sctlr_el3 // Get control register EL3
4: bic x0, x0, #CTRL_I_BIT // Clear I bit
EL1_OR_EL2_OR_EL3(x1)
1: msr sctlr_el1, x0 // Write back control register
b 4f
2: msr sctlr_el2, x0 // Write back control register
b 4f
3: msr sctlr_el3, x0 // Write back control register
4: dsb sy
isb
ret
ASM_PFX(ArmEnableAlignmentCheck):
EL1_OR_EL2(x1)
1: mrs x0, sctlr_el1 // Get control register EL1
b 3f
2: mrs x0, sctlr_el2 // Get control register EL2
3: orr x0, x0, #CTRL_A_BIT // Set A (alignment check) bit
EL1_OR_EL2(x1)
1: msr sctlr_el1, x0 // Write back control register
b 3f
2: msr sctlr_el2, x0 // Write back control register
3: dsb sy
isb
ret
ASM_PFX(ArmDisableAlignmentCheck):
EL1_OR_EL2_OR_EL3(x1)
1: mrs x0, sctlr_el1 // Get control register EL1
b 4f
2: mrs x0, sctlr_el2 // Get control register EL2
b 4f
3: mrs x0, sctlr_el3 // Get control register EL3
4: bic x0, x0, #CTRL_A_BIT // Clear A (alignment check) bit
EL1_OR_EL2_OR_EL3(x1)
1: msr sctlr_el1, x0 // Write back control register
b 4f
2: msr sctlr_el2, x0 // Write back control register
b 4f
3: msr sctlr_el3, x0 // Write back control register
4: dsb sy
isb
ret
// Always turned on in AArch64. Else implementation specific. Leave in for C compatibility for now
ASM_PFX(ArmEnableBranchPrediction):
ret
// Always turned on in AArch64. Else implementation specific. Leave in for C compatibility for now.
ASM_PFX(ArmDisableBranchPrediction):
ret
ASM_PFX(AArch64AllDataCachesOperation):
// We can use regs 0-7 and 9-15 without having to save/restore.
// Save our link register on the stack.
str x30, [sp, #-0x10]!
mov x1, x0 // Save Function call in x1
mrs x6, clidr_el1 // Read EL1 CLIDR
and x3, x6, #0x7000000 // Mask out all but Level of Coherency (LoC)
lsr x3, x3, #23 // Left align cache level value
cbz x3, L_Finished // No need to clean if LoC is 0
mov x10, #0 // Start clean at cache level 0
b Loop1
ASM_PFX(AArch64PerformPoUDataCacheOperation):
// We can use regs 0-7 and 9-15 without having to save/restore.
// Save our link register on the stack.
str x30, [sp, #-0x10]!
mov x1, x0 // Save Function call in x1
mrs x6, clidr_el1 // Read EL1 CLIDR
and x3, x6, #0x38000000 // Mask out all but Point of Unification (PoU)
lsr x3, x3, #26 // Left align cache level value
cbz x3, L_Finished // No need to clean if PoU is 0
mov x10, #0 // Start clean at cache level 0
Loop1:
add x2, x10, x10, lsr #1 // Work out 3x cachelevel for cache info
lsr x12, x6, x2 // bottom 3 bits are the Cache type for this level
and x12, x12, #7 // get those 3 bits alone
cmp x12, #2 // what cache at this level?
b.lt L_Skip // no cache or only instruction cache at this level
msr csselr_el1, x10 // write the Cache Size selection register with current level (CSSELR)
isb // isb to sync the change to the CacheSizeID reg
mrs x12, ccsidr_el1 // reads current Cache Size ID register (CCSIDR)
and x2, x12, #0x7 // extract the line length field
add x2, x2, #4 // add 4 for the line length offset (log2 16 bytes)
mov x4, #0x400
sub x4, x4, #1
and x4, x4, x12, lsr #3 // x4 is the max number on the way size (right aligned)
clz w5, w4 // w5 is the bit position of the way size increment
mov x7, #0x00008000
sub x7, x7, #1
and x7, x7, x12, lsr #13 // x7 is the max number of the index size (right aligned)
Loop2:
mov x9, x4 // x9 working copy of the max way size (right aligned)
Loop3:
lsl x11, x9, x5
orr x0, x10, x11 // factor in the way number and cache number
lsl x11, x7, x2
orr x0, x0, x11 // factor in the index number
blr x1 // Goto requested cache operation
subs x9, x9, #1 // decrement the way number
b.ge Loop3
subs x7, x7, #1 // decrement the index
b.ge Loop2
L_Skip:
add x10, x10, #2 // increment the cache number
cmp x3, x10
b.gt Loop1
L_Finished:
dsb sy
isb
ldr x30, [sp], #0x10
ret
ASM_PFX(ArmDataMemoryBarrier):
dmb sy
ret
ASM_PFX(ArmDataSyncronizationBarrier):
ASM_PFX(ArmDrainWriteBuffer):
dsb sy
ret
ASM_PFX(ArmInstructionSynchronizationBarrier):
isb
ret
ASM_PFX(ArmWriteVBar):
EL1_OR_EL2_OR_EL3(x1)
1: msr vbar_el1, x0 // Set the Address of the EL1 Vector Table in the VBAR register
b 4f
2: msr vbar_el2, x0 // Set the Address of the EL2 Vector Table in the VBAR register
b 4f
3: msr vbar_el3, x0 // Set the Address of the EL3 Vector Table in the VBAR register
4: isb
ret
ASM_PFX(ArmEnableVFP):
// Check whether floating-point is implemented in the processor.
mov x1, x30 // Save LR
bl ArmReadIdPfr0 // Read EL1 Processor Feature Register (PFR0)
mov x30, x1 // Restore LR
ands x0, x0, #AARCH64_PFR0_FP // Extract bits indicating VFP implementation
cmp x0, #0 // VFP is implemented if '0'.
b.ne 4f // Exit if VFP not implemented.
// VFP is implemented.
// Make sure VFP exceptions are not trapped (to any exception level).
mrs x0, cpacr_el1 // Read EL1 Coprocessor Access Control Register (CPACR)
orr x0, x0, #CPACR_VFP_BITS // Disable VFP traps to EL1
msr cpacr_el1, x0 // Write back EL1 Coprocessor Access Control Register (CPACR)
mov x1, #AARCH64_CPTR_TFP // TFP Bit for trapping VFP Exceptions
EL1_OR_EL2_OR_EL3(x2)
1:ret // Not configurable in EL1
2:mrs x0, cptr_el2 // Disable VFP traps to EL2
bic x0, x0, x1
msr cptr_el2, x0
ret
3:mrs x0, cptr_el3 // Disable VFP traps to EL3
bic x0, x0, x1
msr cptr_el3, x0
4:ret
ASM_PFX(ArmCallWFI):
wfi
ret
ASM_PFX(ArmInvalidateInstructionAndDataTlb):
EL1_OR_EL2_OR_EL3(x0)
1: tlbi alle1
b 4f
2: tlbi alle2
b 4f
3: tlbi alle3
4: dsb sy
isb
ret
ASM_PFX(ArmReadMpidr):
mrs x0, mpidr_el1 // read EL1 MPIDR
ret
// Keep old function names for C compatibility for now. Change later?
ASM_PFX(ArmReadTpidrurw):
mrs x0, tpidr_el0 // read tpidr_el0 (v7 TPIDRURW) -> (v8 TPIDR_EL0)
ret
// Keep old function names for C compatibility for now. Change later?
ASM_PFX(ArmWriteTpidrurw):
msr tpidr_el0, x0 // write tpidr_el0 (v7 TPIDRURW) -> (v8 TPIDR_EL0)
ret
// Arch timers are mandatory on AArch64
ASM_PFX(ArmIsArchTimerImplemented):
mov x0, #1
ret
ASM_PFX(ArmReadIdPfr0):
mrs x0, id_aa64pfr0_el1 // Read ID_AA64PFR0 Register
ret
// Q: id_aa64pfr1_el1 not defined yet. What does this function want to access?
// A: used to setup arch timer. Check if we have security extensions, permissions to set stuff.
// See: ArmPkg/Library/ArmArchTimerLib/AArch64/ArmArchTimerLib.c
// Not defined yet, but stick in here for now, should read all zeros.
ASM_PFX(ArmReadIdPfr1):
mrs x0, id_aa64pfr1_el1 // Read ID_PFR1 Register
ret
// VOID ArmWriteHcr(UINTN Hcr)
ASM_PFX(ArmWriteHcr):
msr hcr_el2, x0 // Write the passed HCR value
ret
// UINTN ArmReadCurrentEL(VOID)
ASM_PFX(ArmReadCurrentEL):
mrs x0, CurrentEL
ret
dead:
b dead
ASM_FUNCTION_REMOVE_IF_UNREFERENCED

View File

@@ -0,0 +1,53 @@
/** @file
Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include <Base.h>
#include <Library/ArmLib.h>
#include <Library/DebugLib.h>
#include <Library/PcdLib.h>
#include "ArmLibPrivate.h"
VOID
EFIAPI
ArmCacheInformation (
OUT ARM_CACHE_INFO *CacheInfo
)
{
if (CacheInfo != NULL) {
CacheInfo->Type = ArmCacheType();
CacheInfo->Architecture = ArmCacheArchitecture();
CacheInfo->DataCachePresent = ArmDataCachePresent();
CacheInfo->DataCacheSize = ArmDataCacheSize();
CacheInfo->DataCacheAssociativity = ArmDataCacheAssociativity();
CacheInfo->DataCacheLineLength = ArmDataCacheLineLength();
CacheInfo->InstructionCachePresent = ArmInstructionCachePresent();
CacheInfo->InstructionCacheSize = ArmInstructionCacheSize();
CacheInfo->InstructionCacheAssociativity = ArmInstructionCacheAssociativity();
CacheInfo->InstructionCacheLineLength = ArmInstructionCacheLineLength();
}
}
VOID
EFIAPI
ArmSetAuxCrBit (
IN UINT32 Bits
)
{
UINT32 val = ArmReadAuxCr();
val |= Bits;
ArmWriteAuxCr(val);
}

View File

@@ -0,0 +1,82 @@
/** @file
Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#ifndef __ARM_LIB_PRIVATE_H__
#define __ARM_LIB_PRIVATE_H__
#define CACHE_SIZE_4_KB (3UL)
#define CACHE_SIZE_8_KB (4UL)
#define CACHE_SIZE_16_KB (5UL)
#define CACHE_SIZE_32_KB (6UL)
#define CACHE_SIZE_64_KB (7UL)
#define CACHE_SIZE_128_KB (8UL)
#define CACHE_ASSOCIATIVITY_DIRECT (0UL)
#define CACHE_ASSOCIATIVITY_4_WAY (2UL)
#define CACHE_ASSOCIATIVITY_8_WAY (3UL)
#define CACHE_PRESENT (0UL)
#define CACHE_NOT_PRESENT (1UL)
#define CACHE_LINE_LENGTH_32_BYTES (2UL)
#define SIZE_FIELD_TO_CACHE_SIZE(x) (((x) >> 6) & 0x0F)
#define SIZE_FIELD_TO_CACHE_ASSOCIATIVITY(x) (((x) >> 3) & 0x07)
#define SIZE_FIELD_TO_CACHE_PRESENCE(x) (((x) >> 2) & 0x01)
#define SIZE_FIELD_TO_CACHE_LINE_LENGTH(x) (((x) >> 0) & 0x03)
#define DATA_CACHE_SIZE_FIELD(x) (((x) >> 12) & 0x0FFF)
#define INSTRUCTION_CACHE_SIZE_FIELD(x) (((x) >> 0) & 0x0FFF)
#define DATA_CACHE_SIZE(x) (SIZE_FIELD_TO_CACHE_SIZE(DATA_CACHE_SIZE_FIELD(x)))
#define DATA_CACHE_ASSOCIATIVITY(x) (SIZE_FIELD_TO_CACHE_ASSOCIATIVITY(DATA_CACHE_SIZE_FIELD(x)))
#define DATA_CACHE_PRESENT(x) (SIZE_FIELD_TO_CACHE_PRESENCE(DATA_CACHE_SIZE_FIELD(x)))
#define DATA_CACHE_LINE_LENGTH(x) (SIZE_FIELD_TO_CACHE_LINE_LENGTH(DATA_CACHE_SIZE_FIELD(x)))
#define INSTRUCTION_CACHE_SIZE(x) (SIZE_FIELD_TO_CACHE_SIZE(INSTRUCTION_CACHE_SIZE_FIELD(x)))
#define INSTRUCTION_CACHE_ASSOCIATIVITY(x) (SIZE_FIELD_TO_CACHE_ASSOCIATIVITY(INSTRUCTION_CACHE_SIZE_FIELD(x)))
#define INSTRUCTION_CACHE_PRESENT(x) (SIZE_FIELD_TO_CACHE_PRESENCE(INSTRUCTION_CACHE_SIZE_FIELD(x)))
#define INSTRUCTION_CACHE_LINE_LENGTH(x) (SIZE_FIELD_TO_CACHE_LINE_LENGTH(INSTRUCTION_CACHE_SIZE_FIELD(x)))
#define CACHE_TYPE(x) (((x) >> 25) & 0x0F)
#define CACHE_TYPE_WRITE_BACK (0x0EUL)
#define CACHE_ARCHITECTURE(x) (((x) >> 24) & 0x01)
#define CACHE_ARCHITECTURE_UNIFIED (0UL)
#define CACHE_ARCHITECTURE_SEPARATE (1UL)
VOID
CPSRMaskInsert (
IN UINT32 Mask,
IN UINT32 Value
);
UINT32
CPSRRead (
VOID
);
UINT32
ReadCCSIDR (
IN UINT32 CSSELR
);
UINT32
ReadCLIDR (
VOID
);
#endif // __ARM_LIB_PRIVATE_H__

View File

@@ -0,0 +1,127 @@
#------------------------------------------------------------------------------
#
# Copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
# Copyright (c) 2011 - 2013, ARM Limited. All rights reserved.
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
#------------------------------------------------------------------------------
#include <AsmMacroIoLib.h>
.text
.align 3
GCC_ASM_EXPORT (ArmIsMpCore)
GCC_ASM_EXPORT (ArmEnableAsynchronousAbort)
GCC_ASM_EXPORT (ArmDisableAsynchronousAbort)
GCC_ASM_EXPORT (ArmEnableIrq)
GCC_ASM_EXPORT (ArmDisableIrq)
GCC_ASM_EXPORT (ArmEnableFiq)
GCC_ASM_EXPORT (ArmDisableFiq)
GCC_ASM_EXPORT (ArmEnableInterrupts)
GCC_ASM_EXPORT (ArmDisableInterrupts)
GCC_ASM_EXPORT (ArmDisableAllExceptions)
GCC_ASM_EXPORT (ReadCCSIDR)
GCC_ASM_EXPORT (ReadCLIDR)
#------------------------------------------------------------------------------
.set MPIDR_U_BIT, (30)
.set MPIDR_U_MASK, (1 << MPIDR_U_BIT)
.set DAIF_FIQ_BIT, (1 << 0)
.set DAIF_IRQ_BIT, (1 << 1)
.set DAIF_ABORT_BIT, (1 << 2)
.set DAIF_DEBUG_BIT, (1 << 3)
.set DAIF_INT_BITS, (DAIF_FIQ_BIT | DAIF_IRQ_BIT)
.set DAIF_ALL, (DAIF_DEBUG_BIT | DAIF_ABORT_BIT | DAIF_INT_BITS)
ASM_PFX(ArmIsMpCore):
mrs x0, mpidr_el1 // Read EL1 Multiprocessor Affinity Reg (MPIDR)
and x0, x0, #MPIDR_U_MASK // U bit clear means the processor is part of a multiprocessor system
lsr x0, x0, #MPIDR_U_BIT
eor x0, x0, #1
ret
ASM_PFX(ArmEnableAsynchronousAbort):
msr daifclr, #DAIF_ABORT_BIT
isb
ret
ASM_PFX(ArmDisableAsynchronousAbort):
msr daifset, #DAIF_ABORT_BIT
isb
ret
ASM_PFX(ArmEnableIrq):
msr daifclr, #DAIF_IRQ_BIT
isb
ret
ASM_PFX(ArmDisableIrq):
msr daifset, #DAIF_IRQ_BIT
isb
ret
ASM_PFX(ArmEnableFiq):
msr daifclr, #DAIF_FIQ_BIT
isb
ret
ASM_PFX(ArmDisableFiq):
msr daifset, #DAIF_FIQ_BIT
isb
ret
ASM_PFX(ArmEnableInterrupts):
msr daifclr, #DAIF_INT_BITS
isb
ret
ASM_PFX(ArmDisableInterrupts):
msr daifset, #DAIF_INT_BITS
isb
ret
ASM_PFX(ArmDisableAllExceptions):
msr daifset, #DAIF_ALL
isb
ret
// UINT32
// ReadCCSIDR (
// IN UINT32 CSSELR
// )
ASM_PFX(ReadCCSIDR):
msr csselr_el1, x0 // Write Cache Size Selection Register (CSSELR)
isb
mrs x0, ccsidr_el1 // Read current Cache Size ID Register (CCSIDR)
ret
// UINT32
// ReadCLIDR (
//   VOID
// )
ASM_PFX(ReadCLIDR):
mrs x0, clidr_el1 // Read Cache Level ID Register
ret
ASM_FUNCTION_REMOVE_IF_UNREFERENCED
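The DAIF-based routines above back the interrupt-control functions declared in ArmLib.h. A minimal caller sketch, assuming the usual ArmLib.h prototypes (ArmGetInterruptState, ArmDisableInterrupts, ArmEnableInterrupts):

  #include <Library/ArmLib.h>

  VOID
  UpdateSharedState (
    VOID
    )
  {
    BOOLEAN  InterruptsEnabled;

    // Remember the current state so it can be restored afterwards.
    InterruptsEnabled = ArmGetInterruptState ();
    ArmDisableInterrupts ();    // msr daifset with the IRQ and FIQ bits

    // ... touch state that must not be preempted ...

    if (InterruptsEnabled) {
      ArmEnableInterrupts ();   // msr daifclr with the IRQ and FIQ bits
    }
  }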

View File

@@ -21,7 +21,7 @@
#include <Library/DebugLib.h>
#include "ArmV7Lib.h"
#include "ArmLibPrivate.h"
#include <Library/ArmV7ArchTimerLib.h>
#include <Library/ArmArchTimerLib.h>
VOID
EFIAPI

View File

@@ -31,6 +31,9 @@
../Common/Arm/ArmLibSupport.S | GCC
../Common/Arm/ArmLibSupport.asm | RVCT
[Sources.AARCH64]
../Common/AArch64/ArmLibSupport.S | GCC
[Packages]
ArmPkg/ArmPkg.dec
MdePkg/MdePkg.dec

View File

@@ -0,0 +1,78 @@
//
// Copyright (c) 2012-2013, ARM Limited. All rights reserved.
//
// This program and the accompanying materials
// are licensed and made available under the terms and conditions of the BSD License
// which accompanies this distribution. The full text of the license may be found at
// http://opensource.org/licenses/bsd-license.php
//
// THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
// WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
//
//
.text
.align 3
GCC_ASM_EXPORT(ArmCallSmc)
GCC_ASM_EXPORT(ArmCallSmcArg1)
GCC_ASM_EXPORT(ArmCallSmcArg2)
GCC_ASM_EXPORT(ArmCallSmcArg3)
ASM_PFX(ArmCallSmc):
str x1, [sp, #-0x10]!
mov x1, x0
ldr x0,[x1]
smc #0
str x0,[x1]
ldr x1, [sp], #0x10
ret
ASM_PFX(ArmCallSmcArg1):
stp x2, x3, [sp, #-0x10]!
mov x2, x0
mov x3, x1
ldr x0,[x2]
ldr x1,[x3]
smc #0
str x0,[x2]
str x1,[x3]
ldp x2, x3, [sp], #0x10
ret
ASM_PFX(ArmCallSmcArg2):
stp x3, x4, [sp, #-0x10]!
str x5, [sp, #-8]!
mov x3, x0
mov x4, x1
mov x5, x2
ldr x0,[x3]
ldr x1,[x4]
ldr x2,[x5]
smc #0
str x0,[x3]
str x1,[x4]
str x2,[x5]
ldr x5, [sp], #8
ldp x3, x4, [sp], #0x10
ret
ASM_PFX(ArmCallSmcArg3):
stp x4, x5, [sp, #-0x10]!
stp x6, x7, [sp, #-0x10]!
mov x4, x0
mov x5, x1
mov x6, x2
mov x7, x3
ldr x0,[x4]
ldr x1,[x5]
ldr x2,[x6]
ldr x3,[x7]
smc #0
str x0,[x4]
str x1,[x5]
str x2,[x6]
str x3,[x7]
ldp x4, x5, [sp], #0x10
ldp x6, x7, [sp], #0x10
ret
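Each ArmCallSmcArgN variant loads its arguments from the caller's variables through pointers, issues SMC #0, and stores the values returned in the corresponding registers back through the same pointers. A minimal caller sketch, assuming the IN OUT UINTN * prototypes implied by the load/store sequence; the header name and function identifier are illustrative only:

  #include <Library/ArmSmcLib.h>    // assumed location of the ArmCallSmc* prototypes

  VOID
  ExampleSmcCall (
    VOID
    )
  {
    UINTN  FunctionId;
    UINTN  Arg1;

    FunctionId = 0x84000000;    // hypothetical SMC function identifier
    Arg1       = 0;

    // x0/x1 are loaded from these locations before the SMC instruction and
    // written back afterwards, so results are returned in place.
    ArmCallSmcArg1 (&FunctionId, &Arg1);

    // FunctionId now holds the value returned in x0, Arg1 the value in x1.
  }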

View File

@@ -23,6 +23,9 @@
Arm/ArmSmc.asm | RVCT
Arm/ArmSmc.S | GCC
[Sources.AARCH64]
AArch64/ArmSmc.S | GCC
[Packages]
MdePkg/MdePkg.dec
ArmPkg/ArmPkg.dec

View File

@@ -0,0 +1,32 @@
//
// Copyright (c) 2012-2013, ARM Limited. All rights reserved.
//
// This program and the accompanying materials
// are licensed and made available under the terms and conditions of the BSD License
// which accompanies this distribution. The full text of the license may be found at
// http://opensource.org/licenses/bsd-license.php
//
// THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
// WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
//
//
.text
.align 3
GCC_ASM_EXPORT(ArmCallSmc)
GCC_ASM_EXPORT(ArmCallSmcArg1)
GCC_ASM_EXPORT(ArmCallSmcArg2)
GCC_ASM_EXPORT(ArmCallSmcArg3)
ASM_PFX(ArmCallSmc):
ret
ASM_PFX(ArmCallSmcArg1):
ret
ASM_PFX(ArmCallSmcArg2):
ret
ASM_PFX(ArmCallSmcArg3):
ret

View File

@@ -26,6 +26,9 @@
Arm/ArmSmcNull.asm | RVCT
Arm/ArmSmcNull.S | GCC
[Sources.AARCH64]
AArch64/ArmSmcNull.S | GCC
[Packages]
MdePkg/MdePkg.dec
ArmPkg/ArmPkg.dec

View File

@@ -0,0 +1,146 @@
/** @file
Copyright (c) 2012-2013, ARM Ltd. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "MemLibInternals.h"
/**
Copy Length bytes from Source to Destination.
@param DestinationBuffer Target of copy
@param SourceBuffer Place to copy from
@param Length Number of bytes to copy
@return Destination
**/
VOID *
EFIAPI
InternalMemCopyMem (
OUT VOID *DestinationBuffer,
IN CONST VOID *SourceBuffer,
IN UINTN Length
)
{
//
// Declare the local variables that actually move the data elements as
// volatile to prevent the optimizer from replacing this function with
// the intrinsic memcpy()
//
volatile UINT8 *Destination8;
CONST UINT8 *Source8;
volatile UINT32 *Destination32;
CONST UINT32 *Source32;
volatile UINT64 *Destination64;
CONST UINT64 *Source64;
UINTN Alignment;
if ((((UINTN)DestinationBuffer & 0x7) == 0) && (((UINTN)SourceBuffer & 0x7) == 0) && (Length >= 8)) {
if (SourceBuffer > DestinationBuffer) {
Destination64 = (UINT64*)DestinationBuffer;
Source64 = (CONST UINT64*)SourceBuffer;
while (Length >= 8) {
*(Destination64++) = *(Source64++);
Length -= 8;
}
// Finish if there are still some bytes to copy
Destination8 = (UINT8*)Destination64;
Source8 = (CONST UINT8*)Source64;
while (Length-- != 0) {
*(Destination8++) = *(Source8++);
}
} else if (SourceBuffer < DestinationBuffer) {
Destination64 = (UINT64*)((UINTN)DestinationBuffer + Length);
Source64 = (CONST UINT64*)((UINTN)SourceBuffer + Length);
// Destination64 and Source64 were aligned on a 64-bit boundary
// but if length is not a multiple of 8 bytes then they won't be
// anymore.
Alignment = Length & 0x7;
if (Alignment != 0) {
Destination8 = (UINT8*)Destination64;
Source8 = (CONST UINT8*)Source64;
while (Alignment-- != 0) {
*(--Destination8) = *(--Source8);
--Length;
}
Destination64 = (UINT64*)Destination8;
Source64 = (CONST UINT64*)Source8;
}
while (Length > 0) {
*(--Destination64) = *(--Source64);
Length -= 8;
}
}
} else if ((((UINTN)DestinationBuffer & 0x3) == 0) && (((UINTN)SourceBuffer & 0x3) == 0) && (Length >= 4)) {
if (SourceBuffer > DestinationBuffer) {
Destination32 = (UINT32*)DestinationBuffer;
Source32 = (CONST UINT32*)SourceBuffer;
while (Length >= 4) {
*(Destination32++) = *(Source32++);
Length -= 4;
}
// Finish if there are still some bytes to copy
Destination8 = (UINT8*)Destination32;
Source8 = (CONST UINT8*)Source32;
while (Length-- != 0) {
*(Destination8++) = *(Source8++);
}
} else if (SourceBuffer < DestinationBuffer) {
Destination32 = (UINT32*)((UINTN)DestinationBuffer + Length);
Source32 = (CONST UINT32*)((UINTN)SourceBuffer + Length);
// Destination32 and Source32 were aligned on a 32-bit boundary
// but if length is not a multiple of 4 bytes then they won't be
// anymore.
Alignment = Length & 0x3;
if (Alignment != 0) {
Destination8 = (UINT8*)Destination32;
Source8 = (CONST UINT8*)Source32;
while (Alignment-- != 0) {
*(--Destination8) = *(--Source8);
--Length;
}
Destination32 = (UINT32*)Destination8;
Source32 = (CONST UINT32*)Source8;
}
while (Length > 0) {
*(--Destination32) = *(--Source32);
Length -= 4;
}
}
} else {
if (SourceBuffer > DestinationBuffer) {
Destination8 = (UINT8*)DestinationBuffer;
Source8 = (CONST UINT8*)SourceBuffer;
while (Length-- != 0) {
*(Destination8++) = *(Source8++);
}
} else if (SourceBuffer < DestinationBuffer) {
Destination8 = (UINT8*)DestinationBuffer + Length;
Source8 = (CONST UINT8*)SourceBuffer + Length;
while (Length-- != 0) {
*(--Destination8) = *(--Source8);
}
}
}
return DestinationBuffer;
}
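InternalMemCopyMem chooses the copy direction from the relative position of the buffers (forward when the source is above the destination, backward when it is below), so overlapping moves produce the expected result. A small sketch with a hypothetical scratch buffer:

  UINT8  Scratch[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

  // The source (&Scratch[0]) is below the destination (&Scratch[2]), so the
  // routine copies backward and the overlapping bytes are not clobbered.
  InternalMemCopyMem (&Scratch[2], &Scratch[0], 6);

  // Scratch is now { 0, 1, 0, 1, 2, 3, 4, 5 }.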

View File

@@ -0,0 +1,84 @@
/** @file
Copyright (c) 2012-2013, ARM Ltd. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "MemLibInternals.h"
/**
Set Buffer to Value for Size bytes.
@param Buffer Memory to set.
@param Length Number of bytes to set
@param Value Value of the set operation.
@return Buffer
**/
VOID *
EFIAPI
InternalMemSetMem (
OUT VOID *Buffer,
IN UINTN Length,
IN UINT8 Value
)
{
//
// Declare the local variables that actually move the data elements as
// volatile to prevent the optimizer from replacing this function with
// the intrinsic memset()
//
volatile UINT8 *Pointer8;
volatile UINT32 *Pointer32;
volatile UINT64 *Pointer64;
UINT32 Value32;
UINT64 Value64;
if ((((UINTN)Buffer & 0x7) == 0) && (Length >= 8)) {
// Generate the 64bit value
Value32 = (Value << 24) | (Value << 16) | (Value << 8) | Value;
Value64 = (((UINT64)Value32) << 32) | Value32;
Pointer64 = (UINT64*)Buffer;
while (Length >= 8) {
*(Pointer64++) = Value64;
Length -= 8;
}
// Finish with bytes if needed
Pointer8 = (UINT8*)Pointer64;
while (Length-- > 0) {
*(Pointer8++) = Value;
}
} else if ((((UINTN)Buffer & 0x3) == 0) && (Length >= 4)) {
// Generate the 32bit value
Value32 = (Value << 24) | (Value << 16) | (Value << 8) | Value;
Pointer32 = (UINT32*)Buffer;
while (Length >= 4) {
*(Pointer32++) = Value32;
Length -= 4;
}
// Finish with bytes if needed
Pointer8 = (UINT8*)Pointer32;
while (Length-- > 0) {
*(Pointer8++) = Value;
}
} else {
Pointer8 = (UINT8*)Buffer;
while (Length-- > 0) {
*(Pointer8++) = Value;
}
}
return Buffer;
}
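For example, with Value equal to 0x5A the replication yields Value32 = 0x5A5A5A5A and Value64 = 0x5A5A5A5A5A5A5A5A, so each 64-bit store in the fast path writes eight copies of the byte at once.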

View File

@@ -7,6 +7,7 @@
#
# Copyright (c) 2007 - 2010, Intel Corporation. All rights reserved.<BR>
# Portions copyright (c) 2010, Apple Inc. All rights reserved.<BR>
# Portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
@@ -28,7 +29,7 @@
#
# VALID_ARCHITECTURES = ARM
# VALID_ARCHITECTURES = ARM AARCH64
#
@@ -54,6 +55,9 @@
Arm/SetMem.asm
Arm/SetMem.S
[Sources.AARCH64]
AArch64/CopyMem.c
AArch64/SetMem.c
[Packages]
MdePkg/MdePkg.dec

View File

@@ -0,0 +1,129 @@
/** @file
Specific relocation fixups for ARM architecture.
Copyright (c) 2006 - 2009, Intel Corporation. All rights reserved.<BR>
Portions copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
Portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php.
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include "BasePeCoffLibInternals.h"
#include <Library/BaseLib.h>
//
// Note: Currently only large memory model is supported by UEFI relocation code.
//
/**
Performs an AArch64-specific relocation fixup and is a no-op on other
instruction sets.
@param Reloc The pointer to the relocation record.
@param Fixup The pointer to the address to fix up.
@param FixupData The pointer to a buffer to log the fixups.
@param Adjust The offset to adjust the fixup.
@return Status code.
**/
RETURN_STATUS
PeCoffLoaderRelocateImageEx (
IN UINT16 **Reloc,
IN OUT CHAR8 *Fixup,
IN OUT CHAR8 **FixupData,
IN UINT64 Adjust
)
{
UINT64 *F64;
switch ((**Reloc) >> 12) {
case EFI_IMAGE_REL_BASED_DIR64:
F64 = (UINT64 *) Fixup;
*F64 = *F64 + (UINT64) Adjust;
if (*FixupData != NULL) {
*FixupData = ALIGN_POINTER(*FixupData, sizeof(UINT64));
*(UINT64 *)(*FixupData) = *F64;
*FixupData = *FixupData + sizeof(UINT64);
}
break;
default:
return RETURN_UNSUPPORTED;
}
return RETURN_SUCCESS;
}
/**
Returns TRUE if the machine type of the PE/COFF image is supported. Supported
does not mean the image can be executed; it means the PE/COFF loader supports
loading and relocating images of this type. It is up to the caller to support
the entry point.
@param Machine Machine type from the PE Header.
@return TRUE if this PE/COFF loader can load the image
**/
BOOLEAN
PeCoffLoaderImageFormatSupported (
IN UINT16 Machine
)
{
if ((Machine == IMAGE_FILE_MACHINE_AARCH64) || (Machine == IMAGE_FILE_MACHINE_EBC)) {
return TRUE;
}
return FALSE;
}
/**
Performs an AArch64-specific re-relocation fixup and is a no-op on other
instruction sets. This is used to re-relocate the image into the EFI virtual
address space for runtime calls.
@param Reloc The pointer to the relocation record.
@param Fixup The pointer to the address to fix up.
@param FixupData The pointer to a buffer to log the fixups.
@param Adjust The offset to adjust the fixup.
@return Status code.
**/
RETURN_STATUS
PeHotRelocateImageEx (
IN UINT16 **Reloc,
IN OUT CHAR8 *Fixup,
IN OUT CHAR8 **FixupData,
IN UINT64 Adjust
)
{
UINT64 *Fixup64;
switch ((**Reloc) >> 12) {
case EFI_IMAGE_REL_BASED_DIR64:
Fixup64 = (UINT64 *) Fixup;
*FixupData = ALIGN_POINTER (*FixupData, sizeof (UINT64));
if (*(UINT64 *) (*FixupData) == *Fixup64) {
*Fixup64 = *Fixup64 + (UINT64) Adjust;
}
*FixupData = *FixupData + sizeof (UINT64);
break;
default:
DEBUG ((EFI_D_ERROR, "PeHotRelocateEx:unknown fixed type\n"));
return RETURN_UNSUPPORTED;
}
return RETURN_SUCCESS;
}
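For illustration only: if an image linked at 0x40000000 is loaded at 0xFE000000, Adjust is 0xBE000000, and a DIR64 fixup pointing at a field holding 0x40001234 rewrites it to 0x40001234 + 0xBE000000 = 0xFE001234. The hot-relocate path applies the same adjustment a second time, guarded by the FixupData log, when the image is moved into the EFI virtual address space at runtime.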

View File

@@ -6,6 +6,7 @@
#
# Copyright (c) 2006 - 2010, Intel Corporation. All rights reserved.<BR>
# Portions copyright (c) 2008 - 2009, Apple Inc. All rights reserved.<BR>
# Portions copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
@@ -27,7 +28,7 @@
#
# VALID_ARCHITECTURES = IA32 X64 IPF EBC ARM
# VALID_ARCHITECTURES = IA32 X64 IPF EBC ARM AARCH64
#
[Sources]
@@ -43,6 +44,9 @@
[Sources.ARM]
Arm/PeCoffLoaderEx.c
[Sources.AARCH64]
AArch64/PeCoffLoaderEx.c
[Packages]
MdePkg/MdePkg.dec

View File

@@ -336,7 +336,7 @@ PrepareFdt (
INTN err;
INTN node;
INTN cpu_node;
INTN lenp;
INT32 lenp;
CONST VOID* BootArg;
CONST VOID* Method;
EFI_PHYSICAL_ADDRESS InitrdImageStart;

View File

@@ -0,0 +1,125 @@
/*
* Copyright (c) 2011 - 2013, ARM Ltd
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the company may not be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
* TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
.text
.align 2
ASM_GLOBAL ASM_PFX(memcpy)
// Taken from Newlib BSD implementation.
ASM_PFX(memcpy):
// Copy dst to x6, so we can preserve return value.
mov x6, x0
// NOTE: although size_t is unsigned, this code uses signed
// comparisons on x2 so relies on nb never having its top bit
// set. In practice this is not going to be a real problem.
// Require at least 64 bytes to be worth aligning.
cmp x2, #64
blt qwordcopy
// Compute offset to align destination to 16 bytes.
neg x3, x0
and x3, x3, 15
cbz x3, blockcopy // offset == 0 is likely
// We know there is at least 64 bytes to be done, so we
// do a 16 byte misaligned copy at first and then later do
// all 16-byte aligned copies. Some bytes will be copied
// twice, but there's no harm in that since memcpy does not
// guarantee correctness on overlap.
sub x2, x2, x3 // nb -= offset
ldp x4, x5, [x1]
add x1, x1, x3
stp x4, x5, [x6]
add x6, x6, x3
// The destination pointer is now qword (16 byte) aligned.
// (The source pointer is not necessarily aligned.)
blockcopy:
// Copy 64 bytes at a time.
subs x2, x2, #64
blt 3f
2: subs x2, x2, #64
ldp x4, x5, [x1,#0]
ldp x8, x9, [x1,#16]
ldp x10,x11,[x1,#32]
ldp x12,x13,[x1,#48]
add x1, x1, #64
stp x4, x5, [x6,#0]
stp x8, x9, [x6,#16]
stp x10,x11,[x6,#32]
stp x12,x13,[x6,#48]
add x6, x6, #64
bge 2b
// Unwind pre-decrement
3: add x2, x2, #64
qwordcopy:
// Copy 0-48 bytes, 16 bytes at a time.
subs x2, x2, #16
blt tailcopy
2: ldp x4, x5, [x1],#16
subs x2, x2, #16
stp x4, x5, [x6],#16
bge 2b
// No need to unwind the pre-decrement, it would not change
// the low 4 bits of the count. But how likely is it for the
// byte count to be a multiple of 16? Is it worth the overhead
// of testing for x2 == -16?
tailcopy:
// Copy trailing 0-15 bytes.
tbz x2, #3, 1f
ldr x4, [x1],#8 // copy 8 bytes
str x4, [x6],#8
1:
tbz x2, #2, 1f
ldr w4, [x1],#4 // copy 4 bytes
str w4, [x6],#4
1:
tbz x2, #1, 1f
ldrh w4, [x1],#2 // copy 2 bytes
strh w4, [x6],#2
1:
tbz x2, #0, return
ldrb w4, [x1] // copy 1 byte
strb w4, [x6]
return:
// This is the only return point of memcpy.
ret

View File

@@ -22,9 +22,8 @@
VERSION_STRING = 1.0
LIBRARY_CLASS = CompilerIntrinsicsLib
[Sources.common]
[Sources.AARCH64]
AArch64/memcpy.S | GCC
[Sources.ARM]
Arm/mullu.asm | RVCT

View File

@@ -0,0 +1,93 @@
#------------------------------------------------------------------------------
#
# Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
#------------------------------------------------------------------------------
GCC_ASM_EXPORT(DebugAgentVectorTable)
GCC_ASM_IMPORT(DefaultExceptionHandler)
.text
ASM_PFX(DebugAgentVectorTable):
//
// Current EL with SP0 : 0x0 - 0x180
//
.align 11
ASM_PFX(SynchronousExceptionSP0):
b ASM_PFX(SynchronousExceptionSP0)
.align 7
ASM_PFX(IrqSP0):
b ASM_PFX(IrqSP0)
.align 7
ASM_PFX(FiqSP0):
b ASM_PFX(FiqSP0)
.align 7
ASM_PFX(SErrorSP0):
b ASM_PFX(SErrorSP0)
//
// Current EL with SPx: 0x200 - 0x380
//
.align 7
ASM_PFX(SynchronousExceptionSPx):
b ASM_PFX(SynchronousExceptionSPx)
.align 7
ASM_PFX(IrqSPx):
b ASM_PFX(IrqSPx)
.align 7
ASM_PFX(FiqSPx):
b ASM_PFX(FiqSPx)
.align 7
ASM_PFX(SErrorSPx):
b ASM_PFX(SErrorSPx)
/* Lower EL using AArch64 : 0x400 - 0x580 */
.align 7
ASM_PFX(SynchronousExceptionA64):
b ASM_PFX(SynchronousExceptionA64)
.align 7
ASM_PFX(IrqA64):
b ASM_PFX(IrqA64)
.align 7
ASM_PFX(FiqA64):
b ASM_PFX(FiqA64)
.align 7
ASM_PFX(SErrorA64):
b ASM_PFX(SErrorA64)
//
// Lower EL using AArch32 : 0x600 - 0x780
//
.align 7
ASM_PFX(SynchronousExceptionA32):
b ASM_PFX(SynchronousExceptionA32)
.align 7
ASM_PFX(IrqA32):
b ASM_PFX(IrqA32)
.align 7
ASM_PFX(FiqA32):
b ASM_PFX(FiqA32)
.align 7
ASM_PFX(SErrorA32):
b ASM_PFX(SErrorA32)

View File

@@ -285,7 +285,7 @@ InitializeDebugAgent (
// Now we've got UART, make the check:
// - The Vector table must be 32-byte aligned
ASSERT(((UINTN)DebugAgentVectorTable & ARM_VECTOR_TABLE_ALIGNMENT) == 0);
// Need to fix basetools: ASSERT(((UINTN)DebugAgentVectorTable & ARM_VECTOR_TABLE_ALIGNMENT) == 0);
ArmWriteVBar ((UINTN)DebugAgentVectorTable);
// We use InitFlag to know if DebugAgent has been initialized from

View File

@@ -26,6 +26,9 @@
Arm/DebugAgentException.asm | RVCT
Arm/DebugAgentException.S | GCC
[Sources.AARCH64]
AArch64/DebugAgentException.S | GCC
[Packages]
MdePkg/MdePkg.dec
MdeModulePkg/MdeModulePkg.dec

View File

@@ -0,0 +1,112 @@
/** @file
Default exception handler
Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
**/
#include <Uefi.h>
#include <Library/UefiLib.h>
#include <Library/BaseLib.h>
#include <Library/DebugLib.h>
#include <Library/PeCoffGetEntryPointLib.h>
#include <Library/PrintLib.h>
#include <Library/ArmDisassemblerLib.h>
#include <Library/SerialPortLib.h>
#include <Guid/DebugImageInfoTable.h>
#include <Protocol/DebugSupport.h>
#include <Protocol/LoadedImage.h>
EFI_DEBUG_IMAGE_INFO_TABLE_HEADER *gDebugImageTableHeader = NULL;
STATIC CHAR8 *gExceptionTypeString[] = {
"Synchronous",
"IRQ",
"FIQ",
"SError"
};
CHAR8 *
GetImageName (
IN UINTN FaultAddress,
OUT UINTN *ImageBase,
OUT UINTN *PeCoffSizeOfHeaders
);
/**
This is the default action to take on an unexpected exception.
Since this is exception context, don't do anything crazy like try to allocate memory.
@param ExceptionType Type of the exception
@param SystemContext Register state at the time of the Exception
**/
VOID
DefaultExceptionHandler (
IN EFI_EXCEPTION_TYPE ExceptionType,
IN OUT EFI_SYSTEM_CONTEXT SystemContext
)
{
CHAR8 Buffer[100];
UINTN CharCount;
CharCount = AsciiSPrint (Buffer,sizeof (Buffer),"\n\n%a Exception: \n", gExceptionTypeString[ExceptionType]);
SerialPortWrite ((UINT8 *) Buffer, CharCount);
DEBUG_CODE_BEGIN ();
CHAR8 *Pdb;
UINTN ImageBase;
UINTN PeCoffSizeOfHeader;
Pdb = GetImageName (SystemContext.SystemContextAArch64->ELR, &ImageBase, &PeCoffSizeOfHeader);
if (Pdb != NULL) {
DEBUG ((EFI_D_ERROR, "%a loaded at 0x%016lx \n", Pdb, ImageBase));
}
DEBUG_CODE_END ();
DEBUG ((EFI_D_ERROR, "\n X0 0x%016lx X1 0x%016lx X2 0x%016lx X3 0x%016lx\n", SystemContext.SystemContextAArch64->X0, SystemContext.SystemContextAArch64->X1, SystemContext.SystemContextAArch64->X2, SystemContext.SystemContextAArch64->X3));
DEBUG ((EFI_D_ERROR, " X4 0x%016lx X5 0x%016lx X6 0x%016lx X7 0x%016lx\n", SystemContext.SystemContextAArch64->X4, SystemContext.SystemContextAArch64->X5, SystemContext.SystemContextAArch64->X6, SystemContext.SystemContextAArch64->X7));
DEBUG ((EFI_D_ERROR, " X8 0x%016lx X9 0x%016lx X10 0x%016lx X11 0x%016lx\n", SystemContext.SystemContextAArch64->X8, SystemContext.SystemContextAArch64->X9, SystemContext.SystemContextAArch64->X10, SystemContext.SystemContextAArch64->X11));
DEBUG ((EFI_D_ERROR, " X12 0x%016lx X13 0x%016lx X14 0x%016lx X15 0x%016lx\n", SystemContext.SystemContextAArch64->X12, SystemContext.SystemContextAArch64->X13, SystemContext.SystemContextAArch64->X14, SystemContext.SystemContextAArch64->X15));
DEBUG ((EFI_D_ERROR, " X16 0x%016lx X17 0x%016lx X18 0x%016lx X19 0x%016lx\n", SystemContext.SystemContextAArch64->X16, SystemContext.SystemContextAArch64->X17, SystemContext.SystemContextAArch64->X18, SystemContext.SystemContextAArch64->X19));
DEBUG ((EFI_D_ERROR, " X20 0x%016lx X21 0x%016lx X22 0x%016lx X23 0x%016lx\n", SystemContext.SystemContextAArch64->X20, SystemContext.SystemContextAArch64->X21, SystemContext.SystemContextAArch64->X22, SystemContext.SystemContextAArch64->X23));
DEBUG ((EFI_D_ERROR, " X24 0x%016lx X25 0x%016lx X26 0x%016lx X27 0x%016lx\n", SystemContext.SystemContextAArch64->X24, SystemContext.SystemContextAArch64->X25, SystemContext.SystemContextAArch64->X26, SystemContext.SystemContextAArch64->X27));
DEBUG ((EFI_D_ERROR, " X28 0x%016lx FP 0x%016lx LR 0x%016lx \n", SystemContext.SystemContextAArch64->X28, SystemContext.SystemContextAArch64->FP, SystemContext.SystemContextAArch64->LR));
/* We save these as 128bit numbers, but have to print them as two 64bit numbers,
so swap the 64bit words to correctly represent a 128bit number. */
DEBUG ((EFI_D_ERROR, "\n V0 0x%016lx %016lx V1 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V0[1], SystemContext.SystemContextAArch64->V0[0], SystemContext.SystemContextAArch64->V1[1], SystemContext.SystemContextAArch64->V1[0]));
DEBUG ((EFI_D_ERROR, " V2 0x%016lx %016lx V3 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V2[1], SystemContext.SystemContextAArch64->V2[0], SystemContext.SystemContextAArch64->V3[1], SystemContext.SystemContextAArch64->V3[0]));
DEBUG ((EFI_D_ERROR, " V4 0x%016lx %016lx V5 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V4[1], SystemContext.SystemContextAArch64->V4[0], SystemContext.SystemContextAArch64->V5[1], SystemContext.SystemContextAArch64->V5[0]));
DEBUG ((EFI_D_ERROR, " V6 0x%016lx %016lx V7 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V6[1], SystemContext.SystemContextAArch64->V6[0], SystemContext.SystemContextAArch64->V7[1], SystemContext.SystemContextAArch64->V7[0]));
DEBUG ((EFI_D_ERROR, " V8 0x%016lx %016lx V9 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V8[1], SystemContext.SystemContextAArch64->V8[0], SystemContext.SystemContextAArch64->V9[1], SystemContext.SystemContextAArch64->V9[0]));
DEBUG ((EFI_D_ERROR, " V10 0x%016lx %016lx V11 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V10[1], SystemContext.SystemContextAArch64->V10[0], SystemContext.SystemContextAArch64->V11[1], SystemContext.SystemContextAArch64->V11[0]));
DEBUG ((EFI_D_ERROR, " V12 0x%016lx %016lx V13 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V12[1], SystemContext.SystemContextAArch64->V12[0], SystemContext.SystemContextAArch64->V13[1], SystemContext.SystemContextAArch64->V13[0]));
DEBUG ((EFI_D_ERROR, " V14 0x%016lx %016lx V15 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V14[1], SystemContext.SystemContextAArch64->V14[0], SystemContext.SystemContextAArch64->V15[1], SystemContext.SystemContextAArch64->V15[0]));
DEBUG ((EFI_D_ERROR, " V16 0x%016lx %016lx V17 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V16[1], SystemContext.SystemContextAArch64->V16[0], SystemContext.SystemContextAArch64->V17[1], SystemContext.SystemContextAArch64->V17[0]));
DEBUG ((EFI_D_ERROR, " V18 0x%016lx %016lx V19 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V18[1], SystemContext.SystemContextAArch64->V18[0], SystemContext.SystemContextAArch64->V19[1], SystemContext.SystemContextAArch64->V19[0]));
DEBUG ((EFI_D_ERROR, " V20 0x%016lx %016lx V21 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V20[1], SystemContext.SystemContextAArch64->V20[0], SystemContext.SystemContextAArch64->V21[1], SystemContext.SystemContextAArch64->V21[0]));
DEBUG ((EFI_D_ERROR, " V22 0x%016lx %016lx V23 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V22[1], SystemContext.SystemContextAArch64->V22[0], SystemContext.SystemContextAArch64->V23[1], SystemContext.SystemContextAArch64->V23[0]));
DEBUG ((EFI_D_ERROR, " V24 0x%016lx %016lx V25 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V24[1], SystemContext.SystemContextAArch64->V24[0], SystemContext.SystemContextAArch64->V25[1], SystemContext.SystemContextAArch64->V25[0]));
DEBUG ((EFI_D_ERROR, " V26 0x%016lx %016lx V27 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V26[1], SystemContext.SystemContextAArch64->V26[0], SystemContext.SystemContextAArch64->V27[1], SystemContext.SystemContextAArch64->V27[0]));
DEBUG ((EFI_D_ERROR, " V28 0x%016lx %016lx V29 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V28[1], SystemContext.SystemContextAArch64->V28[0], SystemContext.SystemContextAArch64->V29[1], SystemContext.SystemContextAArch64->V29[0]));
DEBUG ((EFI_D_ERROR, " V30 0x%016lx %016lx V31 0x%016lx %016lx\n", SystemContext.SystemContextAArch64->V30[1], SystemContext.SystemContextAArch64->V30[0], SystemContext.SystemContextAArch64->V31[1], SystemContext.SystemContextAArch64->V31[0]));
DEBUG ((EFI_D_ERROR, "\n SP 0x%016lx ELR 0x%016lx SPSR 0x%08lx FPSR 0x%08lx\n ESR 0x%08lx FAR 0x%016lx\n", SystemContext.SystemContextAArch64->SP, SystemContext.SystemContextAArch64->ELR, SystemContext.SystemContextAArch64->SPSR, SystemContext.SystemContextAArch64->FPSR, SystemContext.SystemContextAArch64->ESR, SystemContext.SystemContextAArch64->FAR));
DEBUG ((EFI_D_ERROR, "\n ESR : EC 0x%02x IL 0x%x ISS 0x%08x\n", (SystemContext.SystemContextAArch64->ESR & 0xFC000000) >> 26, (SystemContext.SystemContextAArch64->ESR >> 25) & 0x1, SystemContext.SystemContextAArch64->ESR & 0x1FFFFFF ));
DEBUG ((EFI_D_ERROR, "\n"));
ASSERT (FALSE);
}

View File

@@ -1,6 +1,7 @@
#/** @file
#
# Copyright (c) 2008, Apple Inc. All rights reserved.<BR>
# Copyright (c) 2008, Apple Inc. All rights reserved.<BR>
# Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
@@ -27,6 +28,9 @@
[Sources.ARM]
Arm/DefaultExceptionHandler.c
[Sources.AARCH64]
AArch64/DefaultExceptionHandler.c
[Packages]
MdePkg/MdePkg.dec
ArmPkg/ArmPkg.dec

View File

@@ -26,6 +26,9 @@
[Sources.ARM]
Arm/DefaultExceptionHandler.c
[Sources.AARCH64]
AArch64/DefaultExceptionHandler.c
[Packages]
MdePkg/MdePkg.dec
ArmPkg/ArmPkg.dec

View File

@@ -0,0 +1,23 @@
#------------------------------------------------------------------------------
#
# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
# Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
#------------------------------------------------------------------------------
.text
.align 2
.globl ASM_PFX(GccSemihostCall)
ASM_PFX(GccSemihostCall):
hlt #0xf000
ret

View File

@@ -2,6 +2,7 @@
# Semihosting JTAG lib
#
# Copyright (c) 2008 - 2010, Apple Inc. All rights reserved.<BR>
# Copyright (c) 2011 - 2013, ARM Ltd. All rights reserved.<BR>
#
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
@@ -25,7 +26,7 @@
#
# The following information is for reference only and not required by the build tools.
#
# VALID_ARCHITECTURES = ARM
# VALID_ARCHITECTURES = ARM AARCH64
#
[Sources.common]
SemihostLib.c
@@ -33,10 +34,13 @@
[Sources.ARM]
Arm/GccSemihost.S | GCC
[Sources.AARCH64]
AArch64/GccSemihost.S | GCC
[Packages]
MdePkg/MdePkg.dec
ArmPkg/ArmPkg.dec
[LibraryClasses]
BaseLib