arm: Thumb ALL the things!

This patch switches every last part of Coreboot on ARM over to Thumb
mode: libpayload, the internal libgcc, and assorted assembly files. In
combination with the respective depthcharge patch, this will switch to
Thumb mode right after the entry point of the bootblock and not switch
back to ARM until the final assembly stub that jumps to the kernel.

The required changes to make this work include adding new headers and
Makefile flags to handle assembly files (using the unified syntax and
the same helper macros as Linux), modifying our custom-written libgcc
code for 64-bit division to support Thumb (and removing, for clarity,
some stale files that were never really used), and flipping the general
CFLAGS to Thumb (with some more cleanup there while I'm at it).
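
For reference, the helper macros meant here follow the pattern of Linux's
unified.h. The block below is only a hedged sketch of that pattern, not
necessarily the exact contents of the new coreboot header; the idea is that
ARM()/THUMB() let one .S file carry both variants, which is exactly how the
uldivmod.S hunks at the bottom of this change use them:

/* Illustrative sketch only -- modeled on Linux's ARM()/THUMB() macros. */
#if defined __thumb2__
        .syntax unified
        .thumb
#define ARM(x...)               /* dropped when assembling for Thumb */
#define THUMB(x...)     x
#else
        .syntax unified
        .arm
#define ARM(x...)       x
#define THUMB(x...)             /* dropped when assembling for ARM */
#endif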

BUG=None
TEST=Snow and Nyan still boot.

Original-Change-Id: I80c04281e3adbf74f9f477486a96b9fafeb455b3
Original-Signed-off-by: Julius Werner <jwerner@chromium.org>
Original-Reviewed-on: https://chromium-review.googlesource.com/182212
Original-Reviewed-by: Gabe Black <gabeblack@chromium.org>
(cherry picked from commit 5f65c17cbfae165a95354146ae79e06c512c2c5a)

Conflicts:
	payloads/libpayload/include/arm/arch/asm.h
	src/arch/arm/Makefile.inc
	src/arch/arm/armv7/Makefile.inc

*** There is an open question about what to do with ramstage-S-ccopts; it
*** will need to be addressed in additional ARM cleanup patches.

Change-Id: I80c04281e3adbf74f9f477486a96b9fafeb455b3
Signed-off-by: Marc Jones <marc.jones@se-eng.com>
Reviewed-on: http://review.coreboot.org/6930
Tested-by: build bot (Jenkins)
Reviewed-by: Stefan Reinauer <stefan.reinauer@coreboot.org>
Author:       Julius Werner
Date:         2014-01-13 13:24:30 -08:00
Committed-by: Marc Jones
Parent:       a38ccfdee1
Commit:       25a282dabc

10 changed files with 77 additions and 393 deletions


@@ -1,214 +0,0 @@
/* Miscellaneous BPABI functions.
Copyright (C) 2003, 2004 Free Software Foundation, Inc.
Contributed by CodeSourcery, LLC.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */

#if defined __GNUC__

#include <stdint.h>

uint64_t __udivmoddi4(uint64_t n, uint64_t d, uint64_t *rp);
extern int64_t __divdi3(int64_t, int64_t);
extern uint64_t __udivdi3(uint64_t, uint64_t);
extern int64_t __gnu_ldivmod_helper(int64_t, int64_t, int64_t *);
extern uint64_t __gnu_uldivmod_helper(uint64_t, uint64_t, uint64_t *);

typedef union
{
        struct {
                int32_t low;
                int32_t high;
        } s;
        int64_t ll;
} DWunion;

uint64_t
__udivmoddi4(uint64_t n, uint64_t d, uint64_t *rp)
{
        const DWunion nn = {.ll = n};
        const DWunion dd = {.ll = d};
        DWunion rr;
        uint32_t d0, d1, n0, n1, n2;
        uint32_t q0, q1;
        uint32_t b, bm;

        d0 = dd.s.low;
        d1 = dd.s.high;
        n0 = nn.s.low;
        n1 = nn.s.high;

        if (d1 == 0) {
                if (d0 > n1) {
                        /* 0q = nn / 0D */
                        udiv_qrnnd(q0, n0, n1, n0, d0);
                        q1 = 0;
                        /* Remainder in n0. */
                } else {
                        /* qq = NN / 0d */
                        if (d0 == 0)
                                d0 = 1 / d0;    /* Divide intentionally by zero. */
                        udiv_qrnnd(q1, n1, 0, n1, d0);
                        udiv_qrnnd(q0, n0, n1, n0, d0);
                        /* Remainder in n0. */
                }

                if (rp != 0) {
                        rr.s.low = n0;
                        rr.s.high = 0;
                        *rp = rr.ll;
                }
        } else {
                if (d1 > n1) {
                        /* 00 = nn / DD */
                        q0 = 0;
                        q1 = 0;
                        /* Remainder in n1n0. */
                        if (rp != 0) {
                                rr.s.low = n0;
                                rr.s.high = n1;
                                *rp = rr.ll;
                        }
                } else {
                        /* 0q = NN / dd */
                        count_leading_zeros(bm, d1);
                        if (bm == 0) {
                                /* From (n1 >= d1) /\ (the most significant
                                   bit of d1 is set), conclude (the most
                                   significant bit of n1 is set) /\ (the
                                   quotient digit q0 = 0 or 1).

                                   This special case is necessary, not an
                                   optimization. */

                                /* The condition on the next line takes
                                   advantage of that n1 >= d1 (true due to
                                   program flow). */
                                if (n1 > d1 || n0 >= d0) {
                                        q0 = 1;
                                        sub_ddmmss(n1, n0, n1, n0, d1, d0);
                                } else
                                        q0 = 0;

                                q1 = 0;

                                if (rp != 0) {
                                        rr.s.low = n0;
                                        rr.s.high = n1;
                                        *rp = rr.ll;
                                }
                        } else {
                                uint32_t m1, m0;

                                /* Normalize. */
                                b = 32 - bm;

                                d1 = (d1 << bm) | (d0 >> b);
                                d0 = d0 << bm;
                                n2 = n1 >> b;
                                n1 = (n1 << bm) | (n0 >> b);
                                n0 = n0 << bm;

                                udiv_qrnnd(q0, n1, n2, n1, d1);
                                umul_ppmm(m1, m0, q0, d0);

                                if (m1 > n1 || (m1 == n1 && m0 > n0)) {
                                        q0--;
                                        sub_ddmmss(m1, m0, m1, m0, d1, d0);
                                }

                                q1 = 0;

                                /* Remainder in (n1n0 - m1m0) >> bm. */
                                if (rp != 0) {
                                        sub_ddmmss(n1, n0, n1, n0, m1, m0);
                                        rr.s.low = (n1 << b) | (n0 >> bm);
                                        rr.s.high = n1 >> bm;
                                        *rp = rr.ll;
                                }
                        }
                }
        }

        const DWunion ww = {{.low = q0, .high = q1}};
        return ww.ll;
}

int64_t
__divdi3(int64_t u, int64_t v)
{
        int32_t c = 0;
        DWunion uu = {.ll = u};
        DWunion vv = {.ll = v};
        int64_t w;

        if (uu.s.high < 0) {
                c = ~c;
                uu.ll = -uu.ll;
        }
        if (vv.s.high < 0) {
                c = ~c;
                vv.ll = -vv.ll;
        }

        w = __udivmoddi4(uu.ll, vv.ll, (uint64_t *)0);
        if (c)
                w = -w;

        return w;
}

int64_t
__gnu_ldivmod_helper (int64_t a, int64_t b, int64_t *remainder)
{
        int64_t quotient;

        quotient = __divdi3(a, b);
        *remainder = a - b * quotient;
        return quotient;
}

uint64_t
__udivdi3(uint64_t n, uint64_t d)
{
        return __udivmoddi4(n, d, (uint64_t *)0);
}

uint64_t
__gnu_uldivmod_helper(uint64_t a, uint64_t b, uint64_t *remainder)
{
        uint64_t quotient;

        quotient = __udivdi3(a, b);
        *remainder = a - b * quotient;
        return quotient;
}

#endif
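
Side note (illustrative, not part of the change): these helpers are only
reached through compiler-generated calls. On ARM EABI, a plain 64-bit
unsigned division in C is lowered to a runtime call roughly like the
hedged sketch below; the register conventions are the AEABI ones, the
surrounding code is assumed:

        @ "q = n / d" with n arriving in r1:r0 and d in r3:r2
        bl      __aeabi_uldivmod        @ quotient comes back in r1:r0,
                                        @ remainder in r3:r2

The __aeabi_ldivmod/__aeabi_uldivmod wrappers in the next deleted file do
little more than adapt that register-based contract to the pointer-based
__gnu_ldivmod_helper()/__gnu_uldivmod_helper() functions above.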


@@ -1,130 +0,0 @@
/* Miscellaneous BPABI functions.
Copyright (C) 2003, 2004, 2007, 2008 Free Software Foundation, Inc.
Contributed by CodeSourcery, LLC.
This file is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 2, or (at your option) any
later version.
In addition to the permissions in the GNU General Public License, the
Free Software Foundation gives you unlimited permission to link the
compiled version of this file into combinations with other programs,
and to distribute those combinations without any restriction coming
from the use of this file. (The General Public License restrictions
do apply in other respects; for example, they cover modification of
the file, and distribution when not linked into a combine
executable.)
This file is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA. */

#include <arch/asm.h>

#ifdef __ARMEB__
#define xxh r0
#define xxl r1
#define yyh r2
#define yyl r3
#else
#define xxh r1
#define xxl r0
#define yyh r3
#define yyl r2
#endif

#if defined __thumb2__
.macro do_it cond
        it      \cond
.endm
#define do_push push
#define do_pop  pop
#else
.macro do_it cond
.endm
#define do_push stmfd sp!,
#define do_pop  ldmfd sp!,
#endif

ENTRY(__aeabi_lcmp)
        cmp     xxh, yyh
        do_it   lt
        movlt   r0, #-1
        do_it   gt
        movgt   r0, #1
        do_it   ne
        movne   pc, lr
        subs    r0, xxl, yyl
        do_it   lo
        movlo   r0, #-1
        do_it   hi
        movhi   r0, #1
        mov     pc, lr
ENDPROC(__aeabi_lcmp)

ENTRY(__aeabi_ulcmp)
        cmp     xxh, yyh
        do_it   lo
        movlo   r0, #-1
        do_it   hi
        movhi   r0, #1
        do_it   ne
        movne   pc, lr
        cmp     xxl, yyl
        do_it   lo
        movlo   r0, #-1
        do_it   hi
        movhi   r0, #1
        do_it   eq
        moveq   r0, #0
        mov     pc, lr
ENDPROC(__aeabi_ulcmp)

ENTRY(__aeabi_ldivmod)
        sub     sp, sp, #8
#if defined(__thumb2__)
        mov     ip, sp
        push    {ip, lr}
#else
        do_push {sp, lr}
#endif
        bl      __gnu_ldivmod_helper
        ldr     lr, [sp, #4]
        add     sp, sp, #8
        do_pop  {r2, r3}
        mov     pc, lr
ENDPROC(__aeabi_ldivmod)

ENTRY(__aeabi_uldivmod)
        sub     sp, sp, #8
#if defined(__thumb2__)
        mov     ip, sp
        push    {ip, lr}
#else
        do_push {sp, lr}
#endif
        bl      __gnu_uldivmod_helper
        ldr     lr, [sp, #4]
        add     sp, sp, #8
        do_pop  {r2, r3}
        mov     pc, lr
ENDPROC(__aeabi_uldivmod)
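
One Thumb-2 detail that the do_it macro above exists to hide: in unified
syntax, Thumb-2 accepts a conditional instruction only inside an IT block,
while ARM state encodes the condition directly and needs no IT at all.
A hedged usage sketch (not taken from the diff):

        cmp     r0, #0
        do_it   eq              @ emits "it eq" when assembling for Thumb-2,
                                @ expands to nothing for ARM
        moveq   r0, #1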


@@ -55,8 +55,12 @@ Q_1 .req r1
 R_0     .req    r2
 R_1     .req    r3
 
+THUMB(
+TMP     .req    r8
+)
+
 ENTRY(__aeabi_uldivmod)
-        stmfd   sp!, {r4, r5, r6, r7, lr}
+        stmfd   sp!, {r4, r5, r6, r7, THUMB(TMP,) lr}
         @ Test if B == 0
         orrs    ip, B_0, B_1            @ Z set -> B == 0
         beq     L_div_by_0
@@ -92,12 +96,16 @@ L_div_64_64:
         subs    D_1, D_0, #32
         rsb     ip, D_0, #32
         movmi   B_1, B_1, lsl D_0
-        orrmi   B_1, B_1, B_0, lsr ip
+ARM(    orrmi   B_1, B_1, B_0, lsr ip  )
+THUMB(  lsrmi   TMP, B_0, ip           )
+THUMB(  orrmi   B_1, B_1, TMP          )
         movpl   B_1, B_0, lsl D_1
         mov     B_0, B_0, lsl D_0
         @ C = 1 << (clz B - clz A)
         movmi   C_1, C_1, lsl D_0
-        orrmi   C_1, C_1, C_0, lsr ip
+ARM(    orrmi   C_1, C_1, C_0, lsr ip  )
+THUMB(  lsrmi   TMP, C_0, ip           )
+THUMB(  orrmi   C_1, C_1, TMP          )
         movpl   C_1, C_0, lsl D_1
         mov     C_0, C_0, lsl D_0
 L_done_shift:
@@ -170,7 +178,7 @@ L_exit:
         mov     R_1, A_1
         mov     Q_0, D_0
         mov     Q_1, D_1
-        ldmfd   sp!, {r4, r5, r6, r7, pc}
+        ldmfd   sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
 
 L_div_32_32:
         @ Note: A_0 & r0 are aliases
@@ -180,7 +188,7 @@ L_div_32_32:
         mov     R_0, r1
         mov     R_1, #0
         mov     Q_1, #0
-        ldmfd   sp!, {r4, r5, r6, r7, pc}
+        ldmfd   sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
 
 L_pow2:
         /* CLZ only exists in ARM architecture version 5 and above. */
@@ -201,12 +209,14 @@ L_pow2:
         add     D_0, D_0, #32
 L_1:
         movpl   A_0, A_0, lsr D_0
-        orrpl   A_0, A_0, A_1, lsl D_1
+ARM(    orrpl   A_0, A_0, A_1, lsl D_1 )
+THUMB(  lslpl   TMP, A_1, D_1          )
+THUMB(  orrpl   A_0, A_0, TMP          )
         mov     A_1, A_1, lsr D_0
         @ Mov back C to R
         mov     R_0, C_0
         mov     R_1, C_1
-        ldmfd   sp!, {r4, r5, r6, r7, pc}
+        ldmfd   sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
 #else
         @ Note: A, B and Q, R are aliases
         @ R = A & (B - 1)
@@ -244,7 +254,7 @@ L_1:
         @ Move C to R
         mov     R_0, C_0
         mov     R_1, C_1
-        ldmfd   sp!, {r4, r5, r6, r7, pc}
+        ldmfd   sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
 #endif
 
 L_div_by_0:
@@ -254,5 +264,5 @@ L_div_by_0:
         mov     Q_1, #0
         mov     R_0, #0
         mov     R_1, #0
-        ldmfd   sp!, {r4, r5, r6, r7, pc}
+        ldmfd   sp!, {r4, r5, r6, r7, THUMB(TMP,) pc}
 ENDPROC(__aeabi_uldivmod)
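
For context on the ARM()/THUMB() splits in the hunks above (the instructions
are quoted from the diff, the commentary is added): Thumb-2 data-processing
instructions cannot take a register-specified shift as their flexible second
operand, so the single ARM instruction becomes a separate shift through the
scratch register TMP (r8) followed by the OR, and TMP in turn has to be
saved and restored in the stmfd/ldmfd register lists:

ARM(    orrmi   B_1, B_1, B_0, lsr ip  )        @ ARM: register-shifted operand is fine
THUMB(  lsrmi   TMP, B_0, ip           )        @ Thumb-2: shift into TMP first...
THUMB(  orrmi   B_1, B_1, TMP          )        @ ...then OR in the shifted value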