treewide: Replace CONFIG(ARCH_xx) tests

Once we support building different stages for different architectures,
such CONFIG(ARCH_xx) tests no longer evaluate correctly: they reflect
the board-wide Kconfig selection, not the architecture the current
stage is built for. Replace them with the per-stage ENV_xx macros.

Change-Id: I599995b3ed5c4dfd578c87067fe8bfc8c75b9d43
Signed-off-by: Kyösti Mälkki <kyosti.malkki@gmail.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/42183
Reviewed-by: Raul Rangel <rrangel@chromium.org>
Reviewed-by: Furquan Shaikh <furquan@google.com>
Reviewed-by: Angel Pons <th3fanbus@gmail.com>
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Author:       Kyösti Mälkki
Date:         2020-06-08 06:05:03 +03:00
Committed by: Angel Pons
Parent:       5edbb1c5d9
Commit:       7336f97deb
16 changed files with 32 additions and 28 deletions
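The distinction the commit relies on can be sketched as follows: CONFIG(ARCH_X86) is a board-wide Kconfig value that is identical in every stage, while ENV_X86 (like the other ENV_xx macros in <rules.h>) follows the architecture the current stage is actually compiled for. The snippet below is a minimal illustrative sketch, not the verbatim <rules.h> contents; the __ARCH_x86_32__/__ARCH_x86_64__ defines are assumed to be provided per stage by the build system.

/*
 * Minimal sketch (assumed define names, not the real <rules.h>):
 * ENV_X86 tracks the architecture of the stage being compiled,
 * whereas CONFIG(ARCH_X86) stays constant across all stages, even
 * when one stage (e.g. a verstage) is built for another architecture.
 */
#if defined(__ARCH_x86_32__) || defined(__ARCH_x86_64__)
#define ENV_X86 1
#else
#define ENV_X86 0
#endif

/* Typical use, as in the hunks below: compiled out of non-x86 stages. */
#if ENV_X86
#include <arch/ebda.h>
#endif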

View File

@@ -12,7 +12,7 @@
 #include <stdlib.h>
 #include <string.h>
 #include <smp/spinlock.h>
-#if CONFIG(ARCH_X86)
+#if ENV_X86
 #include <arch/ebda.h>
 #endif
 #include <timer.h>
@@ -566,7 +566,7 @@ void dev_initialize(void)
 	printk(BIOS_INFO, "Initializing devices...\n");
-#if CONFIG(ARCH_X86)
+#if ENV_X86
 	/* Ensure EBDA is prepared before Option ROMs. */
 	setup_default_ebda();
 #endif

View File

@@ -3,7 +3,7 @@
 #ifndef __OPROM_IO_H__
 #define __OPROM_IO_H__
-#if CONFIG(ARCH_X86)
+#if ENV_X86
 #include <arch/io.h>
 #else
 void outb(u8 val, u16 port);

View File

@@ -72,7 +72,7 @@ unsigned long tb_freq = 0;
 u64 get_time(void)
 {
 	u64 act = 0;
-#if CONFIG(ARCH_X86)
+#if ENV_X86
 	u32 eax, edx;
 	__asm__ __volatile__(

View File

@@ -151,7 +151,7 @@ u8 biosemu_dev_translate_address(int type, unsigned long * addr);
 static inline void
 out32le(void *addr, u32 val)
 {
-#if CONFIG(ARCH_X86) || CONFIG(ARCH_ARM)
+#if ENV_X86 || ENV_ARM || ENV_ARM64
 	*((u32*) addr) = cpu_to_le32(val);
 #else
 	asm volatile ("stwbrx %0, 0, %1"::"r" (val), "r"(addr));
@@ -162,7 +162,7 @@ static inline u32
 in32le(void *addr)
 {
 	u32 val;
-#if CONFIG(ARCH_X86) || CONFIG(ARCH_ARM)
+#if ENV_X86 || ENV_ARM || ENV_ARM64
 	val = cpu_to_le32(*((u32 *) addr));
 #else
 	asm volatile ("lwbrx %0, 0, %1":"=r" (val):"r"(addr));
@@ -173,7 +173,7 @@ in32le(void *addr)
 static inline void
 out16le(void *addr, u16 val)
 {
-#if CONFIG(ARCH_X86) || CONFIG(ARCH_ARM)
+#if ENV_X86 || ENV_ARM || ENV_ARM64
 	*((u16*) addr) = cpu_to_le16(val);
 #else
 	asm volatile ("sthbrx %0, 0, %1"::"r" (val), "r"(addr));
@@ -184,7 +184,7 @@ static inline u16
 in16le(void *addr)
 {
 	u16 val;
-#if CONFIG(ARCH_X86) || CONFIG(ARCH_ARM)
+#if ENV_X86 || ENV_ARM || ENV_ARM64
 	val = cpu_to_le16(*((u16*) addr));
 #else
 	asm volatile ("lhbrx %0, 0, %1":"=r" (val):"r"(addr));

View File

@@ -748,7 +748,7 @@ static bool elog_do_add_boot_count(void)
 /* Check and log POST codes from previous boot */
 static void log_last_boot_post(void)
 {
-#if CONFIG(ARCH_X86)
+#if ENV_X86
 	u8 code;
 	u32 extra;

View File

@@ -145,7 +145,7 @@ void cbmem_add_records_to_cbtable(struct lb_header *header);
  * and CBMEM_CONSOLE. Sometimes it is necessary to have cbmem_top()
  * value stored in nvram to enable early recovery on S3 path.
  */
-#if CONFIG(ARCH_X86)
+#if ENV_X86
 void backup_top_of_low_cacheable(uintptr_t ramtop);
 uintptr_t restore_top_of_low_cacheable(void);
 #endif

View File

@@ -247,7 +247,7 @@
 	(ENV_DECOMPRESSOR || ENV_BOOTBLOCK || ENV_ROMSTAGE || \
 	 (ENV_SEPARATE_VERSTAGE && !CONFIG(VBOOT_STARTS_IN_ROMSTAGE)))
-#if CONFIG(ARCH_X86)
+#if ENV_X86
 /* Indicates memory layout is determined with arch/x86/car.ld. */
 #define ENV_CACHE_AS_RAM (ENV_ROMSTAGE_OR_BEFORE && !CONFIG(RESET_VECTOR_IN_RAM))
 /* No .data sections with execute-in-place from ROM. */

View File

@@ -63,7 +63,7 @@ DECLARE_REGION(bl31)
  * (Does not necessarily mean that the memory is accessible.) */
 static inline int preram_symbols_available(void)
 {
-	return !CONFIG(ARCH_X86) || ENV_ROMSTAGE_OR_BEFORE;
+	return !ENV_X86 || ENV_ROMSTAGE_OR_BEFORE;
 }
 #endif /* __SYMBOLS_H */

View File

@@ -501,7 +501,7 @@ detailed_block(struct edid *result_edid, unsigned char *x, int in_extension,
 	 * another call to edid_set_framebuffer_bits_per_pixel(). As a cheap
 	 * heuristic, assume that X86 systems require a 64-byte row alignment
 	 * (since that seems to be true for most Intel chipsets). */
-	if (CONFIG(ARCH_X86))
+	if (ENV_X86)
 		edid_set_framebuffer_bits_per_pixel(out, 32, 64);
 	else
 		edid_set_framebuffer_bits_per_pixel(out, 32, 0);

View File

@@ -425,7 +425,7 @@ void main(void)
 	/* TODO: Understand why this is here and move to arch/platform code. */
 	/* For MMIO UART this needs to be called before any other printk. */
-	if (CONFIG(ARCH_X86))
+	if (ENV_X86)
 		init_timer();
 	/* console_init() MUST PRECEDE ALL printk()! Additionally, ensure

View File

@@ -7,8 +7,13 @@
  * <lib.h> in case GCC does not have an assembly version for this arch.
  */
-#if !CONFIG(ARCH_X86) /* work around lack of --gc-sections on x86 */ \
-	&& !CONFIG(ARCH_RISCV_RV32) /* defined in rv32 libgcc.a */
+/*
+ * FIXME
+ * work around lack of --gc-sections on x86
+ * defined in rv32 libgcc.a
+ */
+#if !ENV_X86 && !ENV_RISCV
 int __clzsi2(u32 a);
 int __clzsi2(u32 a)
 {

View File

@@ -46,7 +46,7 @@ void run_romstage(void)
 	vboot_run_logic();
-	if (CONFIG(ARCH_X86) && CONFIG(BOOTBLOCK_NORMAL)) {
+	if (ENV_X86 && CONFIG(BOOTBLOCK_NORMAL)) {
 		if (legacy_romstage_selector(&romstage))
 			goto fail;
 	} else {
@@ -119,8 +119,7 @@ void run_ramstage(void)
 	 * Only x86 systems using ramstage stage cache currently take the same
 	 * firmware path on resume.
 	 */
-	if (CONFIG(ARCH_X86) &&
-	    !CONFIG(NO_STAGE_CACHE))
+	if (ENV_X86 && !CONFIG(NO_STAGE_CACHE))
 		run_ramstage_from_resume(&ramstage);
 	vboot_run_logic();

View File

@@ -3,7 +3,7 @@
 #include <console/console.h>
 #include <device/mmio.h>
-#if CONFIG(ARCH_X86) && CONFIG(SSE2)
+#if ENV_X86 && CONFIG(SSE2)
 /* Assembler in lib/ is ugly. */
 static void write_phys(uintptr_t addr, u32 value)
 {

View File

@@ -11,7 +11,7 @@
 #include <stdint.h>
 #include <reg_script.h>
-#if CONFIG(ARCH_X86)
+#if ENV_X86
 #include <cpu/x86/msr.h>
 #endif
@@ -363,7 +363,7 @@ static void reg_script_write_iosf(struct reg_script_context *ctx)
 static uint64_t reg_script_read_msr(struct reg_script_context *ctx)
 {
-#if CONFIG(ARCH_X86)
+#if ENV_X86
 	const struct reg_script *step = reg_script_get_step(ctx);
 	msr_t msr = rdmsr(step->reg);
 	uint64_t value = msr.hi;
@@ -375,7 +375,7 @@ static uint64_t reg_script_read_msr(struct reg_script_context *ctx)
 static void reg_script_write_msr(struct reg_script_context *ctx)
 {
-#if CONFIG(ARCH_X86)
+#if ENV_X86
 	const struct reg_script *step = reg_script_get_step(ctx);
 	msr_t msr;
 	msr.hi = step->value >> 32;

View File

@@ -71,7 +71,7 @@ static int timestamp_should_run(void)
 	 * Only check boot_cpu() in other stages than
 	 * ENV_PAYLOAD_LOADER on x86.
 	 */
-	if ((!ENV_PAYLOAD_LOADER && CONFIG(ARCH_X86)) && !boot_cpu())
+	if ((!ENV_PAYLOAD_LOADER && ENV_X86) && !boot_cpu())
 		return 0;
 	return 1;

View File

@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
-#if CONFIG(ARCH_X86)
+#if ENV_X86
 #include <cpu/x86/pae.h>
 #else
 #define memset_pae(a, b, c, d, e) 0
@@ -83,7 +83,7 @@ static void clear_memory(void *unused)
 	cbmem_get_region(&baseptr, &size);
 	memranges_insert(&mem, (uintptr_t)baseptr, size, BM_MEM_TABLE);
-	if (CONFIG(ARCH_X86)) {
+	if (ENV_X86) {
 		/* Find space for PAE enabled memset */
 		pgtbl = get_free_memory_range(&mem, MEMSET_PAE_PGTL_ALIGN,
 					      MEMSET_PAE_PGTL_SIZE);
@@ -114,7 +114,7 @@ static void clear_memory(void *unused)
 					 range_entry_size(r));
 		}
 		/* Use PAE if available */
-		else if (CONFIG(ARCH_X86)) {
+		else if (ENV_X86) {
 			if (memset_pae(range_entry_base(r), 0,
 				       range_entry_size(r), (void *)pgtbl,
 				       (void *)vmem_addr))
@@ -126,7 +126,7 @@ static void clear_memory(void *unused)
 		}
 	}
-	if (CONFIG(ARCH_X86)) {
+	if (ENV_X86) {
 		/* Clear previously skipped memory reserved for pagetables */
 		printk(BIOS_DEBUG, "%s: Clearing DRAM %016lx-%016lx\n",
 		       __func__, pgtbl, pgtbl + MEMSET_PAE_PGTL_SIZE);