cpu/qemu-x86: Add x86_64 bootblock support

Add support for x86_64 bootblock on qemu.

Introduce a new approach to long mode support. The previous patch set
generated page tables at runtime and placed them in heap. The new
approach places the page tables in memory mapped ROM.

Introduce a new tool called pgtblgen that creates x86 long mode compatible
page tables and writes those to a file. The file is included into the CBFS
and placed at a predefined offset.

Add assembly code to load the page tables, based on a Kconfig symbol, and
enter long mode in the bootblock.

The code can be easily ported to real hardware bootblock.

Tested on qemu q35.

Change-Id: Iec92c6cea464c97c18a0811e2e91bc22133ace42
Signed-off-by: Patrick Rudolph <siro@das-labor.org>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/35680
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Arthur Heymans <arthur@aheymans.xyz>
This commit is contained in:
Patrick Rudolph
2019-09-28 17:44:01 +02:00
committed by Patrick Georgi
parent 6f7c955464
commit b1ef725f39
9 changed files with 297 additions and 12 deletions

View File

@@ -66,6 +66,14 @@ config ARCH_RAMSTAGE_X86_64
bool
default n
config ARCH_X86_64_PGTBL_LOC
hex "x86_64 page table location in CBFS"
depends on ARCH_BOOTBLOCK_X86_64
default 0xfffea000
help
The position where to place pagetables. Needs to be known at
compile time. Must not overlap other files in CBFS.
Must be 4KiB aligned, as the address is loaded into CR3.
config USE_MARCH_586
def_bool n
help

View File

@@ -31,6 +31,12 @@
#include <cpu/x86/16bit/reset16.inc>
#include <cpu/x86/32bit/entry32.inc>
/* BIST result in eax */
/* Stash the BIST result in ebx across the switch to long mode. */
mov %eax, %ebx
/* entry64.inc preserves ebx (it clobbers eax, ecx, edx). */
#include <cpu/x86/64bit/entry64.inc>
/* Back in 64-bit mode: restore the BIST result into eax. */
mov %ebx, %eax
#if CONFIG(BOOTBLOCK_DEBUG_SPINLOOP)
/* Wait for a JTAG debugger to break in and set EBX non-zero */

View File

@@ -38,9 +38,17 @@ cache_as_ram:
/* Align the stack and keep aligned for call to bootblock_c_entry() */
and $0xfffffff0, %esp
sub $4, %esp
/* Restore the BIST result and timestamps. */
#if defined(__x86_64__)
	/*
	 * SysV AMD64 argument passing:
	 *   rdi = 64-bit base timestamp (mm2 = high half, mm1 = low half)
	 *   rsi = BIST result (mm0)
	 * Mirrors the 32-bit path below, which pushes mm0/mm1/mm2.
	 *
	 * Fixes the original code, which used the invalid "shld %rdi, 32"
	 * (SHLD takes a count and two register operands), read mm1 twice,
	 * and never read the BIST result out of mm0.
	 */
	movd	%mm2, %rdi
	shlq	$32, %rdi		/* timestamp[63:32] */
	movd	%mm1, %rsi
	or	%rsi, %rdi		/* timestamp[31:0] */
	movd	%mm0, %rsi		/* BIST result */
#else
sub $4, %esp
movd %mm0, %ebx
movd %mm1, %eax
movd %mm2, %edx
@@ -48,6 +56,7 @@ cache_as_ram:
pushl %ebx
pushl %edx
pushl %eax
#endif
before_c_entry:
post_code(0x29)

View File

@@ -0,0 +1,62 @@
/*
* This file is part of the coreboot project.
*
* Copyright (c) 2019 Patrick Rudolph <siro@das-labor.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/*
 * For starting coreboot in long mode.
 *
 * For reference see "AMD64 Architecture Programmer's Manual Volume 2",
 * Document 24593-Rev. 3.31-July 2019 Chapter 5.3
 *
 * Expects an identity-mapped long-mode page table at
 * CONFIG_ARCH_X86_64_PGTBL_LOC (generated at build time and placed
 * in memory-mapped ROM — see the pgtblgen tool).
 *
 * Clobbers: eax, ecx, edx
 */
#if defined(__x86_64__)
.code32
/* CR3 holds a 4KiB-aligned address; the low 12 bits must be clear. */
#if (CONFIG_ARCH_X86_64_PGTBL_LOC & 0xfff) > 0
#error pagetables must be 4KiB aligned!
#endif
#include <cpu/x86/msr.h>
#include <arch/rom_segs.h>
setup_longmode:
/* Get page table address */
movl $(CONFIG_ARCH_X86_64_PGTBL_LOC), %eax
/* load identity mapped page tables */
movl %eax, %cr3
/* enable PAE (CR4 bit 5) — required before long mode can be activated */
movl %cr4, %eax
btsl $5, %eax
movl %eax, %cr4
/* enable long mode (set LME, bit 8 of the IA32_EFER MSR) */
movl $(IA32_EFER), %ecx
rdmsr
btsl $8, %eax
wrmsr
/* enable paging (CR0.PG, bit 31) — this activates long mode */
movl %cr0, %eax
btsl $31, %eax
movl %eax, %cr0
/* use long jump to switch to 64-bit code segment */
ljmp $ROM_CODE_SEG64, $__longmode_start
.code64
__longmode_start:
#endif