Move cpu/x86/car to cpu/intel/car, as previously discussed on the mailing list.

This patch also slightly changes the code so that there is a single cache_as_ram.inc which needs no "help" from cache_as_ram_post.c and cache_as_ram_disable.c (or worse, a lot of cruft hacked right into romstage.c, as on the Tyan S2735). All CAR code except the AMD Opteron/Athlon64 CAR code now follows the new, simpler scheme. I'll gladly leave src/cpu/amd/car to someone else ;-)

Signed-off-by: Stefan Reinauer <stepan@coresystems.de>
Acked-by: Stefan Reinauer <stepan@coresystems.de>

git-svn-id: svn://svn.coreboot.org/coreboot/trunk@5423 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
committed by Stefan Reinauer
parent 1abf46c74e
commit ccdd20a539
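To illustrate what the consolidation means for the build, here is a minimal sketch of a CPU socket Makefile.inc after this change; only the cpu_incs and subdirs-y lines are taken from the Makefile.inc hunk at the end of this diff, the rest (and which socket it belongs to) is assumed:

# Hypothetical socket Makefile.inc fragment (assumed context; the cpu_incs
# line is the actual change made by this commit).
subdirs-y += ../../x86/cache
subdirs-y += ../../x86/smm
subdirs-y += ../microcode

# romstage gets its CAR setup from the single cache_as_ram.inc; no
# cache_as_ram_post.c or cache_as_ram_disable.c helpers are included anymore.
cpu_incs += $(src)/cpu/intel/car/cache_as_ram.inc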
@@ -126,6 +126,19 @@ clear_fixed_var_mtrr:
wrmsr

jmp clear_fixed_var_mtrr

fixed_mtrr_msr:
.long 0x250, 0x258, 0x259
.long 0x268, 0x269, 0x26A
.long 0x26B, 0x26C, 0x26D
.long 0x26E, 0x26F
var_mtrr_msr:
.long 0x200, 0x201, 0x202, 0x203
.long 0x204, 0x205, 0x206, 0x207
.long 0x208, 0x209, 0x20A, 0x20B
.long 0x20C, 0x20D, 0x20E, 0x20F
.long 0x000 /* NULL, end of table */

clear_fixed_var_mtrr_out:

/* 0x06 is the WB IO type for a given 4k segment.
@@ -289,48 +302,118 @@ lout:
/* We need to set ebp ? No need */
movl %esp, %ebp
pushl %eax /* bist */
call stage1_main
/* We will not go back */
call main

fixed_mtrr_msr:
.long 0x250, 0x258, 0x259
.long 0x268, 0x269, 0x26A
.long 0x26B, 0x26C, 0x26D
.long 0x26E, 0x26F
var_mtrr_msr:
.long 0x200, 0x201, 0x202, 0x203
.long 0x204, 0x205, 0x206, 0x207
.long 0x208, 0x209, 0x20A, 0x20B
.long 0x20C, 0x20D, 0x20E, 0x20F
.long 0x000 /* NULL, end of table */
/*
FIXME : backup stack in CACHE_AS_RAM into mmx and sse and after we get STACK up, we restore that.
It is only needed if we want to go back
*/

/* We don't need cache as ram for now on */
/* disable cache */
movl %cr0, %eax
orl $(0x1<<30),%eax
movl %eax, %cr0

.align 0x1000
.code16
.global LogicalAP_SIPI
LogicalAP_SIPI:
// cr0 register is shared among the logical processors;
// so clear CD & NW bits so that the BSP's cr0 register
// controls the cache behavior
// Note: The cache behavior is determined by "OR" result
// of the cr0 registers of the logical processors

movl %cr0, %eax
andl $0x9FFFFFFF, %eax
movl %eax, %cr0

finit

// Set the semaphore to indicate the Logical AP is done
// with CAR specific initialization
movl $0x250, %ecx
movl $0x06, %eax
xorl %edx, %edx
/* clear sth */
movl $0x269, %ecx /* fix4k_c8000*/
xorl %edx, %edx
xorl %eax, %eax
wrmsr

// Halt this AP
cli
Halt_LogicalAP:
#if CONFIG_DCACHE_RAM_SIZE > 0x8000
movl $0x268, %ecx /* fix4k_c0000*/
wrmsr
#endif

/* Set the default memory type and disable fixed and enable variable MTRRs */
movl $0x2ff, %ecx
// movl $MTRRdefType_MSR, %ecx
xorl %edx, %edx
/* Enable Variable and Disable Fixed MTRRs */
movl $0x00000800, %eax
wrmsr

#if defined(CLEAR_FIRST_1M_RAM)
/* enable caching for first 1M using variable mtrr */
movl $0x200, %ecx
xorl %edx, %edx
movl $(0 | 1), %eax
// movl $(0 | MTRR_TYPE_WRCOMB), %eax
wrmsr

movl $0x201, %ecx
movl $0x0000000f, %edx /* AMD 40 bit 0xff*/
movl $((~(( 0 + 0x100000) - 1)) | 0x800), %eax
wrmsr
#endif

/* enable cache */
movl %cr0, %eax
andl $0x9fffffff,%eax
movl %eax, %cr0

#if defined(CLEAR_FIRST_1M_RAM)
/* clear the first 1M */
movl $0x0, %edi
cld
movl $(0x100000>>2), %ecx
xorl %eax, %eax
rep stosl

/* disable cache */
movl %cr0, %eax
orl $(0x1<<30),%eax
movl %eax, %cr0

/* enable caching for first 1M using variable mtrr */
movl $0x200, %ecx
xorl %edx, %edx
movl $(0 | 6), %eax
// movl $(0 | MTRR_TYPE_WRBACK), %eax
wrmsr

movl $0x201, %ecx
movl $0x0000000f, %edx /* AMD 40 bit 0xff*/
movl $((~(( 0 + 0x100000) - 1)) | 0x800), %eax
wrmsr

/* enable cache */
movl %cr0, %eax
andl $0x9fffffff,%eax
movl %eax, %cr0
invd

/* FIXME: I hope we don't need to change esp and ebp value here, so we
* can restore value from mmx sse back But the problem is the range is
* some io related, So don't go back
*/
#endif

/* clear boot_complete flag */
xorl %ebp, %ebp
__main:
post_code(0x11)
cld /* clear direction flag */

movl %ebp, %esi

/* For now: use CONFIG_RAMBASE + 1MB - 64K (counting downwards) as stack. This
* makes sure that we stay completely within the 1M-64K of memory that we
* preserve for suspend/resume.
*/

#ifndef HIGH_MEMORY_SAVE
#warning Need a central place for HIGH_MEMORY_SAVE
#define HIGH_MEMORY_SAVE ( (1024 - 64) * 1024 )
#endif
movl $(CONFIG_RAMBASE + HIGH_MEMORY_SAVE), %esp
movl %esp, %ebp
pushl %esi
call copy_and_run

.Lhlt:
post_code(0xee)
hlt
jmp Halt_LogicalAP
.code32
.CacheAsRam_out:
jmp .Lhlt
@@ -1,89 +0,0 @@
/*
* This file is part of the coreboot project.
*
* Copyright (C) 2007-2009 coresystems GmbH
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; version 2 of
* the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
* MA 02110-1301 USA
*/

#include <arch/stages.h>

/* called from assembler code */
void stage1_main(unsigned long bist);

/* from romstage.c */
void real_main(unsigned long bist);

void stage1_main(unsigned long bist)
{
unsigned int cpu_reset = 0;

real_main(bist);

/* No servicable parts below this line .. */
#ifdef CAR_DEBUG
/* Check value of esp to verify if we have enough rom for stack in Cache as RAM */
unsigned v_esp;
__asm__ volatile (
"movl %%esp, %0\n"
: "=a" (v_esp)
);
printk(BIOS_SPEW, "v_esp=%08x\n", v_esp);
#endif

printk(BIOS_SPEW, "cpu_reset = %08x\n", cpu_reset);
printk(BIOS_SPEW, "No cache as ram now - ");

/* store cpu_reset to ebx */
__asm__ volatile (
"movl %0, %%ebx\n\t"
::"a" (cpu_reset)
);

#undef CLEAR_FIRST_1M_RAM
#include "cpu/x86/car/cache_as_ram_post.c"

/* For now: use rambase + 1MB - 64K (counting downwards) as stack. This
* makes sure that we stay completely within the 1M of memory we
* preserve with the memcpy above.
*/

#ifndef HIGH_MEMORY_SAVE
#define HIGH_MEMORY_SAVE ( (1024 - 64) * 1024 )
#endif

__asm__ volatile (
"movl %0, %%ebp\n"
"movl %0, %%esp\n"
:: "a" (CONFIG_RAMBASE + HIGH_MEMORY_SAVE)
);

{
unsigned new_cpu_reset;

/* get back cpu_reset from ebx */
__asm__ volatile (
"movl %%ebx, %0\n"
:"=a" (new_cpu_reset)
);

/* Copy and execute coreboot_ram */
copy_and_run(new_cpu_reset);
}

/* We will not return */
printk(BIOS_DEBUG, "sorry. parachute did not open.\n");
}
@@ -7,4 +7,4 @@ subdirs-y += ../../x86/cache
subdirs-y += ../../x86/smm
subdirs-y += ../microcode

cpu_incs += $(src)/cpu/x86/car/cache_as_ram.inc
cpu_incs += $(src)/cpu/intel/car/cache_as_ram.inc
@@ -1,86 +0,0 @@

__asm__ volatile (
/*
FIXME : backup stack in CACHE_AS_RAM into mmx and sse and after we get STACK up, we restore that.
It is only needed if we want to go back
*/

/* We don't need cache as ram for now on */
/* disable cache */
"movl %cr0, %eax\n\t"
"orl $(0x1<<30),%eax\n\t"
"movl %eax, %cr0\n\t"

/* clear sth */
"movl $0x269, %ecx\n\t" /* fix4k_c8000*/
"xorl %edx, %edx\n\t"
"xorl %eax, %eax\n\t"
"wrmsr\n\t"
#if CONFIG_DCACHE_RAM_SIZE > 0x8000
"movl $0x268, %ecx\n\t" /* fix4k_c0000*/
"wrmsr\n\t"
#endif

/* Set the default memory type and disable fixed and enable variable MTRRs */
"movl $0x2ff, %ecx\n\t"
// "movl $MTRRdefType_MSR, %ecx\n\t"
"xorl %edx, %edx\n\t"
/* Enable Variable and Disable Fixed MTRRs */
"movl $0x00000800, %eax\n\t"
"wrmsr\n\t"

#if defined(CLEAR_FIRST_1M_RAM)
/* enable caching for first 1M using variable mtrr */
"movl $0x200, %ecx\n\t"
"xorl %edx, %edx\n\t"
"movl $(0 | 1), %eax\n\t"
// "movl $(0 | MTRR_TYPE_WRCOMB), %eax\n\t"
"wrmsr\n\t"

"movl $0x201, %ecx\n\t"
"movl $0x0000000f, %edx\n\t" /* AMD 40 bit 0xff*/
"movl $((~(( 0 + 0x100000) - 1)) | 0x800), %eax\n\t"
"wrmsr\n\t"
#endif

/* enable cache */
"movl %cr0, %eax\n\t"
"andl $0x9fffffff,%eax\n\t"
"movl %eax, %cr0\n\t"
#if defined(CLEAR_FIRST_1M_RAM)
/* clear the first 1M */
"movl $0x0, %edi\n\t"
"cld\n\t"
"movl $(0x100000>>2), %ecx\n\t"
"xorl %eax, %eax\n\t"
"rep stosl\n\t"

/* disable cache */
"movl %cr0, %eax\n\t"
"orl $(0x1<<30),%eax\n\t"
"movl %eax, %cr0\n\t"

/* enable caching for first 1M using variable mtrr */
"movl $0x200, %ecx\n\t"
"xorl %edx, %edx\n\t"
"movl $(0 | 6), %eax\n\t"
// "movl $(0 | MTRR_TYPE_WRBACK), %eax\n\t"
"wrmsr\n\t"

"movl $0x201, %ecx\n\t"
"movl $0x0000000f, %edx\n\t" /* AMD 40 bit 0xff*/
"movl $((~(( 0 + 0x100000) - 1)) | 0x800), %eax\n\t"
"wrmsr\n\t"

/* enable cache */
"movl %cr0, %eax\n\t"
"andl $0x9fffffff,%eax\n\t"
"movl %eax, %cr0\n\t"
"invd\n\t"

/*
FIXME: I hope we don't need to change esp and ebp value here, so we can restore value from mmx sse back
But the problem is the range is some io related, So don't go back
*/
#endif
);
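For reference, the CAR teardown that the deleted cache_as_ram_post.c performed (and that now lives directly in cache_as_ram.inc) amounts to the sequence below. This is a simplified sketch in freestanding C, assuming a 32-bit romstage; the helper names are made up, and the CLEAR_FIRST_1M_RAM path is left out:

#include <stdint.h>

/* Hypothetical helpers mirroring the inline assembly above. */
static inline void wrmsr(uint32_t msr, uint32_t lo, uint32_t hi)
{
	__asm__ volatile ("wrmsr" : : "c" (msr), "a" (lo), "d" (hi));
}

static inline uint32_t read_cr0(void)
{
	uint32_t cr0;
	__asm__ volatile ("movl %%cr0, %0" : "=r" (cr0));
	return cr0;
}

static inline void write_cr0(uint32_t cr0)
{
	__asm__ volatile ("movl %0, %%cr0" : : "r" (cr0));
}

static void disable_car_sketch(void)
{
	/* Disable the cache: set CR0.CD (bit 30). */
	write_cr0(read_cr0() | (1UL << 30));

	/* Clear the fixed MTRRs that backed the CAR region. The assembly only
	 * touches 0x268 when CONFIG_DCACHE_RAM_SIZE > 0x8000. */
	wrmsr(0x269, 0, 0);	/* MTRRfix4K_C8000 */
	wrmsr(0x268, 0, 0);	/* MTRRfix4K_C0000 */

	/* MTRRdefType (0x2FF): enable variable MTRRs, disable fixed MTRRs. */
	wrmsr(0x2ff, 0x00000800, 0);

	/* Re-enable the cache: clear CR0.CD and CR0.NW. */
	write_cr0(read_cr0() & 0x9fffffff);
}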