CAR patch from YH LU

git-svn-id: svn://svn.coreboot.org/coreboot/trunk@2096 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
Ronald G. Minnich 2005-11-23 21:01:08 +00:00
parent 872141a402
commit fb0a64ba77
12 changed files with 329 additions and 359 deletions

View File

@ -8,10 +8,17 @@
/* Save the BIST result */
movl %eax, %ebp
// for the normal part, %ebx already contains cpu_init_detected from the fallback call
CacheAsRam:
/* hope we can skip the double set for normal part */
#if USE_FALLBACK_IMAGE == 1
movl $MTRRdefType_MSR, %ecx
rdmsr
andl $0x00000800, %eax
movl %eax, %ebx ; // store whether cpu_init was detected
/* Set MtrrFixDramModEn so the fixed MTRRs can be cleared */
xorl %eax, %eax # clear %eax and %edx
@ -53,7 +60,6 @@ clear_fixed_var_mtrr_out:
orl $(SYSCFG_MSR_MtrrVarDramEn | SYSCFG_MSR_MtrrFixDramEn), %eax
wrmsr
#if 1
#if CacheSize == 0x10000
/* enable caching for 64K using fixed mtrr */
movl $0x268, %ecx /* fix4k_c0000*/
@ -86,30 +92,6 @@ clear_fixed_var_mtrr_out:
xorl %eax, %eax
wrmsr
#else
/* enable caching for 64K using variable mtrr */
movl $0x200, %ecx
xorl %edx, %edx
movl $(CacheBase | MTRR_TYPE_WRBACK), %eax
wrmsr
movl $0x201, %ecx
movl $0x0000000f, %edx /* for the AMD 40-bit physical address space this would be 0xff */
movl $((~((CacheBase + CacheSize) - 1)) | 0x800), %eax
wrmsr
/* make it IO by clearing RdDram and WrDram */
movl $IORR0_BASE, %ecx
xorl %edx, %edx
movl $CacheBase, %eax /* bits 3 and 4 = 0 clear RdDram and WrDram */
wrmsr
movl $IORR0_MASK, %ecx
movl $0x000000ff, %edx
movl $(~((CacheBase + CacheSize) - 1) | 0x800), %eax
wrmsr
#endif
/* enable memory access for 0 - 1MB using top_mem */
movl $TOP_MEM, %ecx
xorl %edx, %edx
@ -145,7 +127,6 @@ clear_fixed_var_mtrr_out:
#if USE_FALLBACK_IMAGE == 1
/* Read the range with lodsl*/
movl $(CacheBase+CacheSize-4), %esi
std
@ -157,36 +138,6 @@ clear_fixed_var_mtrr_out:
xorl %eax, %eax
rep stosl
#if 0
/* check the cache as ram */
movl $CacheBase, %esi
movl $(CacheSize>>2), %ecx
.xin1:
movl %esi, %eax
movl %eax, (%esi)
movl $0x1000, %edx
movb %ah, %al
.testx1:
outb %al, $0x80
decl %edx
jnz .testx1
movl (%esi), %eax
cmpb $0xff, %al
je .xin2 /* dont show */
movl $0x1000, %edx
.testx2:
outb %al, $0x80
decl %edx
jnz .testx2
.xin2: decl %ecx
je .xout1
add $4, %esi
jmp .xin1
.xout1:
#endif
#endif /*USE_FALLBACK_IMAGE == 1*/
@ -198,6 +149,7 @@ clear_fixed_var_mtrr_out:
movl %ebp, %eax
/* Do we need to set ebp? No. */
movl %esp, %ebp
pushl %ebx /* init detected */
pushl %eax /* bist */
call amd64_main
/* We will not go back */
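
Editor's note: the assembly above reads MTRRdefType (MSR 0x2ff), masks bit 11 (the variable-MTRR enable bit) to detect a CPU-only INIT, keeps the result in %ebx, and finally pushes %ebx and %eax so they arrive as the two arguments of amd64_main. A minimal C sketch of the same check, assuming the coreboot-style msr_t/rdmsr helpers used elsewhere in this commit (illustrative only, not part of the patch):

/* Sketch only: mirrors the cpu_init_detected test kept in %ebx above. */
static unsigned long cpu_init_detected_sketch(void)
{
        msr_t msr = rdmsr(0x2ff);       /* MTRRdefType_MSR */
        return msr.lo & 0x00000800;     /* bit 11: variable MTRRs already enabled */
}

The pushl %ebx / pushl %eax sequence then matches the C prototype amd64_main(unsigned long bist, unsigned long cpu_init_detectedx) introduced in auto.c below.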

View File

@ -1,95 +0,0 @@
/* by yhlu 6.2005 */
/* be warned, this file will be used by other cores as well as core0/node0 */
__asm__ volatile (
/*
FIXME: back up the CACHE_AS_RAM stack into MMX/SSE registers and restore it once the real STACK is up.
This is only needed if we want to go back
*/
/* We don't need cache as ram from now on */
/* disable cache */
"movl %cr0, %eax\n\t"
"orl $(0x1<<30),%eax\n\t"
"movl %eax, %cr0\n\t"
/* clear the fixed MTRRs used for CAR */
"movl $0x269, %ecx\n\t" /* fix4k_c8000*/
"xorl %edx, %edx\n\t"
"xorl %eax, %eax\n\t"
"wrmsr\n\t"
#if DCACHE_RAM_SIZE > 0x8000
"movl $0x268, %ecx\n\t" /* fix4k_c0000*/
"wrmsr\n\t"
#endif
/* disable the fixed MTRRs from now on; they will be enabled again by linuxbios_ram */
"movl $0xC0010010, %ecx\n\t"
// "movl $SYSCFG_MSR, %ecx\n\t"
"rdmsr\n\t"
"andl $(~(3<<18)), %eax\n\t"
// "andl $(~(SYSCFG_MSR_MtrrFixDramModEn | SYSCFG_MSR_MtrrFixDramEn)), %eax\n\t"
"wrmsr\n\t"
/* Set the default memory type and disable fixed and enable variable MTRRs */
"movl $0x2ff, %ecx\n\t"
// "movl $MTRRdefType_MSR, %ecx\n\t"
"xorl %edx, %edx\n\t"
/* Enable Variable and Disable Fixed MTRRs */
"movl $0x00000800, %eax\n\t"
"wrmsr\n\t"
#if defined(CLEAR_FIRST_1M_RAM)
/* enable caching for first 1M using variable mtrr */
"movl $0x200, %ecx\n\t"
"xorl %edx, %edx\n\t"
"movl $(0 | 1), %eax\n\t"
// "movl $(0 | MTRR_TYPE_WRCOMB), %eax\n\t"
"wrmsr\n\t"
"movl $0x201, %ecx\n\t"
"movl $0x0000000f, %edx\n\t"
"movl $((~(( 0 + 0x100000) - 1)) | 0x800), %eax\n\t"
"wrmsr\n\t"
#endif
/* enable cache */
"movl %cr0, %eax\n\t"
"andl $0x9fffffff,%eax\n\t"
"movl %eax, %cr0\n\t"
#if defined(CLEAR_FIRST_1M_RAM)
/* clear the first 1M */
"movl $0x0, %edi\n\t"
"cld\n\t"
"movl $(0x100000>>2), %ecx\n\t"
"xorl %eax, %eax\n\t"
"rep stosl\n\t"
/* disable cache */
"movl %cr0, %eax\n\t"
"orl $(0x1<<30),%eax\n\t"
"movl %eax, %cr0\n\t"
/* enable caching for first 1M using variable mtrr */
"movl $0x200, %ecx\n\t"
"xorl %edx, %edx\n\t"
"movl $(0 | 6), %eax\n\t"
// "movl $(0 | MTRR_TYPE_WRBACK), %eax\n\t"
"wrmsr\n\t"
"movl $0x201, %ecx\n\t"
"movl $0x0000000f, %edx\n\t"
"movl $((~(( 0 + 0x100000) - 1)) | 0x800), %eax\n\t"
"wrmsr\n\t"
/* enable cache */
"movl %cr0, %eax\n\t"
"andl $0x9fffffff,%eax\n\t"
"movl %eax, %cr0\n\t"
"invd\n\t"
/*
FIXME: ideally esp and ebp would not need to change here, so the values saved in MMX/SSE could be restored.
But the CAR range has been turned into IO space, so do not go back.
*/
#endif
);
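
Editor's note: the 0x00000800 written to MSR 0x2ff above enables only the variable-range MTRRs. A short sketch of how that value decomposes, using the standard MTRRdefType bit layout (illustration only):

/* Sketch of the MTRRdefType (MSR 0x2ff) value written above. */
static unsigned mtrr_def_type_value_sketch(void)
{
        unsigned def_type = 0;          /* bits 7:0  - default memory type = uncacheable */
        unsigned fixed_en = 0 << 10;    /* bit 10    - fixed-range MTRRs disabled */
        unsigned var_en   = 1 << 11;    /* bit 11    - variable-range MTRRs enabled */
        return def_type | fixed_en | var_en;    /* = 0x00000800 */
}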

View File

@ -0,0 +1,53 @@
/* by yhlu 6.2005 */
/* be warned, this file will be used by core 0/node 0 only */
__asm__ volatile (
/* disable cache */
"movl %cr0, %eax\n\t"
"orl $(0x1<<30),%eax\n\t"
"movl %eax, %cr0\n\t"
/* enable caching for first 1M using variable mtrr */
"movl $0x200, %ecx\n\t"
"xorl %edx, %edx\n\t"
"movl $(0 | 1), %eax\n\t"
// "movl $(0 | MTRR_TYPE_WRCOMB), %eax\n\t"
"wrmsr\n\t"
"movl $0x201, %ecx\n\t"
"movl $0x0000000f, %edx\n\t"
"movl $((~(( 0 + 0x100000) - 1)) | 0x800), %eax\n\t"
"wrmsr\n\t"
/* clear the first 1M */
"movl $0x0, %edi\n\t"
"cld\n\t"
"movl $(0x100000>>2), %ecx\n\t"
"xorl %eax, %eax\n\t"
"rep stosl\n\t"
/* disable cache */
"movl %cr0, %eax\n\t"
"orl $(0x1<<30),%eax\n\t"
"movl %eax, %cr0\n\t"
/* enable caching for first 1M using variable mtrr */
"movl $0x200, %ecx\n\t"
"xorl %edx, %edx\n\t"
"movl $(0 | 6), %eax\n\t"
// "movl $(0 | MTRR_TYPE_WRBACK), %eax\n\t"
"wrmsr\n\t"
"movl $0x201, %ecx\n\t"
"movl $0x0000000f, %edx\n\t"
"movl $((~(( 0 + 0x100000) - 1)) | 0x800), %eax\n\t"
"wrmsr\n\t"
/* enable cache */
"movl %cr0, %eax\n\t"
"andl $0x9fffffff,%eax\n\t"
"movl %eax, %cr0\n\t"
"invd\n\t"
);
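
Editor's note: the two base/mask pairs above program variable MTRR 0 (MSRs 0x200/0x201) over the first 1 MiB, first as type 1 (write-combining) for the clear and then as type 6 (write-back) before the final invd. A sketch of the register values only, to make the mask arithmetic explicit (not part of the patch):

/* Sketch of the MTRRphysBase0/MTRRphysMask0 values used above. */
static void first_1m_mtrr_values_sketch(void)
{
        unsigned base_lo = 0x00000000 | 1;              /* MSR 0x200 low: base 0, type 1 = write-combining */
        unsigned mask_lo = (~(0x100000 - 1)) | 0x800;   /* MSR 0x201 low: 0xfff00800, bit 11 = valid */
        unsigned mask_hi = 0x0000000f;                  /* MSR 0x201 high: mask bits 32-35 */
        (void)base_lo; (void)mask_lo; (void)mask_hi;    /* the second pass rewrites base_lo with type 6 (write-back) */
}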

View File

@ -0,0 +1,46 @@
/* by yhlu 6.2005 */
/* be warned, this file will be used by other cores as well as core 0/node 0 */
__asm__ volatile (
/*
FIXME: back up the CACHE_AS_RAM stack into MMX/SSE registers and restore it once the real STACK is up.
This is only needed if we want to go back
*/
/* We don't need cache as ram from now on */
/* disable cache */
"movl %cr0, %eax\n\t"
"orl $(0x1<<30),%eax\n\t"
"movl %eax, %cr0\n\t"
/* clear the fixed MTRRs used for CAR */
"movl $0x269, %ecx\n\t" /* fix4k_c8000*/
"xorl %edx, %edx\n\t"
"xorl %eax, %eax\n\t"
"wrmsr\n\t"
#if DCACHE_RAM_SIZE > 0x8000
"movl $0x268, %ecx\n\t" /* fix4k_c0000*/
"wrmsr\n\t"
#endif
/* disable the fixed MTRRs from now on; they will be enabled again by linuxbios_ram */
"movl $0xC0010010, %ecx\n\t"
// "movl $SYSCFG_MSR, %ecx\n\t"
"rdmsr\n\t"
"andl $(~(3<<18)), %eax\n\t"
// "andl $(~(SYSCFG_MSR_MtrrFixDramModEn | SYSCFG_MSR_MtrrFixDramEn)), %eax\n\t"
"wrmsr\n\t"
/* Set the default memory type and disable fixed and enable variable MTRRs */
"movl $0x2ff, %ecx\n\t"
// "movl $MTRRdefType_MSR, %ecx\n\t"
"xorl %edx, %edx\n\t"
/* Enable Variable and Disable Fixed MTRRs */
"movl $0x00000800, %eax\n\t"
"wrmsr\n\t"
/* enable cache */
"movl %cr0, %eax\n\t"
"andl $0x9fffffff,%eax\n\t"
"movl %eax, %cr0\n\t"
);
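
Editor's note: besides clearing the fixed MTRRs, the block above drops SYSCFG[19:18] so the fixed-range DRAM enable/modification bits are off when control passes to linuxbios_ram. A small sketch of that MSR update, again assuming coreboot-style msr_t/rdmsr/wrmsr:

/* Sketch of the SYSCFG (MSR 0xC0010010) update done by "andl $(~(3<<18)), %eax" above.
   On K8, bit 18 = MtrrFixDramEn and bit 19 = MtrrFixDramModEn. */
static void clear_fixed_dram_enables_sketch(void)
{
        msr_t msr = rdmsr(0xC0010010);
        msr.lo &= ~(3 << 18);
        wrmsr(0xC0010010, msr);
}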

View File

@ -0,0 +1,81 @@
static void post_cache_as_ram(unsigned cpu_reset)
{
#if 1
{
/* Check the value of esp to verify that we have enough room for the stack in Cache as RAM */
unsigned v_esp;
__asm__ volatile (
"movl %%esp, %0\n\t"
: "=a" (v_esp)
);
#if CONFIG_USE_INIT
printk_debug("v_esp=%08x\r\n", v_esp);
#else
print_debug("v_esp="); print_debug_hex32(v_esp); print_debug("\r\n");
#endif
}
#endif
#if CONFIG_USE_INIT
printk_debug("cpu_reset = %08x\r\n",cpu_reset);
#else
print_debug("cpu_reset = "); print_debug_hex32(cpu_reset); print_debug("\r\n");
#endif
if(cpu_reset == 0) {
print_debug("Clearing initial memory region: ");
}
print_debug("No cache as ram now - ");
/* store cpu_reset to ebx */
__asm__ volatile (
"movl %0, %%ebx\n\t"
::"a" (cpu_reset)
);
#include "cpu/amd/car/disable_cache_as_ram.c"
if(cpu_reset==0) { // when cpu_reset != 0 there is no need to clear it
#include "cpu/amd/car/clear_1m_ram.c"
}
__asm__ volatile (
/* set new esp */ /* before _RAMBASE */
"subl %0, %%ebp\n\t"
"subl %0, %%esp\n\t"
::"a"( (DCACHE_RAM_BASE + DCACHE_RAM_SIZE)- _RAMBASE )
);
{
unsigned new_cpu_reset;
/* get back cpu_reset from ebx */
__asm__ volatile (
"movl %%ebx, %0\n\t"
:"=a" (new_cpu_reset)
);
print_debug("Use Ram as Stack now - "); /* but We can not go back any more, we lost old stack data in cache as ram*/
if(new_cpu_reset==0) {
print_debug("done\r\n");
} else
{
print_debug("\r\n");
}
#if CONFIG_USE_INIT
printk_debug("new_cpu_reset = %08x\r\n", new_cpu_reset);
#else
print_debug("new_cpu_reset = "); print_debug_hex32(new_cpu_reset); print_debug("\r\n");
#endif
/*copy and execute linuxbios_ram */
copy_and_run(new_cpu_reset);
/* We will not return */
}
print_debug("should not be here -\r\n");
}
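
Editor's note: the esp/ebp adjustment in post_cache_as_ram() subtracts (DCACHE_RAM_BASE + DCACHE_RAM_SIZE) - _RAMBASE, so a stack pointer near the top of the CAR region lands at the equivalent offset just below _RAMBASE in real RAM. A worked sketch with this commit's Config values (DCACHE_RAM_BASE=0xcf000, DCACHE_RAM_SIZE=0x1000) and a purely hypothetical _RAMBASE of 0x4000:

/* Worked example of the stack move above; _RAMBASE = 0x4000 is an assumption for illustration. */
static unsigned new_esp_sketch(unsigned old_esp)
{
        unsigned offset = (0xcf000 + 0x1000) - 0x4000;  /* 0xd0000 - 0x4000 = 0xcc000 */
        return old_esp - offset;                        /* e.g. a CAR stack at 0xcff00 becomes 0x3f00 */
}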

View File

@ -15,6 +15,7 @@
static int first_time = 1;
static int disable_siblings = !CONFIG_LOGICAL_CPUS;
void amd_sibling_init(device_t cpu, struct node_core_id id)
{
unsigned long i;
@ -93,9 +94,13 @@ struct node_core_id get_node_core_id(void)
return id;
}
unsigned int read_nb_cfg_54(void)
{
msr_t msr;
msr = rdmsr(NB_CFG_MSR);
return ( ( msr.hi >> (54-32)) & 1);
}
#if 0
static int get_max_siblings(int nodes)
{
device_t dev;
@ -160,20 +165,6 @@ unsigned get_apicid_base(unsigned ioapic_num)
nb_cfg_54 = read_nb_cfg_54();
#if 0
//this handles E0 single-core parts when nb_cfg_54 is set, but the auto.c stage does not set that bit for them.
if(nb_cfg_54 && (!disable_siblings) && (siblings == 0)) {
//we need to check if e0 single core is there
int i;
for(i=0; i<nodes; i++) {
if(is_e0_later_in_bsp(i)) {
siblings = 1;
break;
}
}
}
#endif
//construct apicid_base
if((!disable_siblings) && (siblings>0) ) {
@ -199,4 +190,3 @@ unsigned get_apicid_base(unsigned ioapic_num)
return apicid_base;
}
#endif

View File

@ -2,6 +2,31 @@
#include "cpu/amd/dualcore/dualcore_id.c"
static inline unsigned get_core_num_in_bsp(unsigned nodeid)
{
return ((pci_read_config32(PCI_DEV(0, 0x18+nodeid, 3), 0xe8)>>12) & 3);
}
static inline uint8_t set_apicid_cpuid_lo(void)
{
if(is_cpu_pre_e0()) return 0; // pre-E0 parts cannot set it
if(read_option(CMOS_VSTART_dual_core, CMOS_VLEN_dual_core, 0) != 0) { // disable dual_core
return 0;
}
// set NB_CFG[54]=1; why will the OS be happy with that???
msr_t msr;
msr = rdmsr(NB_CFG_MSR);
msr.hi |= (1<<(54-32)); // InitApicIdCpuIdLo
wrmsr(NB_CFG_MSR, msr);
return 1;
}
#if USE_DCACHE_RAM == 0
static void do_k8_init_and_stop_secondaries(void)
{
struct node_core_id id;
@ -72,3 +97,5 @@ static void k8_init_and_stop_secondaries(void)
do_k8_init_and_stop_secondaries();
}
#endif

View File

@ -15,6 +15,11 @@ struct node_core_id {
unsigned coreid:8;
};
static inline unsigned get_initial_apicid(void)
{
return ((cpuid_ebx(1) >> 24) & 0xf);
}
static inline struct node_core_id get_node_core_id(unsigned nb_cfg_54) {
struct node_core_id id;
// get the apicid via cpuid(1) ebx[27:24]

View File

@ -0,0 +1,75 @@
//it uses ENABLE_APIC_EXT_ID, APIC_ID_OFFSET, and LIFT_BSP_APIC_ID
static unsigned init_cpus(unsigned cpu_init_detectedx, int controllers, const struct mem_controller *ctrl)
{
unsigned cpu_reset;
unsigned bsp_apicid = 0;
struct node_core_id id;
#if CONFIG_LOGICAL_CPUS == 1
/* if dual core is not enabled, we don't need to reorder the apicid */
set_apicid_cpuid_lo();
#endif
id = get_node_core_id_x(); // that is initid
#if ENABLE_APIC_EXT_ID == 1
if(id.coreid == 0) {
enable_apic_ext_id(id.nodeid);
}
#endif
#if (ENABLE_APIC_EXT_ID == 1)
#if LIFT_BSP_APIC_ID == 1
bsp_apicid += APIC_ID_OFFSET;
#endif
#endif
enable_lapic();
init_timer();
#if (ENABLE_APIC_EXT_ID == 1)
#if LIFT_BSP_APIC_ID == 0
if( id.nodeid != 0 ) //all except cores in node0
#endif
{
//get initial apic id and lift it
uint32_t dword = lapic_read(LAPIC_ID);
dword &= ~(0xff<<24);
dword |= ((get_initial_apicid() + APIC_ID_OFFSET)<<24);
lapic_write(LAPIC_ID, dword );
}
#endif
if (cpu_init_detectedx) {
// __asm__ volatile ("jmp __cpu_reset");
soft_reset(); // avoid the soft reset? I don't want to reinit RAM again; make sure the BSP gets an INIT, so we need to send one INIT to the BSP ....
/*
1. check if it is BSP
2. if not send INIT to BSP and get out
3. if it is BSP, check if the mem is inited or not
4. if not inited, issue soft reset
5. if it is inited, call post_cache_as_ram with cpu_reset ==0. --- need to clear first 1M ram
*/
#if 0
if(!mem_inited(controllers, ctrl)) {
print_debug("mem is not initialized properly, need to hard reset\r\n");
hard_reset();
}
cpu_reset = 1;
post_cache_as_ram(cpu_reset);
#endif
//no return;
}
distinguish_cpu_resets(id.nodeid);
if (!boot_cpu()) {
// We need to stop the CACHE as RAM for this CPU too
#include "cpu/amd/car/disable_cache_as_ram.c"
stop_this_cpu(); // it will stop all cores except core0 of cpu0
}
return bsp_apicid;
}
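
Editor's note: when ENABLE_APIC_EXT_ID is set, init_cpus() above rewrites the LAPIC ID register field: it clears bits 31:24 and stores the initial APIC ID plus APIC_ID_OFFSET. A sketch of just that field update; the 0x10 offset is an assumption taken from the "CPU apicid is from 0x10" comment later in this commit, not a confirmed value:

/* Sketch of the APIC-ID lift done above (APIC_ID_OFFSET = 0x10 is assumed). */
static unsigned lifted_lapic_id_sketch(unsigned lapic_id_reg, unsigned initial_apicid)
{
        lapic_id_reg &= ~(0xffu << 24);                 /* clear the 8-bit APIC ID field */
        lapic_id_reg |= (initial_apicid + 0x10) << 24;  /* initial id + APIC_ID_OFFSET */
        return lapic_id_reg;                            /* e.g. id 1 -> field value 0x11 */
}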

View File

@ -136,10 +136,10 @@ default CONFIG_PCI_ROM_RUN=1
##
## enable CACHE_AS_RAM specifics
##
default USE_DCACHE_RAM=0
default USE_DCACHE_RAM=1
default DCACHE_RAM_BASE=0xcf000
default DCACHE_RAM_SIZE=0x1000
default CONFIG_USE_INIT=0
default CONFIG_USE_INIT=1
##
## Build code to setup a generic IOAPIC

View File

@ -110,12 +110,7 @@ static inline int spd_read_byte(unsigned device, unsigned address)
/* tyan does not want the default */
#include "resourcemap.c"
#if CONFIG_LOGICAL_CPUS==1
#define SET_NB_CFG_54 1
#include "cpu/amd/dualcore/dualcore.c"
#else
#include "cpu/amd/model_fxx/node_id.c"
#endif
#define FIRST_CPU 1
#define SECOND_CPU 1
@ -141,6 +136,10 @@ static inline int spd_read_byte(unsigned device, unsigned address)
#include "cpu/amd/car/copy_and_run.c"
#include "cpu/amd/car/post_cache_as_ram.c"
#include "cpu/amd/model_fxx/init_cpus.c"
#if USE_FALLBACK_IMAGE == 1
#include "southbridge/nvidia/ck804/ck804_enable_rom.c"
@ -180,28 +179,12 @@ static void sio_setup(void)
}
void real_main(unsigned long bist);
void real_main(unsigned long bist, unsigned long cpu_init_detectedx);
void amd64_main(unsigned long bist)
void amd64_main(unsigned long bist, unsigned long cpu_init_detectedx)
{
#if CONFIG_LOGICAL_CPUS==1
struct node_core_id id;
#else
unsigned nodeid;
#endif
/* Make certain my local apic is usable */
// enable_lapic();
#if CONFIG_LOGICAL_CPUS==1
id = get_node_core_id_x();
/* Is this a cpu only reset? */
if (cpu_init_detected(id.nodeid)) {
#else
// nodeid = lapicid() & 0xf;
nodeid = get_node_id();
/* Is this a cpu only reset? */
if (cpu_init_detected(nodeid)) {
#endif
if (cpu_init_detectedx) {
if (last_boot_normal()) {
goto normal_image;
} else {
@ -242,7 +225,7 @@ void amd64_main(unsigned long bist)
normal_image:
__asm__ volatile ("jmp __normal_image"
: /* outputs */
: "a" (bist) /* inputs */
: "a" (bist), "b" (cpu_init_detectedx) /* inputs */
);
cpu_reset:
#if 0
@ -254,11 +237,11 @@ void amd64_main(unsigned long bist)
#endif
fallback_image:
real_main(bist);
real_main(bist, cpu_init_detectedx);
}
void real_main(unsigned long bist)
void real_main(unsigned long bist, unsigned long cpu_init_detectedx)
#else
void amd64_main(unsigned long bist)
void amd64_main(unsigned long bist, unsigned long cpu_init_detectedx)
#endif
{
static const struct mem_controller cpu[] = {
@ -291,81 +274,9 @@ void amd64_main(unsigned long bist)
unsigned cpu_reset = 0;
if (bist == 0) {
#if CONFIG_LOGICAL_CPUS==1
struct node_core_id id;
#else
unsigned nodeid;
#endif
/* Skip this if there was a built in self test failure */
// amd_early_mtrr_init(); # not needed, already done in cache_as_ram
#if CONFIG_LOGICAL_CPUS==1
set_apicid_cpuid_lo();
id = get_node_core_id_x(); // that is initid
#if ENABLE_APIC_EXT_ID == 1
if(id.coreid == 0) {
enable_apic_ext_id(id.nodeid);
}
#endif
#else
nodeid = get_node_id();
#if ENABLE_APIC_EXT_ID == 1
enable_apic_ext_id(nodeid);
#endif
#endif
enable_lapic();
// init_timer();
#if CONFIG_LOGICAL_CPUS==1
#if ENABLE_APIC_EXT_ID == 1
#if LIFT_BSP_APIC_ID == 0
if( id.nodeid != 0 ) //all except cores in node0
#endif
lapic_write(LAPIC_ID, ( lapic_read(LAPIC_ID) | (APIC_ID_OFFSET<<24) ) );
#endif
if(id.coreid == 0) {
if (cpu_init_detected(id.nodeid)) {
// __asm__ volatile ("jmp __cpu_reset");
cpu_reset = 1;
goto cpu_reset_x;
}
distinguish_cpu_resets(id.nodeid);
// start_other_core(id.nodeid);
}
#else
#if ENABLE_APIC_EXT_ID == 1
#if LIFT_BSP_APIC_ID == 0
if(nodeid != 0)
#endif
lapic_write(LAPIC_ID, ( lapic_read(LAPIC_ID) | (APIC_ID_OFFSET<<24) ) ); // CPU apicid is from 0x10
#endif
if (cpu_init_detected(nodeid)) {
// __asm__ volatile ("jmp __cpu_reset");
cpu_reset = 1;
goto cpu_reset_x;
}
distinguish_cpu_resets(nodeid);
#endif
if (!boot_cpu()
#if CONFIG_LOGICAL_CPUS==1
|| (id.coreid != 0)
#endif
) {
// We need to stop the CACHE as RAM for this CPU too
#include "cpu/amd/car/cache_as_ram_post.c"
stop_this_cpu(); // it will stop all cores except core0 of cpu0
}
init_cpus(cpu_init_detectedx, sizeof(cpu)/sizeof(cpu[0]), cpu);
}
init_timer(); // only do it on the first CPU
lpc47b397_enable_serial(SERIAL_DEV, TTYS0_BASE);
uart_init();
console_init();
@ -377,11 +288,6 @@ void amd64_main(unsigned long bist)
needs_reset = setup_coherent_ht_domain();
#if CONFIG_LOGICAL_CPUS==1
// It is said that we should start core1 only after all core0s have launched
start_other_cores();
#endif
needs_reset |= ht_setup_chains_x();
needs_reset |= ck804_early_setup_x();
@ -395,88 +301,7 @@ void amd64_main(unsigned long bist)
memreset_setup();
sdram_initialize(sizeof(cpu)/sizeof(cpu[0]), cpu);
post_cache_as_ram(cpu_reset);
#if 1
{
/* Check value of esp to verify if we have enough rom for stack in Cache as RAM */
unsigned v_esp;
__asm__ volatile (
"movl %%esp, %0\n\t"
: "=a" (v_esp)
);
#if CONFIG_USE_INIT
printk_debug("v_esp=%08x\r\n", v_esp);
#else
print_debug("v_esp="); print_debug_hex32(v_esp); print_debug("\r\n");
#endif
}
#endif
#if 1
cpu_reset_x:
#if CONFIG_USE_INIT
printk_debug("cpu_reset = %08x\r\n",cpu_reset);
#else
print_debug("cpu_reset = "); print_debug_hex32(cpu_reset); print_debug("\r\n");
#endif
if(cpu_reset == 0) {
print_debug("Clearing initial memory region: ");
}
print_debug("No cache as ram now - ");
/* store cpu_reset to ebx */
__asm__ volatile (
"movl %0, %%ebx\n\t"
::"a" (cpu_reset)
);
if(cpu_reset==0) {
#define CLEAR_FIRST_1M_RAM 1
#include "cpu/amd/car/cache_as_ram_post.c"
}
else {
#undef CLEAR_FIRST_1M_RAM
#include "cpu/amd/car/cache_as_ram_post.c"
}
__asm__ volatile (
/* set new esp */ /* before _RAMBASE */
"subl %0, %%ebp\n\t"
"subl %0, %%esp\n\t"
::"a"( (DCACHE_RAM_BASE + DCACHE_RAM_SIZE)- _RAMBASE )
);
{
unsigned new_cpu_reset;
/* get back cpu_reset from ebx */
__asm__ volatile (
"movl %%ebx, %0\n\t"
:"=a" (new_cpu_reset)
);
/* We cannot go back any more; the old stack data in cache-as-ram is lost */
if(new_cpu_reset==0) {
print_debug("Use Ram as Stack now - done\r\n");
} else
{
print_debug("Use Ram as Stack now - \r\n");
}
#if CONFIG_USE_INIT
printk_debug("new_cpu_reset = %08x\r\n", new_cpu_reset);
#else
print_debug("new_cpu_reset = "); print_debug_hex32(new_cpu_reset); print_debug("\r\n");
#endif
/*copy and execute linuxbios_ram */
copy_and_run(new_cpu_reset);
/* We will not return */
}
#endif
print_err("should not be here -\r\n");
}

View File

@ -192,6 +192,17 @@ static void enable_routing(u8 node)
print_spew(" done.\r\n");
}
static void enable_apic_ext_id(u8 node)
{
u32 val;
val = pci_read_config32(NODE_HT(node), 0x68);
val |= (HTTC_APIC_EXT_SPUR | HTTC_APIC_EXT_ID | HTTC_APIC_EXT_BRD_CST);
pci_write_config32(NODE_HT(node), 0x68, val);
}
static void fill_row(u8 node, u8 row, u32 value)
{
pci_write_config32(NODE_HT(node), 0x40+(row<<2), value);