Applying 11_26_car_tyan.diff from Yinghai Lu.

NOTE: This will break the tree; it will be fixed up in a later commit.



git-svn-id: svn://svn.coreboot.org/coreboot/trunk@2115 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
This commit is contained in:
Stefan Reinauer
2005-12-01 10:54:44 +00:00
parent 70597f96c4
commit 806e146e75
38 changed files with 1034 additions and 2526 deletions

View File

@ -9,19 +9,20 @@
/* Save the BIST result */
movl %eax, %ebp
// for normal part %ebx already contain cpu_init_detected from fallback call
/*for normal part %ebx already contain cpu_init_detected from fallback call */
CacheAsRam:
cache_as_ram_setup:
/* hope we can skip the double set for normal part */
#if USE_FALLBACK_IMAGE == 1
/* check if cpu_init_detected */
movl $MTRRdefType_MSR, %ecx
rdmsr
andl $0x00000800, %eax
movl %eax, %ebx ; // We store the status about if cpu_init_detected
movl %eax, %ebx /* We store the status */
/* Set MtrrFixDramModEn for clear fixed mtrr */
xorl %eax, %eax # clear %eax and %edx
xorl %eax, %eax
xorl %edx, %edx
enable_fixed_mtrr_dram_modify:
@ -97,13 +98,16 @@ clear_fixed_var_mtrr_out:
xorl %edx, %edx
movl $(((CONFIG_LB_MEM_TOPK << 10) + TOP_MEM_MASK) & ~TOP_MEM_MASK) , %eax
wrmsr
#else
#endif /* USE_FALLBACK_IMAGE == 1*/
#if USE_FALLBACK_IMAGE == 0
/* disable cache */
movl %cr0, %eax
orl $(0x1<<30),%eax
movl %eax, %cr0
#endif /* USE_FALLBACK_IMAGE == 1*/
#endif
#if defined(XIP_ROM_SIZE) && defined(XIP_ROM_BASE)
/* enable write base caching so we can do execute in place
@ -151,7 +155,7 @@ clear_fixed_var_mtrr_out:
movl %esp, %ebp
pushl %ebx /* init detected */
pushl %eax /* bist */
call amd64_main
call cache_as_ram_main
/* We will not go back */
fixed_mtrr_msr:
@ -169,4 +173,4 @@ var_iorr_msr:
mem_top:
.long 0xC001001A, 0xC001001D
.long 0x000 /* NULL, end of table */
.CacheAsRam_out:
cache_as_ram_setup_out:

View File

@ -1,3 +1,19 @@
#include "cpu/amd/car/disable_cache_as_ram.c"
#include "cpu/amd/car/clear_1m_ram.c"
/* Print a label followed by a 32-bit value in hex, terminated with CRLF.
 * Uses printk_debug when CONFIG_USE_INIT is set; otherwise falls back to
 * the simple print_debug helpers. */
static inline void print_debug_pcar(const char *strval, uint32_t val)
{
#if CONFIG_USE_INIT
	printk_debug("%s%08x\r\n", strval, val);
#else
	print_debug(strval);
	print_debug_hex32(val);
	print_debug("\r\n");
#endif
}
static void post_cache_as_ram(unsigned cpu_reset)
{
@ -10,19 +26,11 @@ static void post_cache_as_ram(unsigned cpu_reset)
"movl %%esp, %0\n\t"
: "=a" (v_esp)
);
#if CONFIG_USE_INIT
printk_debug("v_esp=%08x\r\n", v_esp);
#else
print_debug("v_esp="); print_debug_hex32(v_esp); print_debug("\r\n");
#endif
print_debug_pcar("v_esp=", v_esp);
}
#endif
#if CONFIG_USE_INIT
printk_debug("cpu_reset = %08x\r\n",cpu_reset);
#else
print_debug("cpu_reset = "); print_debug_hex32(cpu_reset); print_debug("\r\n");
#endif
print_debug_pcar("cpu_reset = ",cpu_reset);
if(cpu_reset == 0) {
print_debug("Clearing initial memory region: ");
@ -35,12 +43,19 @@ static void post_cache_as_ram(unsigned cpu_reset)
::"a" (cpu_reset)
);
#include "cpu/amd/car/disable_cache_as_ram.c"
disable_cache_as_ram();
if(cpu_reset==0) { // cpu_reset don't need to clear it
#include "cpu/amd/car/clear_1m_ram.c"
clear_1m_ram();
}
#if 0
int i;
for(i=0;i<0x800000;i++) {
outb(0x66, 0x80);
}
#endif
__asm__ volatile (
/* set new esp */ /* before _RAMBASE */
"subl %0, %%ebp\n\t"
@ -58,6 +73,7 @@ static void post_cache_as_ram(unsigned cpu_reset)
);
print_debug("Use Ram as Stack now - "); /* but We can not go back any more, we lost old stack data in cache as ram*/
if(new_cpu_reset==0) {
print_debug("done\r\n");
} else
@ -65,11 +81,9 @@ static void post_cache_as_ram(unsigned cpu_reset)
print_debug("\r\n");
}
#if CONFIG_USE_INIT
printk_debug("new_cpu_reset = %08x\r\n", new_cpu_reset);
#else
print_debug("new_cpu_reset = "); print_debug_hex32(new_cpu_reset); print_debug("\r\n");
#endif
print_debug_pcar("new_cpu_reset = ", new_cpu_reset);
/*copy and execute linuxbios_ram */
copy_and_run(new_cpu_reset);
/* We will not return */

View File

@ -24,7 +24,7 @@ static inline struct node_core_id get_node_core_id(unsigned nb_cfg_54) {
struct node_core_id id;
// get the apicid via cpuid(1) ebx[27:24]
if( nb_cfg_54) {
// when NB_CFG[54] is set, nodid = ebx[27:25], coreid = ebx[24]
// when NB_CFG[54] is set, nodeid = ebx[27:25], coreid = ebx[24]
id.coreid = (cpuid_ebx(1) >> 24) & 0xf;
id.nodeid = (id.coreid>>1);
id.coreid &= 1;

View File

@ -1,75 +1,256 @@
//it takes the ENABLE_APIC_EXT_ID and APIC_ID_OFFSET and LIFT_BSP_APIC_ID
static unsigned init_cpus(unsigned cpu_init_detectedx, int controllers, const struct mem_controller *ctrl)
typedef void (*process_ap_t)(unsigned apicid, void *gp);
static void for_each_ap(unsigned bsp_apicid, unsigned core0_only, process_ap_t process_ap, void *gp)
{
unsigned cpu_reset;
unsigned bsp_apicid = 0;
struct node_core_id id;
// here assume the OS don't change our apicid
unsigned ap_apicid;
#if CONFIG_LOGICAL_CPUS == 1
/* if dual core is not enabled, we don't need reorder the apicid */
set_apicid_cpuid_lo();
#endif
unsigned nodes;
unsigned siblings = 0;
unsigned disable_siblings;
unsigned e0_later_single_core;
unsigned nb_cfg_54;
int i,j;
id = get_node_core_id_x(); // that is initid
#if ENABLE_APIC_EXT_ID == 1
if(id.coreid == 0) {
enable_apic_ext_id(id.nodeid);
/* get_nodes define in in_coherent_ht.c */
nodes = get_nodes();
disable_siblings = !CONFIG_LOGICAL_CPUS;
#if CONFIG_LOGICAL_CPUS == 1
if(read_option(CMOS_VSTART_dual_core, CMOS_VLEN_dual_core, 0) != 0) { // 0 mean dual core
disable_siblings = 1;
}
#endif
/* here I assume that all node are same stepping, otherwise we can use use nb_cfg_54 from bsp for all nodes */
nb_cfg_54 = read_nb_cfg_54();
for(i=0; i<nodes;i++) {
e0_later_single_core = 0;
j = ((pci_read_config32(PCI_DEV(0, 0x18+i, 3), 0xe8) >> 12) & 3);
if(nb_cfg_54) {
if(j == 0 ){ // if it is single core, we need to increase siblings for apic calculation
e0_later_single_core = is_e0_later_in_bsp(i); // single core
}
if(e0_later_single_core) {
j=1;
}
}
#endif
siblings = j;
unsigned jj;
if(e0_later_single_core || disable_siblings || core0_only) {
jj = 0;
} else {
jj = siblings;
}
for(j=0; j<=jj; j++) {
ap_apicid = i * (nb_cfg_54?(siblings+1):1) + j * (nb_cfg_54?1:8);
#if (ENABLE_APIC_EXT_ID == 1)
#if LIFT_BSP_APIC_ID == 1
bsp_apicid += APIC_ID_OFFSET;
#endif
#endif
#if LIFT_BSP_APIC_ID == 0
if( (i!=0) || (j!=0)) /* except bsp */
#endif
ap_apicid += APIC_ID_OFFSET;
#endif
if(ap_apicid == bsp_apicid) continue;
process_ap(ap_apicid, gp);
}
}
}
enable_lapic();
/* Read LAPIC register 'reg' of the CPU identified by 'apicid' using a
 * remote-read inter-processor message.  On success, stores the value in
 * *pvalue and returns 0; returns -1 if the remote read did not complete
 * within the bounded polling loops. */
static inline int lapic_remote_read(int apicid, int reg, unsigned *pvalue)
{
int timeout;
unsigned status;
int result;
/* wait for any in-flight IPI to finish, then address the target CPU */
lapic_wait_icr_idle();
lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
/* the remote-read command carries the register offset divided by 16 */
lapic_write(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
timeout = 0;
init_timer();
/* first wait for the command to leave the ICR (busy bit clears)... */
do {
status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
} while (status == LAPIC_ICR_BUSY && timeout++ < 1000);
timeout = 0;
/* ...then wait for the remote-read status field to settle */
do {
status = lapic_read(LAPIC_ICR) & LAPIC_ICR_RR_MASK;
} while (status == LAPIC_ICR_RR_INPROG && timeout++ < 1000);
result = -1;
if (status == LAPIC_ICR_RR_VALID) {
/* the remotely-read value is delivered in the RRR register */
*pvalue = lapic_read(LAPIC_RRR);
result = 0;
}
return result;
}
#define LAPIC_MSG_REG 0x380
/* Print "<str> --- { APICID = xx NODEID = xx COREID = xx } ---" for debug.
 * Fix: the non-printk branch separated calls with the comma operator
 * instead of statement semicolons; also normalize the preprocessor test
 * to the `#if CONFIG_USE_INIT` form used by print_debug_pcar. */
static inline __attribute__((always_inline)) void print_apicid_nodeid_coreid(unsigned apicid, struct node_core_id id, const char *str)
{
#if CONFIG_USE_INIT
	printk_debug("%s --- { APICID = %02x NODEID = %02x COREID = %02x} ---\r\n", str, apicid, id.nodeid, id.coreid);
#else
	print_debug(str);
	print_debug(" ---- {APICID = "); print_debug_hex8(apicid);
	print_debug(" NODEID = "); print_debug_hex8(id.nodeid);
	print_debug(" COREID = "); print_debug_hex8(id.coreid);
	print_debug("} --- \r\n");
#endif
}
/* Poll the target CPU's LAPIC message register until its low byte equals
 * the requested state, giving up after a bounded number of attempts. */
static void wait_cpu_state(unsigned apicid, unsigned state)
{
	int attempts = 100000;
	while (--attempts > 0) {
		unsigned readback;
		if (lapic_remote_read(apicid, LAPIC_MSG_REG, &readback) != 0)
			continue; /* remote read failed; try again */
		if ((readback & 0xff) == state)
			break; /* target cpu reached the requested state */
	}
}
/* Block until the given AP has posted the "started" (0x33) handshake
 * in its LAPIC message register.  'gp' is unused; it exists to match
 * the process_ap_t callback signature. */
static void wait_ap_started(unsigned ap_apicid, void *gp )
{
	wait_cpu_state(ap_apicid, 0x33);
}
/* Wait for every AP (cores on all nodes, not just core0) to report the
 * "started" handshake. */
static void wait_all_aps_started(unsigned bsp_apicid)
{
	for_each_ap(bsp_apicid, 0, wait_ap_started, (void *)0);
}
/* Post the 0x44 message (tagged with the BSP's apicid) that releases the
 * APs to stop themselves. */
static void allow_all_aps_stop(unsigned bsp_apicid)
{
	lapic_write(LAPIC_MSG_REG, (bsp_apicid << 24) | 0x44);
}
static unsigned init_cpus(unsigned cpu_init_detectedx)
{
unsigned bsp_apicid = 0;
unsigned apicid;
struct node_core_id id;
/*
* already set early mtrr in cache_as_ram.inc
*/
/* that is from initial apicid, we need nodeid and coreid later */
id = get_node_core_id_x();
/* NB_CFG MSR is shared between cores, so we need make sure core0 is done at first --- use wait_all_core0_started */
if(id.coreid == 0) {
set_apicid_cpuid_lo(); /* only set it on core0 */
#if ENABLE_APIC_EXT_ID == 1
enable_apic_ext_id(id.nodeid);
#endif
}
enable_lapic();
// init_timer(); // We need TMICT to pass msg for FID/VID change
#if (ENABLE_APIC_EXT_ID == 1)
#if LIFT_BSP_APIC_ID == 0
if( id.nodeid != 0 ) //all except cores in node0
#endif
{
//get initial apic id and lift it
uint32_t dword = lapic_read(LAPIC_ID);
dword &= ~(0xff<<24);
dword |= ((get_initial_apicid() + APIC_ID_OFFSET)<<24);
lapic_write(LAPIC_ID, dword );
}
unsigned initial_apicid = get_initial_apicid();
#if LIFT_BSP_APIC_ID == 0
if( initial_apicid != 0 ) // other than bsp
#endif
{
/* use initial apic id to lift it */
uint32_t dword = lapic_read(LAPIC_ID);
dword &= ~(0xff<<24);
dword |= (((initial_apicid + APIC_ID_OFFSET) & 0xff)<<24);
lapic_write(LAPIC_ID, dword);
}
#if LIFT_BSP_APIC_ID == 1
bsp_apicid += APIC_ID_OFFSET;
#endif
#endif
if (cpu_init_detectedx) {
// __asm__ volatile ("jmp __cpu_reset");
soft_reset(); // avoid soft reset? , I don't want to reinit ram again, make sure bsp get get INIT, So need to send one INIT to BSP ....
/*
1. check if it is BSP
2. if not send INIT to BSP and get out
3. if it is BSP, check if the mem is inited or not
4. if not inited, issue soft reset
5. if it is inited, call post_cache_as_ram with cpu_reset ==0. --- need to clear first 1M ram
*/
/* get the apicid, it may be lifted already */
apicid = lapicid();
#if 0
if(!mem_inited(controllers, ctrl)) {
print_debug("mem is not initialized properly, need to hard reset\r\n");
hard_reset();
}
cpu_reset = 1;
post_cache_as_ram(cpu_reset);
#endif
//no return;
#if 1
// show our apicid, nodeid, and coreid
if( id.coreid==0 ) {
if (id.nodeid!=0) //all core0 except bsp
print_apicid_nodeid_coreid(apicid, id, " core0: ");
}
#if 1
else { //all core1
print_apicid_nodeid_coreid(apicid, id, " core1: ");
}
distinguish_cpu_resets(id.nodeid);
#endif
if (!boot_cpu()) {
// We need stop the CACHE as RAM for this CPU too
#include "cpu/amd/car/disable_cache_as_ram.c"
stop_this_cpu(); // it will stop all cores except core0 of cpu0
#endif
if (cpu_init_detectedx) {
print_apicid_nodeid_coreid(apicid, id, "\r\n\r\n\r\nINIT detect from ");
print_debug("\r\nIssuing SOFT_RESET...\r\n");
soft_reset();
}
if(id.coreid==0) {
distinguish_cpu_resets(id.nodeid);
// start_other_core(id.nodeid); // start second core in first cpu, only allowed for nb_cfg_54 is not set
}
//here don't need to wait
lapic_write(LAPIC_MSG_REG, (apicid<<24) | 0x33); // mark the cpu is started
if(apicid != bsp_apicid) {
// We need to stop the CACHE as RAM for this CPU, really?
wait_cpu_state(bsp_apicid, 0x44);
lapic_write(LAPIC_MSG_REG, (apicid<<24) | 0x44); // bsp can not check it before stop_this_cpu
disable_cache_as_ram(); // inline
stop_this_cpu(); // inline, it will stop all cores except node0/core0 the bsp ....
}
return bsp_apicid;
}
#if CONFIG_LOGICAL_CPUS == 1
/* Nonzero when the HT init-detect bit is set in the given node's
 * HT_INIT_CONTROL register (function 0 of the node's northbridge),
 * which is used here as the "core0 has started" indication. */
static unsigned is_core0_started(unsigned nodeid)
{
	device_t dev = PCI_DEV(0, 0x18 + nodeid, 0);
	return pci_read_config32(dev, HT_INIT_CONTROL) & HTIC_INIT_Detect;
}
/* Spin until core0 of every node other than the BSP's has started.
 * Once core0 runs, it calls distinguish_cpu_resets, which is what this
 * polls for via is_core0_started. */
static void wait_all_core0_started(void)
{
	unsigned node;
	unsigned node_count = get_nodes();
	/* node 0 is skipped: this code is running on the BSP itself */
	for (node = 1; node < node_count; node++) {
		while (!is_core0_started(node))
			;
	}
}
#endif

View File

@ -1,12 +0,0 @@
/* 2004.12 yhlu add dual core support */
#include <arch/cpu.h>
#include "cpu/amd/model_fxx/model_fxx_msr.h"
/* Return this core's node id, derived from the initial APIC id that
 * CPUID function 1 reports in EBX[31:24]. */
static inline unsigned get_node_id(void) {
unsigned nodeid;
// get the apicid via cpuid(1) ebx[27:24]
// NOTE(review): only the low 3 bits are kept (& 0x7), so at most 8 nodes
// are representable even though the comment above mentions 4 apicid bits
// -- confirm against the platform's node numbering.
nodeid = (cpuid_ebx(1) >> 24) & 0x7;
return nodeid;
}