Cosmetically make init_cpus more similar for fam10 and K8.
Remove some fam10 warnings.

Signed-off-by: Myles Watson <mylesgw@gmail.com>
Acked-by: Myles Watson <mylesgw@gmail.com>

git-svn-id: svn://svn.coreboot.org/coreboot/trunk@5382 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
commit 362db613a0
parent 9b43afde39
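Most of the fam10 warnings removed by this commit come from functions that were defined with external linkage but never declared in a header; the diff below silences them by marking such file-local functions (get_platform_type, AMD_SetupPSIVID_d, cpuSetAMDPCI, cpuInitializeMCA, finalize_node_setup, model_10xxx_init, ...) static. A minimal, hypothetical sketch of that pattern, not taken from this tree and assuming GCC's -Wall -Wmissing-prototypes are in effect:

/* warn_demo.c - hypothetical example, not part of this commit.
 * Build with: gcc -c -Wall -Wmissing-prototypes warn_demo.c
 */

/* Internal linkage: no prototype outside this file is expected, so this
 * definition compiles without a warning. This is the pattern the commit
 * applies to the fam10 helpers listed above.
 */
static unsigned int quiet_helper(unsigned int val)
{
	return val + 1;
}

/* External linkage with no prior declaration in any header: GCC emits
 * "warning: no previous prototype for 'noisy_helper'".
 */
unsigned int noisy_helper(unsigned int val)
{
	return quiet_helper(val) + 1;
}

The same reasoning presumably motivates the static inline helpers in the header hunks below (is_cpu_f0_in_bsp, is_cpu_pre_f2_in_bsp): inline keeps a static function defined in a header from triggering defined-but-not-used warnings in files that include it without calling it.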
@@ -30,29 +30,6 @@
#define SET_FIDVID_CORE0_ONLY 0
#endif

static void print_initcpu8 (const char *strval, u8 val)
{
printk(BIOS_DEBUG, "%s%02x\n", strval, val);
}

static void print_initcpu8_nocr (const char *strval, u8 val)
{
printk(BIOS_DEBUG, "%s%02x", strval, val);
}

static void print_initcpu16 (const char *strval, u16 val)
{
printk(BIOS_DEBUG, "%s%04x\n", strval, val);
}

static void print_initcpu(const char *strval, u32 val)
{
printk(BIOS_DEBUG, "%s%08x\n", strval, val);
}

void update_microcode(u32 cpu_deviceid);
static void prep_fid_change(void);
static void init_fidvid_stage2(u32 apicid, u32 nodeid);
@@ -72,7 +49,6 @@ static void set_EnableCf8ExtCfg(void)
static void set_EnableCf8ExtCfg(void) { }
#endif

/*[39:8] */
#define PCI_MMIO_BASE 0xfe000000
/* because we will use gs to store hi, so need to make sure lo can start
@@ -99,20 +75,17 @@ static void set_pci_mmio_conf_reg(void)
msr.lo = 0;
wrmsr(0xc0000101, msr); //GS_Base Reg

#endif
}

typedef void (*process_ap_t) (u32 apicid, void *gp);

//core_range = 0 : all cores
//core range = 1 : core 0 only
//core range = 2 : cores other than core0

static void for_each_ap(u32 bsp_apicid, u32 core_range,
process_ap_t process_ap, void *gp)
static void for_each_ap(u32 bsp_apicid, u32 core_range, process_ap_t process_ap,
void *gp)
{
// here assume the OS don't change our apicid
u32 ap_apicid;
@@ -130,7 +103,7 @@ static void for_each_ap(u32 bsp_apicid, u32 core_range,

disable_siblings = !CONFIG_LOGICAL_CPUS;

#if CONFIG_LOGICAL_CPUS == 1
#if CONFIG_LOGICAL_CPUS == 1 && CONFIG_HAVE_OPTION_TABLE == 1
if (read_option(CMOS_VSTART_multi_core, CMOS_VLEN_multi_core, 0) != 0) { // 0 mean multi core
disable_siblings = 1;
}
@@ -164,9 +137,10 @@ static void for_each_ap(u32 bsp_apicid, u32 core_range,
jend = cores_found;
}

for (j = jstart; j <= jend; j++) {
ap_apicid = i * (nb_cfg_54 ? (siblings + 1):1) + j * (nb_cfg_54 ? 1:64);
ap_apicid =
i * (nb_cfg_54 ? (siblings + 1) : 1) +
j * (nb_cfg_54 ? 1 : 64);

#if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET > 0)
#if CONFIG_LIFT_BSP_APIC_ID == 0
@@ -175,7 +149,8 @@ static void for_each_ap(u32 bsp_apicid, u32 core_range,
ap_apicid += CONFIG_APIC_ID_OFFSET;
#endif

if(ap_apicid == bsp_apicid) continue;
if (ap_apicid == bsp_apicid)
continue;

process_ap(ap_apicid, gp);

@@ -183,8 +158,7 @@ static void for_each_ap(u32 bsp_apicid, u32 core_range,
}
}

/* FIXME: Duplicate of what is in lapic.h? */
static int lapic_remote_read(int apicid, int reg, u32 *pvalue)
static inline int lapic_remote_read(int apicid, int reg, u32 *pvalue)
{
int timeout;
u32 status;
@@ -192,8 +166,9 @@ static int lapic_remote_read(int apicid, int reg, u32 *pvalue)
lapic_wait_icr_idle();
lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
lapic_write(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
timeout = 0;

/* Extra busy check compared to lapic.h */
timeout = 0;
do {
status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
} while (status == LAPIC_ICR_BUSY && timeout++ < 1000);
@@ -212,28 +187,30 @@ static int lapic_remote_read(int apicid, int reg, u32 *pvalue)
return result;
}

/* Use the LAPIC timer count register to hold each cores init status */
#define LAPIC_MSG_REG 0x380

#if SET_FIDVID == 1
static void init_fidvid_ap(u32 bsp_apicid, u32 apicid, u32 nodeid, u32 coreid);
#endif

static inline __attribute__((always_inline)) void print_apicid_nodeid_coreid(u32 apicid, struct node_core_id id, const char *str)
static inline __attribute__ ((always_inline))
void print_apicid_nodeid_coreid(u32 apicid, struct node_core_id id,
const char *str)
{
printk(BIOS_DEBUG, "%s --- { APICID = %02x NODEID = %02x COREID = %02x} ---\n", str, apicid, id.nodeid, id.coreid);
printk(BIOS_DEBUG,
"%s --- { APICID = %02x NODEID = %02x COREID = %02x} ---\n", str,
apicid, id.nodeid, id.coreid);
}

static unsigned wait_cpu_state(u32 apicid, u32 state)
static u32 wait_cpu_state(u32 apicid, u32 state)
{
u32 readback = 0;
u32 timeout = 1;
int loop = 4000000;
while (--loop > 0) {
if (lapic_remote_read(apicid, LAPIC_MSG_REG, &readback) != 0) continue;
if (lapic_remote_read(apicid, LAPIC_MSG_REG, &readback) != 0)
continue;
if ((readback & 0x3f) == state) {
timeout = 0;
break; //target cpu is in stage started
@@ -248,30 +225,26 @@ static unsigned wait_cpu_state(u32 apicid, u32 state)
return timeout;
}

static void wait_ap_started(u32 ap_apicid, void *gp)
{
u32 timeout;
timeout = wait_cpu_state(ap_apicid, 0x13); // started
printk(BIOS_DEBUG, "* AP %02x", ap_apicid);
if (timeout) {
print_initcpu8_nocr("* AP ", ap_apicid);
print_initcpu(" didn't start timeout:", timeout);
}
else {
print_initcpu8_nocr("AP started: ", ap_apicid);
printk(BIOS_DEBUG, " timed out:%08x\n", timeout);
} else {
printk(BIOS_DEBUG, "started\n");
}
}

static void wait_all_other_cores_started(u32 bsp_apicid)
{
// all aps other than core0
print_debug("started ap apicid: ");
printk(BIOS_DEBUG, "started ap apicid: ");
for_each_ap(bsp_apicid, 2, wait_ap_started, (void *)0);
print_debug("\n");
printk(BIOS_DEBUG, "\n");
}

static void allow_all_aps_stop(u32 bsp_apicid)
{
/* Called by the BSP to indicate AP can stop */
@@ -292,7 +265,6 @@ static void enable_apic_ext_id(u32 node)
pci_write_config32(NODE_HT(node), 0x68, val);
}

static void STOP_CAR_AND_CPU(void)
{
msr_t msr;
@@ -303,6 +275,7 @@ static void STOP_CAR_AND_CPU(void)
wrmsr(BU_CFG2, msr);

disable_cache_as_ram(); // inline
/* stop all cores except node0/core0 the bsp .... */
stop_this_cpu();
}

@@ -339,7 +312,6 @@ static u32 init_cpus(u32 cpu_init_detectedx)

enable_lapic();

#if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET > 0)
u32 initial_apicid = get_initial_apicid();

@@ -350,11 +322,11 @@ static u32 init_cpus(u32 cpu_init_detectedx)
/* use initial apic id to lift it */
u32 dword = lapic_read(LAPIC_ID);
dword &= ~(0xff << 24);
dword |= (((initial_apicid + CONFIG_APIC_ID_OFFSET) & 0xff) << 24);
dword |=
(((initial_apicid + CONFIG_APIC_ID_OFFSET) & 0xff) << 24);

lapic_write(LAPIC_ID, dword);
}

#if CONFIG_LIFT_BSP_APIC_ID == 1
bsp_apicid += CONFIG_APIC_ID_OFFSET;
#endif
@@ -368,15 +340,14 @@ static u32 init_cpus(u32 cpu_init_detectedx)
if (id.coreid == 0) {
if (id.nodeid != 0) //all core0 except bsp
print_apicid_nodeid_coreid(apicid, id, " core0: ");
}
else { //all other cores
} else { //all other cores
print_apicid_nodeid_coreid(apicid, id, " corex: ");
}

if (cpu_init_detectedx) {
print_apicid_nodeid_coreid(apicid, id, "\n\n\nINIT detected from ");
print_debug("\nIssuing SOFT_RESET...\n");
print_apicid_nodeid_coreid(apicid, id,
"\n\n\nINIT detected from ");
printk(BIOS_DEBUG, "\nIssuing SOFT_RESET...\n");
soft_reset();
}

@@ -384,11 +355,9 @@ static u32 init_cpus(u32 cpu_init_detectedx)
if (!(warm_reset_detect(id.nodeid))) //FIXME: INIT is checked above but check for more resets?
distinguish_cpu_resets(id.nodeid); // Also indicates we are started
}

// Mark the core as started.
lapic_write(LAPIC_MSG_REG, (apicid << 24) | 0x13);

if (apicid != bsp_apicid) {
/* Setup each AP's cores MSRs.
* This happens after HTinit.
@@ -397,7 +366,6 @@ static u32 init_cpus(u32 cpu_init_detectedx)
update_microcode(cpuid_eax(1));
cpuSetAMDMSR();

#if SET_FIDVID == 1
#if (CONFIG_LOGICAL_CPUS == 1) && (SET_FIDVID_CORE0_ONLY == 1)
// Run on all AP for proper FID/VID setup.
@@ -406,11 +374,16 @@ static u32 init_cpus(u32 cpu_init_detectedx)
{
// check warm(bios) reset to call stage2 otherwise do stage1
if (warm_reset_detect(id.nodeid)) {
printk(BIOS_DEBUG, "init_fidvid_stage2 apicid: %02x\n", apicid);
printk(BIOS_DEBUG,
"init_fidvid_stage2 apicid: %02x\n",
apicid);
init_fidvid_stage2(apicid, id.nodeid);
} else {
printk(BIOS_DEBUG, "init_fidvid_ap(stage1) apicid: %02x\n", apicid);
init_fidvid_ap(bsp_apicid, apicid, id.nodeid, id.coreid);
printk(BIOS_DEBUG,
"init_fidvid_ap(stage1) apicid: %02x\n",
apicid);
init_fidvid_ap(bsp_apicid, apicid, id.nodeid,
id.coreid);
}
}
#endif
@@ -424,13 +397,14 @@ static u32 init_cpus(u32 cpu_init_detectedx)
set_var_mtrr(0, 0x00000000, CONFIG_RAMTOP, MTRR_TYPE_WRBACK);

STOP_CAR_AND_CPU();
printk(BIOS_DEBUG, "\nAP %02x should be halted but you are reading this....\n", apicid);
printk(BIOS_DEBUG,
"\nAP %02x should be halted but you are reading this....\n",
apicid);
}

return bsp_apicid;
}

static u32 is_core0_started(u32 nodeid)
{
u32 htic;
@@ -441,21 +415,22 @@ static u32 is_core0_started(u32 nodeid)
return htic;
}

static void wait_all_core0_started(void)
{
/* When core0 is started, it will distingush_cpu_resets
. So wait for that to finish */
* So wait for that to finish */
u32 i;
u32 nodes = get_nodes();

printk(BIOS_DEBUG, "Wait all core0s started \n");
printk(BIOS_DEBUG, "core0 started: ");
for (i = 1; i < nodes; i++) { // skip bsp, because it is running on bsp
while(!is_core0_started(i)) {}
print_initcpu8(" Core0 started on node: ", i);
while (!is_core0_started(i)) {
}
printk(BIOS_DEBUG, "Wait all core0s started done\n");
printk(BIOS_DEBUG, " %02x", i);
}
printk(BIOS_DEBUG, "\n");
}

#if CONFIG_MAX_PHYSICAL_CPUS > 1
/**
* void start_node(u32 node)
@@ -487,7 +462,6 @@ static void start_node(u8 node)
printk(BIOS_DEBUG, " done.\n");
}

/**
* static void setup_remote_node(u32 node)
*
@@ -546,7 +520,8 @@ static void AMD_Errata281(u8 node, u32 revision, u32 platform)
/* For each node we need to check for a "broken" node */
if (!(revision & (AMD_DR_B0 | AMD_DR_B1))) {
for (i = 0; i < nodes; i++) {
if (mctGetLogicalCPUID(i) & (AMD_DR_B0 | AMD_DR_B1)) {
if (mctGetLogicalCPUID(i) &
(AMD_DR_B0 | AMD_DR_B1)) {
mixed = 1;
break;
}
@@ -590,7 +565,6 @@ static void AMD_Errata281(u8 node, u32 revision, u32 platform)
}
}

static void AMD_Errata298(void)
{
/* Workaround for L2 Eviction May Occur during operation to
@@ -636,8 +610,7 @@ static void AMD_Errata298(void)
}
}

u32 get_platform_type(void)
static u32 get_platform_type(void)
{
u32 ret = 0;

@@ -663,8 +636,7 @@ u32 get_platform_type(void)
return ret;
}

void AMD_SetupPSIVID_d (u32 platform_type, u8 node)
static void AMD_SetupPSIVID_d(u32 platform_type, u8 node)
{
u32 dword;
int i;
@@ -692,7 +664,6 @@ void AMD_SetupPSIVID_d (u32 platform_type, u8 node)
}
}

/**
* AMD_CpuFindCapability - Traverse PCI capability list to find host HT links.
* HT Phy operations are not valid on links that aren't present, so this
@@ -700,7 +671,7 @@ void AMD_SetupPSIVID_d (u32 platform_type, u8 node)
*
* Returns the offset of the link register.
*/
BOOL AMD_CpuFindCapability (u8 node, u8 cap_count, u8 *offset)
static BOOL AMD_CpuFindCapability(u8 node, u8 cap_count, u8 * offset)
{
u32 reg;
u32 val;
@@ -734,14 +705,13 @@ BOOL AMD_CpuFindCapability (u8 node, u8 cap_count, u8 *offset)
return FALSE;
}

/**
* AMD_checkLinkType - Compare desired link characteristics using a logical
* link type mask.
*
* Returns the link characteristic mask.
*/
u32 AMD_checkLinkType (u8 node, u8 link, u8 regoff)
static u32 AMD_checkLinkType(u8 node, u8 link, u8 regoff)
{
u32 val;
u32 linktype = 0;
@@ -765,7 +735,6 @@ u32 AMD_checkLinkType (u8 node, u8 link, u8 regoff)
else
linktype |= HTPHY_LINKTYPE_HT1;

/* Check ganged */
val = pci_read_config32(NODE_PCI(node, 0), (link << 2) + 0x170);

@@ -777,12 +746,11 @@ u32 AMD_checkLinkType (u8 node, u8 link, u8 regoff)
return linktype;
}

/**
* AMD_SetHtPhyRegister - Use the HT link's HT Phy portal registers to update
* a phy setting for that link.
*/
void AMD_SetHtPhyRegister (u8 node, u8 link, u8 entry)
static void AMD_SetHtPhyRegister(u8 node, u8 link, u8 entry)
{
u32 phyReg;
u32 phyBase;
@@ -794,7 +762,6 @@ void AMD_SetHtPhyRegister (u8 node, u8 link, u8 entry)

phyBase = ((u32) link << 3) | 0x180;

/* Get the portal control register's initial value
* and update it to access the desired phy register
*/
@@ -834,7 +801,6 @@ void AMD_SetHtPhyRegister (u8 node, u8 link, u8 entry)
} while (!(val & HTPHY_IS_COMPLETE_MASK));
}

void cpuSetAMDMSR(void)
{
/* This routine loads the CPU with default settings in fam10_msr_default
@@ -868,8 +834,7 @@ void cpuSetAMDMSR(void)
printk(BIOS_DEBUG, " done\n");
}

void cpuSetAMDPCI(u8 node)
static void cpuSetAMDPCI(u8 node)
{
/* This routine loads the CPU with default settings in fam10_pci_default
* table . It must be run after Cache-As-RAM has been enabled, and
@@ -883,7 +848,6 @@ void cpuSetAMDPCI(u8 node)

printk(BIOS_DEBUG, "cpuSetAMDPCI %02d", node);

revision = mctGetLogicalCPUID(node);
platform = get_platform_type();

@@ -893,12 +857,14 @@ void cpuSetAMDPCI(u8 node)
if ((fam10_pci_default[i].revision & revision) &&
(fam10_pci_default[i].platform & platform)) {
val = pci_read_config32(NODE_PCI(node,
fam10_pci_default[i].function),
fam10_pci_default[i].
function),
fam10_pci_default[i].offset);
val &= ~fam10_pci_default[i].mask;
val |= fam10_pci_default[i].data;
pci_write_config32(NODE_PCI(node,
fam10_pci_default[i].function),
fam10_pci_default[i].
function),
fam10_pci_default[i].offset, val);
}
}
@@ -915,7 +881,8 @@ void cpuSetAMDPCI(u8 node)
if (AMD_CpuFindCapability(node, j, &offset)) {
if (AMD_checkLinkType(node, j, offset)
& fam10_htphy_default[i].linktype) {
AMD_SetHtPhyRegister(node, j, i);
AMD_SetHtPhyRegister(node, j,
i);
}
} else {
/* No more capabilities,
@@ -938,8 +905,7 @@ void cpuSetAMDPCI(u8 node)
printk(BIOS_DEBUG, " done\n");
}

void cpuInitializeMCA(void)
static void cpuInitializeMCA(void)
{
/* Clears Machine Check Architecture (MCA) registers, which power on
* containing unknown data, on currently running processor.
@@ -967,14 +933,13 @@ void cpuInitializeMCA(void)
}
}

/**
* finalize_node_setup()
*
* Do any additional post HT init
*
*/
void finalize_node_setup(struct sys_info *sysinfo)
static void finalize_node_setup(struct sys_info *sysinfo)
{
u8 i;
u8 nodes = get_nodes();
@@ -989,7 +954,6 @@ void finalize_node_setup(struct sys_info *sysinfo)
sysinfo->sbdn = get_sbdn(sysinfo->sbbusn);
#endif

for (i = 0; i < nodes; i++) {
cpuSetAMDPCI(i);
}
@@ -1007,4 +971,3 @@ void finalize_node_setup(struct sys_info *sysinfo)
}
#endif
}

@@ -38,13 +38,9 @@
#include <cpu/amd/model_10xxx_msr.h>

extern device_t get_node_pci(u32 nodeid, u32 fn);
extern int init_processor_name(void);

#define MCI_STATUS 0x401

msr_t rdmsr_amd(u32 index)
{
msr_t result;
@@ -67,7 +63,7 @@ void wrmsr_amd(u32 index, msr_t msr)
}

void model_10xxx_init(device_t dev)
static void model_10xxx_init(device_t dev)
{
u8 i;
msr_t msr;

@@ -14,48 +14,30 @@
#define SET_FIDVID_CORE0_ONLY 1
#endif

static inline void print_initcpu8 (const char *strval, unsigned val)
{
printk(BIOS_DEBUG, "%s%02x\n", strval, val);
}

static inline void print_initcpu8_nocr (const char *strval, unsigned val)
{
printk(BIOS_DEBUG, "%s%02x", strval, val);
}

static inline void print_initcpu16 (const char *strval, unsigned val)
{
printk(BIOS_DEBUG, "%s%04x\n", strval, val);
}

static inline void print_initcpu(const char *strval, unsigned val)
{
printk(BIOS_DEBUG, "%s%08x\n", strval, val);
}

typedef void (*process_ap_t)(unsigned apicid, void *gp);
typedef void (*process_ap_t) (u32 apicid, void *gp);

//core_range = 0 : all cores
//core range = 1 : core 0 only
//core range = 2 : cores other than core0
static void for_each_ap(unsigned bsp_apicid, unsigned core_range, process_ap_t process_ap, void *gp)

static void for_each_ap(u32 bsp_apicid, u32 core_range, process_ap_t process_ap,
void *gp)
{
// here assume the OS don't change our apicid
unsigned ap_apicid;
u32 ap_apicid;

unsigned nodes;
unsigned siblings = 0;
unsigned disable_siblings;
unsigned e0_later_single_core;
unsigned nb_cfg_54;
u32 nodes;
u32 siblings = 0;
u32 disable_siblings;
u32 e0_later_single_core;
u32 nb_cfg_54;
int i, j;

/* get_nodes define in in_coherent_ht.c */
nodes = get_nodes();

disable_siblings = !CONFIG_LOGICAL_CPUS;

#if CONFIG_LOGICAL_CPUS == 1 && CONFIG_HAVE_OPTION_TABLE == 1
if (read_option(CMOS_VSTART_multi_core, CMOS_VLEN_multi_core, 0) != 0) { // 0 mean multi core
disable_siblings = 1;
@@ -65,10 +47,10 @@ static void for_each_ap(unsigned bsp_apicid, unsigned core_range, process_ap_t p
/* here I assume that all node are same stepping, otherwise we can use use nb_cfg_54 from bsp for all nodes */
nb_cfg_54 = read_nb_cfg_54();

for (i = 0; i < nodes; i++) {
e0_later_single_core = 0;
j = ((pci_read_config32(PCI_DEV(0, 0x18+i, 3), 0xe8) >> 12) & 3);
j = ((pci_read_config32(PCI_DEV(0, 0x18 + i, 3), 0xe8) >> 12) &
3);
if (nb_cfg_54) {
if (j == 0) { // if it is single core, we need to increase siblings for apic calculation
#if CONFIG_K8_REV_F_SUPPORT == 0
@@ -83,25 +65,25 @@ static void for_each_ap(unsigned bsp_apicid, unsigned core_range, process_ap_t p
}
siblings = j;

unsigned jstart, jend;
u32 jstart, jend;

if (core_range == 2) {
jstart = 1;
}
else {
} else {
jstart = 0;
}

if(e0_later_single_core || disable_siblings || (core_range==1)) {
if (e0_later_single_core || disable_siblings
|| (core_range == 1)) {
jend = 0;
} else {
jend = siblings;
}

for (j = jstart; j <= jend; j++) {

ap_apicid = i * (nb_cfg_54?(siblings+1):1) + j * (nb_cfg_54?1:8);
ap_apicid =
i * (nb_cfg_54 ? (siblings + 1) : 1) +
j * (nb_cfg_54 ? 1 : 8);

#if (CONFIG_ENABLE_APIC_EXT_ID == 1)
#if CONFIG_LIFT_BSP_APIC_ID == 0
@@ -110,7 +92,8 @@ static void for_each_ap(unsigned bsp_apicid, unsigned core_range, process_ap_t p
ap_apicid += CONFIG_APIC_ID_OFFSET;
#endif

if(ap_apicid == bsp_apicid) continue;
if (ap_apicid == bsp_apicid)
continue;

process_ap(ap_apicid, gp);

@@ -118,17 +101,17 @@ static void for_each_ap(unsigned bsp_apicid, unsigned core_range, process_ap_t p
}
}

static inline int lapic_remote_read(int apicid, int reg, unsigned *pvalue)
static inline int lapic_remote_read(int apicid, int reg, u32 *pvalue)
{
int timeout;
unsigned status;
u32 status;
int result;
lapic_wait_icr_idle();
lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
lapic_write(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
timeout = 0;

/* Extra busy check compared to lapic.h */
timeout = 0;
do {
status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
} while (status == LAPIC_ICR_BUSY && timeout++ < 1000);
@@ -139,6 +122,7 @@ static inline int lapic_remote_read(int apicid, int reg, unsigned *pvalue)
} while (status == LAPIC_ICR_RR_INPROG && timeout++ < 1000);

result = -1;

if (status == LAPIC_ICR_RR_VALID) {
*pvalue = lapic_read(LAPIC_RRR);
result = 0;
@@ -148,24 +132,27 @@ static inline int lapic_remote_read(int apicid, int reg, unsigned *pvalue)

#define LAPIC_MSG_REG 0x380

#if SET_FIDVID == 1
static void init_fidvid_ap(unsigned bsp_apicid, unsigned apicid);
static void init_fidvid_ap(u32 bsp_apicid, u32 apicid);
#endif

static inline __attribute__((always_inline)) void print_apicid_nodeid_coreid(unsigned apicid, struct node_core_id id, const char *str)
static inline __attribute__ ((always_inline))
void print_apicid_nodeid_coreid(u32 apicid, struct node_core_id id,
const char *str)
{
printk(BIOS_DEBUG, "%s --- { APICID = %02x NODEID = %02x COREID = %02x} ---\n", str, apicid, id.nodeid, id.coreid);
printk(BIOS_DEBUG,
"%s --- { APICID = %02x NODEID = %02x COREID = %02x} ---\n", str,
apicid, id.nodeid, id.coreid);
}

static unsigned wait_cpu_state(unsigned apicid, unsigned state)
static u32 wait_cpu_state(u32 apicid, u32 state)
{
unsigned readback = 0;
unsigned timeout = 1;
u32 readback = 0;
u32 timeout = 1;
int loop = 2000000;
while (--loop > 0) {
if(lapic_remote_read(apicid, LAPIC_MSG_REG, &readback)!=0) continue;
if (lapic_remote_read(apicid, LAPIC_MSG_REG, &readback) != 0)
continue;
if ((readback & 0xff) == state) {
timeout = 0;
break; //target cpu is in stage started
@@ -179,68 +166,69 @@ static unsigned wait_cpu_state(unsigned apicid, unsigned state)

return timeout;
}
static void wait_ap_started(unsigned ap_apicid, void *gp )

static void wait_ap_started(u32 ap_apicid, void *gp)
{
unsigned timeout;
u32 timeout;
timeout = wait_cpu_state(ap_apicid, 0x33); // started
printk(BIOS_DEBUG, "* AP %02x", ap_apicid);
if (timeout) {
print_initcpu8_nocr("*", ap_apicid);
print_initcpu("*", timeout);
}
else {
print_initcpu8_nocr(" ", ap_apicid);
printk(BIOS_DEBUG, " timed out:%08x\n", timeout);
} else {
printk(BIOS_DEBUG, "started\n");
}
}

static void wait_all_aps_started(unsigned bsp_apicid)
static void wait_all_aps_started(u32 bsp_apicid)
{
for_each_ap(bsp_apicid, 0, wait_ap_started, (void *)0);
}

static void wait_all_other_cores_started(unsigned bsp_apicid) // all aps other than core0
static void wait_all_other_cores_started(u32 bsp_apicid)
{
print_debug("started ap apicid: ");
// all aps other than core0
printk(BIOS_DEBUG, "started ap apicid: ");
for_each_ap(bsp_apicid, 2, wait_ap_started, (void *)0);
print_debug("\n");
printk(BIOS_DEBUG, "\n");
}

static void allow_all_aps_stop(unsigned bsp_apicid)
static void allow_all_aps_stop(u32 bsp_apicid)
{
lapic_write(LAPIC_MSG_REG, (bsp_apicid<<24) | 0x44); // allow aps to stop
// allow aps to stop

lapic_write(LAPIC_MSG_REG, (bsp_apicid << 24) | 0x44);
}

static void STOP_CAR_AND_CPU(void)
{
disable_cache_as_ram(); // inline
stop_this_cpu(); // inline, it will stop all cores except node0/core0 the bsp ....
/* stop all cores except node0/core0 the bsp .... */
stop_this_cpu();
}

#if CONFIG_MEM_TRAIN_SEQ == 1
static inline void train_ram_on_node(unsigned nodeid, unsigned coreid, struct sys_info *sysinfo, unsigned retcall);
#endif

#if RAMINIT_SYSINFO == 1
static unsigned init_cpus(unsigned cpu_init_detectedx ,struct sys_info *sysinfo)
static u32 init_cpus(u32 cpu_init_detectedx, struct sys_info *sysinfo)
#else
static unsigned init_cpus(unsigned cpu_init_detectedx)
static u32 init_cpus(u32 cpu_init_detectedx)
#endif
{
unsigned bsp_apicid = 0;
unsigned apicid;
u32 bsp_apicid = 0;
u32 apicid;
struct node_core_id id;

/*
* already set early mtrr in cache_as_ram.inc
*/

/* that is from initial apicid, we need nodeid and coreid later */
/* that is from initial apicid, we need nodeid and coreid
later */
id = get_node_core_id_x();

/* NB_CFG MSR is shared between cores, so we need make sure core0 is done at first --- use wait_all_core0_started */
/* NB_CFG MSR is shared between cores, so we need make sure
core0 is done at first --- use wait_all_core0_started */
if (id.coreid == 0) {
set_apicid_cpuid_lo(); /* only set it on core0 */
#if CONFIG_ENABLE_APIC_EXT_ID == 1
#if (CONFIG_ENABLE_APIC_EXT_ID == 1)
enable_apic_ext_id(id.nodeid);
#endif
}
@@ -249,19 +237,20 @@ static unsigned init_cpus(unsigned cpu_init_detectedx)
// init_timer(); // We need TMICT to pass msg for FID/VID change

#if (CONFIG_ENABLE_APIC_EXT_ID == 1)
unsigned initial_apicid = get_initial_apicid();
u32 initial_apicid = get_initial_apicid();

#if CONFIG_LIFT_BSP_APIC_ID == 0
if (initial_apicid != 0) // other than bsp
#endif
{
/* use initial apic id to lift it */
uint32_t dword = lapic_read(LAPIC_ID);
u32 dword = lapic_read(LAPIC_ID);
dword &= ~(0xff << 24);
dword |= (((initial_apicid + CONFIG_APIC_ID_OFFSET) & 0xff)<<24);
dword |=
(((initial_apicid + CONFIG_APIC_ID_OFFSET) & 0xff) << 24);

lapic_write(LAPIC_ID, dword);
}

#if CONFIG_LIFT_BSP_APIC_ID == 1
bsp_apicid += CONFIG_APIC_ID_OFFSET;
#endif
@@ -276,18 +265,15 @@ static unsigned init_cpus(unsigned cpu_init_detectedx)
if (id.coreid == 0) {
if (id.nodeid != 0) //all core0 except bsp
print_apicid_nodeid_coreid(apicid, id, " core0: ");
} else { //all other cores
print_apicid_nodeid_coreid(apicid, id, " corex: ");
}
#if 0
else { //all core1
print_apicid_nodeid_coreid(apicid, id, " core1: ");
}
#endif

#endif

if (cpu_init_detectedx) {
print_apicid_nodeid_coreid(apicid, id, "\n\n\nINIT detected from ");
print_debug("\nIssuing SOFT_RESET...\n");
print_apicid_nodeid_coreid(apicid, id,
"\n\n\nINIT detected from ");
printk(BIOS_DEBUG, "\nIssuing SOFT_RESET...\n");
soft_reset();
}

@@ -295,13 +281,13 @@ static unsigned init_cpus(unsigned cpu_init_detectedx)
distinguish_cpu_resets(id.nodeid);
// start_other_core(id.nodeid); // start second core in first cpu, only allowed for nb_cfg_54 is not set
}

//here don't need to wait
lapic_write(LAPIC_MSG_REG, (apicid << 24) | 0x33); // mark the cpu is started

if (apicid != bsp_apicid) {
unsigned timeout=1;
unsigned loop = 100;
u32 timeout = 1;
u32 loop = 100;

#if SET_FIDVID == 1
#if (CONFIG_LOGICAL_CPUS == 1) && (SET_FIDVID_CORE0_ONLY == 1)
if (id.coreid == 0) // only need set fid for core0
@@ -314,7 +300,9 @@ static unsigned init_cpus(unsigned cpu_init_detectedx)
timeout = wait_cpu_state(bsp_apicid, 0x44);
}
if (timeout) {
print_initcpu8("while waiting for BSP signal to STOP, timeout in ap ", apicid);
printk(BIOS_DEBUG,
"while waiting for BSP signal to STOP, timeout in ap %02x\n",
apicid);
}
lapic_write(LAPIC_MSG_REG, (apicid << 24) | 0x44); // bsp can not check it before stop_this_cpu
set_var_mtrr(0, 0x00000000, CONFIG_RAMTOP, MTRR_TYPE_WRBACK);
@@ -329,10 +317,9 @@ static unsigned init_cpus(unsigned cpu_init_detectedx)
return bsp_apicid;
}

static unsigned is_core0_started(unsigned nodeid)
static u32 is_core0_started(u32 nodeid)
{
uint32_t htic;
u32 htic;
device_t device;
device = PCI_DEV(0, 0x18 + nodeid, 0);
htic = pci_read_config32(device, HT_INIT_CONTROL);
@@ -342,16 +329,16 @@ static unsigned is_core0_started(unsigned nodeid)

static void wait_all_core0_started(void)
{
//When core0 is started, it will distingush_cpu_resets. So wait for that
unsigned i;
unsigned nodes = get_nodes();
/* When core0 is started, it will distingush_cpu_resets
* So wait for that to finish */
u32 i;
u32 nodes = get_nodes();

print_debug("core0 started: ");
printk(BIOS_DEBUG, "core0 started: ");
for (i = 1; i < nodes; i++) { // skip bsp, because it is running on bsp
while(!is_core0_started(i)) {}
print_initcpu8_nocr(" ", i);
while (!is_core0_started(i)) {
}
print_debug("\n");

printk(BIOS_DEBUG, " %02x", i);
}
printk(BIOS_DEBUG, "\n");
}

@@ -33,4 +33,7 @@
#define LOGICAL_CPUS_NUM_MSR 0xC001100d
#define CPU_ID_EXT_FEATURES_MSR 0xC0011005

msr_t rdmsr_amd(u32 index);
void wrmsr_amd(u32 index, msr_t msr);

#endif /* CPU_AMD_MODEL_10XXX_MSR_H */

@@ -98,7 +98,7 @@ static inline int is_cpu_pre_f2(void)

#ifdef __PRE_RAM__
//AMD_F0_SUPPORT
static int is_cpu_f0_in_bsp(int nodeid)
static inline int is_cpu_f0_in_bsp(int nodeid)
{
uint32_t dword;
device_t dev;
@@ -106,7 +106,7 @@ static int is_cpu_f0_in_bsp(int nodeid)
dword = pci_read_config32(dev, 0xfc);
return (dword & 0xfff00) == 0x40f00;
}
static int is_cpu_pre_f2_in_bsp(int nodeid)
static inline int is_cpu_pre_f2_in_bsp(int nodeid)
{
uint32_t dword;
device_t dev;

@@ -153,7 +153,9 @@ static u32 amdfam10_scan_chain(device_t dev, u32 nodeid, u32 link, u32 sblink,
device_t devx;
u32 busses;
u32 segn = max>>8;
#if CONFIG_SB_HT_CHAIN_ON_BUS0 > 1
u32 busn = max&0xff;
#endif
u32 max_devfn;

#if CONFIG_HT3_SUPPORT==1
@@ -332,7 +334,7 @@ static int reg_useable(u32 reg,device_t goal_dev, u32 goal_nodeid,
u32 goal_link)
{
struct resource *res;
u32 nodeid, link;
u32 nodeid, link = 0;
int result;
res = 0;
for(nodeid = 0; !res && (nodeid < NODE_NUMS); nodeid++) {
@@ -646,9 +648,7 @@ struct chip_operations northbridge_amd_amdfam10_ops = {

static void amdfam10_domain_read_resources(device_t dev)
{
struct resource *resource;
unsigned reg;
unsigned link;

/* Find the already assigned resource pairs */
get_fx_devs();
@@ -658,19 +658,19 @@ static void amdfam10_domain_read_resources(device_t dev)
limit = f1_read_config32(reg + 0x04);
/* Is this register allocated? */
if ((base & 3) != 0) {
unsigned nodeid, link;
unsigned nodeid, reg_link;
device_t reg_dev;
if(reg<0xc0) { // mmio
nodeid = (limit & 0xf) + (base&0x30);
} else { // io
nodeid = (limit & 0xf) + ((base>>4)&0x30);
}
link = (limit >> 4) & 7;
reg_link = (limit >> 4) & 7;
reg_dev = __f0_dev[nodeid];
if (reg_dev) {
/* Reserve the resource */
struct resource *reg_resource;
reg_resource = new_resource(reg_dev, IOINDEX(0x1000 + reg, link));
reg_resource = new_resource(reg_dev, IOINDEX(0x1000 + reg, reg_link));
if (reg_resource) {
reg_resource->flags = 1;
}
@@ -683,6 +683,8 @@ static void amdfam10_domain_read_resources(device_t dev)
#if CONFIG_PCI_64BIT_PREF_MEM == 0
pci_domain_read_resources(dev);
#else
unsigned link;
struct resource *resource;
for(link=0; link<dev->links; link++) {
/* Initialize the system wide io space constraints */
resource = new_resource(dev, 0|(link<<2));
@@ -1215,7 +1217,9 @@ static u32 cpu_bus_scan(device_t dev, u32 max)
{
struct bus *cpu_bus;
device_t dev_mc;
#if CONFIG_CBB
device_t pci_domain;
#endif
int i,j;
int nodes;
unsigned nb_cfg_54;
@@ -1309,7 +1313,7 @@ static u32 cpu_bus_scan(device_t dev, u32 max)
/* Find which cpus are present */
cpu_bus = &dev->link[0];
for(i = 0; i < nodes; i++) {
device_t dev, cpu;
device_t cdb_dev, cpu;
struct device_path cpu_path;
unsigned busn, devn;
struct bus *pbus;
@@ -1326,47 +1330,47 @@ static u32 cpu_bus_scan(device_t dev, u32 max)
#endif

/* Find the cpu's pci device */
dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
if (!dev) {
cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 0));
if (!cdb_dev) {
/* If I am probing things in a weird order
* ensure all of the cpu's pci devices are found.
*/
int j;
for(j = 0; j <= 5; j++) { //FBDIMM?
dev = pci_probe_dev(NULL, pbus,
PCI_DEVFN(devn, j));
int fn;
for(fn = 0; fn <= 5; fn++) { //FBDIMM?
cdb_dev = pci_probe_dev(NULL, pbus,
PCI_DEVFN(devn, fn));
}
dev = dev_find_slot(busn, PCI_DEVFN(devn,0));
cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn,0));
}
if(dev) {
if(cdb_dev) {
/* Ok, We need to set the links for that device.
* otherwise the device under it will not be scanned
*/
int j;
int link;
int linknum;
#if CONFIG_HT3_SUPPORT==1
linknum = 8;
#else
linknum = 4;
#endif
if(dev->links < linknum) {
for(j=dev->links; j<linknum; j++) {
dev->link[j].link = j;
dev->link[j].dev = dev;
if(cdb_dev->links < linknum) {
for(link=cdb_dev->links; link<linknum; link++) {
cdb_dev->link[link].link = link;
cdb_dev->link[link].dev = cdb_dev;
}
dev->links = linknum;
printk(BIOS_DEBUG, "%s links increase to %d\n", dev_path(dev), dev->links);
cdb_dev->links = linknum;
printk(BIOS_DEBUG, "%s links increase to %d\n", dev_path(cdb_dev), cdb_dev->links);
}
}

cores_found = 0; // one core
dev = dev_find_slot(busn, PCI_DEVFN(devn, 3));
if (dev && dev->enabled) {
j = pci_read_config32(dev, 0xe8);
cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 3));
if (cdb_dev && cdb_dev->enabled) {
j = pci_read_config32(cdb_dev, 0xe8);
cores_found = (j >> 12) & 3; // dev is func 3
if (siblings > 3)
cores_found |= (j >> 13) & 4;
printk(BIOS_DEBUG, " %s siblings=%d\n", dev_path(dev), cores_found);
printk(BIOS_DEBUG, " %s siblings=%d\n", dev_path(cdb_dev), cores_found);
}

u32 jj;
@@ -1387,7 +1391,7 @@ static u32 cpu_bus_scan(device_t dev, u32 max)
cpu = find_dev_path(cpu_bus, &cpu_path);

/* Enable the cpu if I have the processor */
if (dev && dev->enabled) {
if (cdb_dev && cdb_dev->enabled) {
if (!cpu) {
cpu = alloc_dev(cpu_bus, &cpu_path);
}
@@ -1397,7 +1401,7 @@ static u32 cpu_bus_scan(device_t dev, u32 max)
}

/* Disable the cpu if I don't have the processor */
if (cpu && (!dev || !dev->enabled)) {
if (cpu && (!cdb_dev || !cdb_dev->enabled)) {
cpu->enabled = 0;
}

@@ -17,23 +17,17 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/

static void print_raminit(const char *strval, u32 val)
{
printk(BIOS_DEBUG, "%s%08x\n", strval, val);
}

static void print_tx(const char *strval, u32 val)
{
#if CONFIG_DEBUG_RAM_SETUP
print_raminit(strval, val);
printk(BIOS_DEBUG, "%s%08x\n", strval, val);
#endif
}

static void print_t(const char *strval)
{
#if CONFIG_DEBUG_RAM_SETUP
print_debug(strval);
printk(BIOS_DEBUG, "%s", strval);
#endif
}
#include "amdfam10.h"

@@ -726,13 +726,16 @@ int mctRead_SPD(u32 smaddr, u32 reg);
void InterleaveNodes_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA);
void InterleaveChannels_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA);
void mct_BeforeDQSTrain_Samp_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat);
static void StoreDQSDatStrucVal_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 ChipSel);
void StoreDQSDatStrucVal_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 ChipSel);
void phyAssistedMemFnceTraining(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA);
u8 mct_SaveRcvEnDly_D_1Pass(struct DCTStatStruc *pDCTstat, u8 pass);
static void mct_AdjustScrub_D(struct DCTStatStruc *pDCTstat, u16 *scrub_request);
static u8 mct_InitReceiver_D(struct DCTStatStruc *pDCTstat, u8 dct);
static void mct_Wait(u32 cycles);
u32 CheckNBCOFAutoPrechg(struct DCTStatStruc *pDCTstat, u32 dct);
u8 mct_AdjustDQSPosDelay_D(struct DCTStatStruc *pDCTstat, u8 dly);
void mct_AdjustScrub_D(struct DCTStatStruc *pDCTstat, u16 *scrub_request);
u8 mct_InitReceiver_D(struct DCTStatStruc *pDCTstat, u8 dct);
void mct_Wait(u32 cycles);
u8 mct_RcvrRankEnabled_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 Channel, u8 ChipSel);
u32 mct_GetRcvrSysAddr_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 channel, u8 receiver, u8 *valid);
void mct_Read1LTestPattern_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u32 addr);
void EarlySampleSupport_D(void);
#endif

@@ -582,7 +582,7 @@ skipLocMiddle:
}

static void StoreDQSDatStrucVal_D(struct MCTStatStruc *pMCTstat,
void StoreDQSDatStrucVal_D(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat, u8 ChipSel)
{
/* Store the DQSDelay value, found during a training sweep, into the DCT

@@ -17,15 +17,10 @@
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/

static u32 CheckNBCOFAutoPrechg(struct DCTStatStruc *pDCTstat, u32 dct);
static u8 mct_AdjustDQSPosDelay_D(struct DCTStatStruc *pDCTstat, u8 dly);

void EarlySampleSupport_D(void)
{
}

u32 procOdtWorkaround(struct DCTStatStruc *pDCTstat, u32 dct, u32 val)
{
u32 tmp;
@@ -251,7 +246,7 @@ void SyncSetting(struct DCTStatStruc *pDCTstat)
}

static u32 CheckNBCOFAutoPrechg(struct DCTStatStruc *pDCTstat, u32 dct)
u32 CheckNBCOFAutoPrechg(struct DCTStatStruc *pDCTstat, u32 dct)
{
u32 ret = 0;
u32 lo, hi;
@@ -362,7 +357,7 @@ static u8 mct_checkFenceHoleAdjust_D(struct MCTStatStruc *pMCTstat,
}

static u8 mct_AdjustDQSPosDelay_D(struct DCTStatStruc *pDCTstat, u8 dly)
u8 mct_AdjustDQSPosDelay_D(struct DCTStatStruc *pDCTstat, u8 dly)
{
u8 skip = 0;

@@ -393,8 +388,7 @@ static u8 mctDoAxRdPtrInit_D(struct DCTStatStruc *pDCTstat, u8 *Rdtr)
return 0;
}

static void mct_AdjustScrub_D(struct DCTStatStruc *pDCTstat, u16 *scrub_request) {
void mct_AdjustScrub_D(struct DCTStatStruc *pDCTstat, u16 *scrub_request) {

/* Erratum #202: disable DCache scrubber for Ax parts */

@@ -403,4 +397,3 @@ static void mct_AdjustScrub_D(struct DCTStatStruc *pDCTstat, u16 *scrub_request)
pDCTstat->ErrStatus |= 1 << SB_DCBKScrubDis;
}
}

@@ -502,7 +502,7 @@ static void dqsTrainRcvrEn_SW(struct MCTStatStruc *pMCTstat,
}

static u8 mct_InitReceiver_D(struct DCTStatStruc *pDCTstat, u8 dct)
u8 mct_InitReceiver_D(struct DCTStatStruc *pDCTstat, u8 dct)
{
if (pDCTstat->DIMMValidDCT[dct] == 0 ) {
return 8;
@@ -1080,7 +1080,7 @@ static void fenceDynTraining_D(struct MCTStatStruc *pMCTstat,
}

static void mct_Wait(u32 cycles)
void mct_Wait(u32 cycles)
{
u32 saved;
u32 hi, lo, msr;