cpu/amd: Add initial AMD Family 15h support

TEST: Booted ASUS KGPE-D16 with single Opteron 6380
 * Unbuffered DDR3 DIMMs tested and working
 * Suspend to RAM (S3) tested and working

Change-Id: Idffd2ce36ce183fbfa087e5ba69a9148f084b45e
Signed-off-by: Timothy Pearson <tpearson@raptorengineeringinc.com>
Reviewed-on: http://review.coreboot.org/11966
Tested-by: build bot (Jenkins)
Reviewed-by: Martin Roth <martinroth@google.com>
Timothy Pearson
2015-10-16 13:51:51 -05:00
committed by Martin Roth
parent d150006c4a
commit 730a043fb6
73 changed files with 9203 additions and 2064 deletions

View File

@@ -28,19 +28,24 @@
#define CacheSizeAPStack CONFIG_DCACHE_AP_STACK_SIZE
#define MSR_MCFG_BASE 0xC0010058
#define MSR_FAM10 0xC001102A
#define MSR_BU_CFG2 0xC001102A
#define jmp_if_not_k8(x) comisd %xmm2, %xmm1; jae x
#define jmp_if_k8(x) comisd %xmm2, %xmm1; jb x
#define jmp_if_not_fam15h(x) comisd %xmm3, %xmm1; jb x
#define jmp_if_fam15h(x) comisd %xmm3, %xmm1; jae x
#define CPUID_MASK 0x0ff00f00
#define CPUID_VAL_FAM10_ROTATED 0x0f000010
#define CPUID_VAL_FAM15_ROTATED 0x0f000060
/*
* XMM map:
* xmm1: CPU family
* xmm2: Fam10h comparison value
* xmm3: Backup EBX
* xmm3: Fam15h comparison value
* xmm4: Backup EBX
* xmm5: Coreboot init detect
*/
/* Save the BIST result. */
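For orientation, a hedged C sketch of the family test that the jmp_if_*() macros above implement. The mask/rotate of the CPUID value itself happens between the hunks shown here, but the CPUID_VAL_*_ROTATED constants are consistent with rotating the masked EAX by 16 bits, after which an ordinary ordered compare separates Family 10h from Family 15h:

#include <stdint.h>

/* Sketch only: mirrors the comparison done on %xmm1/%xmm2/%xmm3 above. */
static uint32_t rotated_family(uint32_t cpuid_eax)
{
	uint32_t masked = cpuid_eax & 0x0ff00f00;	/* CPUID_MASK */
	return (masked >> 16) | (masked << 16);		/* 16-bit rotate */
}

static int is_fam15h_or_later(uint32_t cpuid_eax)
{
	/* Fam10h packs to 0x0f000010, Fam15h to 0x0f000060 */
	return rotated_family(cpuid_eax) >= 0x0f000060;	/* CPUID_VAL_FAM15_ROTATED */
}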
@@ -60,7 +65,7 @@ cache_as_ram_setup:
movl %eax, %cr4
/* Figure out the CPU family. */
cvtsi2sd %ebx, %xmm3
cvtsi2sd %ebx, %xmm4
movl $0x01, %eax
cpuid
/* Base family is bits 8..11, extended family is bits 20..27. */
@@ -70,13 +75,16 @@ cache_as_ram_setup:
cvtsi2sd %eax, %xmm1
movl $CPUID_VAL_FAM10_ROTATED, %eax
cvtsi2sd %eax, %xmm2
cvtsd2si %xmm3, %ebx
movl $CPUID_VAL_FAM15_ROTATED, %eax
cvtsi2sd %eax, %xmm3
cvtsd2si %xmm4, %ebx
/* Check if cpu_init_detected. */
movl $MTRR_DEF_TYPE_MSR, %ecx
rdmsr
andl $MTRR_DEF_TYPE_EN, %eax
movl %eax, %ebx /* We store the status. */
cvtsi2sd %ebx, %xmm5
jmp_if_k8(CAR_FAM10_out_post_errata)
@@ -117,21 +125,24 @@ cache_as_ram_setup:
CAR_FAM10_out:
jmp_if_fam15h(CAR_FAM10_errata_applied)
/*
* Erratum 193: Disable clean copybacks to L3 cache to allow cached ROM.
* Re-enable it after RAM is initialized and before CAR is disabled.
*/
movl $MSR_FAM10, %ecx
movl $MSR_BU_CFG2, %ecx
rdmsr
bts $15, %eax
bts $15, %eax /* Set bit 15 in EDX:EAX (bit 15 in EAX). */
wrmsr
/* Erratum 343, RevGuide for Fam10h, Pub#41322 Rev. 3.33 */
movl $MSR_FAM10, %ecx
movl $MSR_BU_CFG2, %ecx
rdmsr
bts $35-32, %edx /* Set bit 35 in EDX:EAX (bit 3 in EDX). */
wrmsr
CAR_FAM10_errata_applied:
#if CONFIG_MMCONF_SUPPORT
#if (CONFIG_MMCONF_BASE_ADDRESS > 0xFFFFFFFF)
#error "MMCONF_BASE_ADDRESS too big"
@@ -166,6 +177,63 @@ CAR_FAM10_out:
CAR_FAM10_out_post_errata:
/* Fam15h APIC IDs do not depend on NB config bit 54 */
jmp_if_not_fam15h(skip_nb54_set)
movl $0xc001001f, %ecx /* NB_CFG_MSR */
rdmsr
bts $(54 - 32), %edx /* Set NB config bit 54 */
wrmsr
skip_nb54_set:
/* On Fam15h CPUs each compute unit's MTRRs are shared between two cores */
jmp_if_not_fam15h(skip_cu_check)
/* Get the initial APIC ID. */
movl $1, %eax
cpuid
movl %ebx, %eax
/* Restore init detect */
cvtsd2si %xmm5, %ebx
/* Determine if this is the second core to start in a compute unit; if so,
 * wait for the first core to start, clear init detect, and skip MTRR init. */
bt $24, %eax
jnc skip_cu_check /* First core in the compute unit jumps to skip_cu_check */
/* Busywait until the first core sets up the MTRRs */
check_init_detect_1:
/* Check if cpu_init_detected. */
movl $MTRR_DEF_TYPE_MSR, %ecx
rdmsr
andl $MTRR_DEF_TYPE_EN, %eax
cmp $0x00000000, %eax
je check_init_detect_1 /* First core has not yet started */
check_init_detect_2:
movl $SYSCFG_MSR, %ecx
rdmsr
andl $(SYSCFG_MSR_MtrrFixDramEn | SYSCFG_MSR_MtrrVarDramEn), %eax
cmp $0x00000000, %eax
je check_init_detect_2 /* First core has not yet started */
/* First core has now started */
movl $0x00000000, %ebx /* Clear init detect flag */
cvtsi2sd %ebx, %xmm5
jmp fam10_mtrr_setup_complete
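The busywait above is the Family 15h compute-unit handshake: both cores of a CU share one set of MTRRs, so the second (odd APIC ID) core must not touch them and instead waits for its sibling. A minimal C rendering of the same handshake, assuming coreboot's usual MSR helpers and the constants used above (header paths are assumptions):

#include <cpu/x86/msr.h>	/* msr_t, rdmsr */
#include <cpu/x86/mtrr.h>	/* MTRR_DEF_TYPE_MSR, MTRR_DEF_TYPE_EN */
#include <cpu/amd/mtrr.h>	/* SYSCFG_MSR, SYSCFG_MSR_Mtrr*DramEn */

/* Sketch: spin until the first core of the compute unit has enabled the
 * MTRRs and the SYSCFG DRAM-enable bits, exactly as the assembly above. */
static void wait_for_sibling_mtrr_setup(void)
{
	msr_t msr;

	do {
		msr = rdmsr(MTRR_DEF_TYPE_MSR);
	} while (!(msr.lo & MTRR_DEF_TYPE_EN));

	do {
		msr = rdmsr(SYSCFG_MSR);
	} while (!(msr.lo & (SYSCFG_MSR_MtrrFixDramEn | SYSCFG_MSR_MtrrVarDramEn)));
}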
skip_cu_check:
jmp_if_not_fam15h(CAR_FAM15_errata_applied)
/* Erratum 714, RevGuide for Fam15h, Pub#48063 Rev. 3.24 */
movl $MSR_BU_CFG2, %ecx
rdmsr
bts $8, %eax /* Set bit 8 in EDX:EAX (bit 8 in EAX). */
wrmsr
CAR_FAM15_errata_applied:
/* Set MtrrFixDramModEn for clear fixed MTRR. */
enable_fixed_mtrr_dram_modify:
movl $SYSCFG_MSR, %ecx
@@ -334,8 +402,42 @@ wbcache_post_fam10_setup:
orl $(SYSCFG_MSR_MtrrVarDramEn | SYSCFG_MSR_MtrrFixDramEn), %eax
wrmsr
fam10_mtrr_setup_complete:
post_code(0xa1)
/* Disable conversion of INVD to WBINVD (INVDWBINVD = 0) */
mov $0xc0010015, %ecx
rdmsr
btr $4, %eax
wrmsr
jmp_if_not_fam15h(fam15_car_msr_setup_complete)
/* Disable streaming store (DisSS = 1) */
mov $0xc0011020, %ecx
rdmsr
bts $28, %eax
wrmsr
/* Disable speculative ITLB reloads (DisSpecTlbRld = 1) */
mov $0xc0011021, %ecx
rdmsr
bts $9, %eax
wrmsr
/* Disable speculative DTLB reloads (DisSpecTlbRld = 1) and set DisHwPf = 1 */
mov $0xc0011022, %ecx
rdmsr
bts $4, %eax
bts $13, %eax
wrmsr
/* Disable CR0 combining (CombineCr0Cd = 0) */
mov $0xc001102b, %ecx
rdmsr
btr $49-32, %edx
wrmsr
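/* Note: DisSS, DisSpecTlbRld and DisHwPf are CAR-only workarounds and are
 * cleared again in disable_cache_as_ram() once RAM is up; CombineCr0Cd is
 * re-enabled later in ramstage (see the BU_CFG3 write in model_10xxx_init()
 * further down in this commit). */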
fam15_car_msr_setup_complete:
/* Enable cache. */
movl %cr0, %eax
andl $(~(CR0_CacheDisable | CR0_NoWriteThrough)), %eax
@@ -416,9 +518,6 @@ CAR_FAM10_ap:
* to reverse it.
*/
/* Store our init detected. */
movl %ebx, %esi
/* Get the coreid bits at first. */
movl $0x80000008, %eax
cpuid
@@ -437,6 +536,8 @@ CAR_FAM10_ap:
movl %edi, %ecx /* CoreID bits */
bt $(54 - 32), %edx
jc roll_cfg
/* Fam10h NB config bit 54 was not set */
rolb %cl, %bl
roll_cfg:
@@ -446,8 +547,8 @@ roll_cfg:
movl $(CacheBase + (CacheSize - (CacheSizeBSPStack + CacheSizeBSPSlush))), %esp
subl %eax, %esp
/* Retrive init detected. */
movl %esi, %ebx
/* Restore init detect */
cvtsd2si %xmm5, %ebx
post_code(0xa4)
@@ -460,6 +561,8 @@ CAR_FAM10_ap_out:
andl $~(3 << 9), %eax
movl %eax, %cr4
post_code(0xa6)
/* Restore the BIST result. */
movl %ebp, %eax
@@ -467,6 +570,9 @@ CAR_FAM10_ap_out:
movl %esp, %ebp
pushl %ebx /* Init detected. */
pushl %eax /* BIST */
post_code(0xa7)
call cache_as_ram_main
/* We will not go back. */

View File

@@ -15,15 +15,27 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* be warned, this file will be used other cores and core 0 / node 0
* WARNING: this file will be used by both the AP cores and core 0 / node 0
*/
#include <cpu/x86/cache.h>
static inline __attribute__((always_inline)) void disable_cache_as_ram(void)
static inline __attribute__((always_inline)) uint32_t amd_fam1x_cpu_family(void)
{
uint32_t family;
family = cpuid_eax(0x80000001);
family = ((family & 0xf00000) >> 16) | ((family & 0xf00) >> 8);
return family;
}
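The base/extended-family packing above recurs throughout this commit (is_fam15h(), get_node_core_id(), init_cpus.c). A small host-side check, using CPUID values taken from the cpu_table added later in this commit, illustrates why ">= 0x6f" means Family 15h or later:

#include <assert.h>
#include <stdint.h>

/* Extended family (CPUID bits 27:20) lands in bits 7:4 of the packed
 * value, base family (bits 11:8) in bits 3:0. */
static uint32_t pack_family(uint32_t cpuid_eax)
{
	return ((cpuid_eax & 0xf00000) >> 16) | ((cpuid_eax & 0xf00) >> 8);
}

int main(void)
{
	assert(pack_family(0x600f12) == 0x6f);	/* OR-B2, Family 15h */
	assert(pack_family(0x100f80) == 0x1f);	/* HY-D0, Family 10h */
	return 0;
}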
static inline __attribute__((always_inline)) void disable_cache_as_ram(uint8_t skip_sharedc_config)
{
msr_t msr;
uint32_t family;
if (!skip_sharedc_config) {
/* disable cache */
write_cr0(read_cr0() | CR0_CacheDisable);
@@ -40,7 +52,6 @@ static inline __attribute__((always_inline)) void disable_cache_as_ram(void)
wrmsr(MTRR_FIX_4K_D8000, msr);
#endif
/* disable fixed mtrr from now on, it will be enabled by ramstage again */
msr = rdmsr(SYSCFG_MSR);
msr.lo &= ~(SYSCFG_MSR_MtrrFixDramEn | SYSCFG_MSR_MtrrFixDramModEn);
wrmsr(SYSCFG_MSR, msr);
@@ -54,7 +65,45 @@ static inline __attribute__((always_inline)) void disable_cache_as_ram(void)
enable_cache();
}
/* INVDWBINVD = 1 */
msr = rdmsr(0xc0010015);
msr.lo |= (0x1 << 4);
wrmsr(0xc0010015, msr);
family = amd_fam1x_cpu_family();
#if IS_ENABLED(CONFIG_CPU_AMD_MODEL_10XXX)
if (family >= 0x6f) {
/* Family 15h or later */
/* DisSS = 0 */
msr = rdmsr(0xc0011020);
msr.lo &= ~(0x1 << 28);
wrmsr(0xc0011020, msr);
if (!skip_sharedc_config) {
/* DisSpecTlbRld = 0 */
msr = rdmsr(0xc0011021);
msr.lo &= ~(0x1 << 9);
wrmsr(0xc0011021, msr);
/* Erratum 714: SpecNbReqDis = 0 */
msr = rdmsr(BU_CFG2_MSR);
msr.lo &= ~(0x1 << 8);
wrmsr(BU_CFG2_MSR, msr);
}
/* DisSpecTlbRld = 0 */
/* DisHwPf = 0 */
msr = rdmsr(0xc0011022);
msr.lo &= ~(0x1 << 4);
msr.lo &= ~(0x1 << 13);
wrmsr(0xc0011022, msr);
}
#endif
}
static void disable_cache_as_ram_bsp(void)
{
disable_cache_as_ram();
disable_cache_as_ram(0);
}

View File

@@ -88,7 +88,7 @@ static void vErrata343(void)
unsigned int uiMask = 0xFFFFFFF7;
msr = rdmsr(BU_CFG2_MSR);
msr.hi &= uiMask; // set bit 35 to 0
msr.hi &= uiMask; // IcDisSpecTlbWr (bit 35) = 0
wrmsr(BU_CFG2_MSR, msr);
#endif
}
@@ -96,6 +96,7 @@ static void vErrata343(void)
void post_cache_as_ram(void)
{
void *resume_backup_memory = NULL;
uint32_t family = amd_fam1x_cpu_family();
struct romstage_handoff *handoff;
handoff = romstage_handoff_find_or_add();
@@ -112,7 +113,10 @@ void post_cache_as_ram(void)
prepare_romstage_ramstack(resume_backup_memory);
/* from here don't store more data in CAR */
if (family < 0x6f) {
/* Family 10h or earlier */
vErrata343();
}
size_t car_size = car_data_size();
void *migrated_car = (void *)(CONFIG_RAMTOP - car_size);

View File

@@ -2,6 +2,7 @@
* This file is part of the coreboot project.
*
* Copyright (C) 2008 Advanced Micro Devices, Inc.
* Copyright (C) 2015 Timothy Pearson <tpearson@raptorengineeringinc.com>, Raptor Engineering
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,41 +22,65 @@
*/
static const struct {
u32 msr;
u32 revision;
uint64_t revision;
u32 platform;
u32 data_lo;
u32 data_hi;
u32 mask_lo;
u32 mask_hi;
} fam10_msr_default[] = {
{ TOP_MEM2, AMD_FAM10_ALL, AMD_PTYPE_ALL,
{ TOP_MEM2, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL,
0x00000000, 0x00000000,
0xFFFFFFFF, 0xFFFFFFFF },
{ SYSCFG, AMD_FAM10_ALL, AMD_PTYPE_ALL,
{ SYSCFG, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL,
3 << 21, 0x00000000,
3 << 21, 0x00000000 }, /* [MtrrTom2En]=1,[TOM2EnWB] = 1*/
{ HWCR, AMD_FAM10_ALL, AMD_PTYPE_ALL,
1 << 4, 0x00000000,
1 << 4, 0x00000000 }, /* [INVD_WBINVD]=1 */
{ MC1_CTL_MASK, AMD_OR_B2, AMD_PTYPE_ALL,
1 << 18, 0x00000000,
1 << 18, 0x00000000 }, /* Erratum 586: [DEIBP]=1 */
{ MC4_CTL_MASK, AMD_FAM10_ALL, AMD_PTYPE_ALL,
{ MC1_CTL_MASK, AMD_OR_B2, AMD_PTYPE_ALL,
1 << 15, 0x00000000,
1 << 15, 0x00000000 }, /* Erratum 593: [BSRP]=1 */
{ MC1_CTL_MASK, AMD_OR_C0, AMD_PTYPE_ALL,
1 << 15, 0x00000000,
1 << 15, 0x00000000 }, /* Erratum 739: [BSRP]=1 */
{ 0xc0011000, AMD_FAM15_ALL, AMD_PTYPE_ALL,
1 << 16, 0x00000000,
1 << 16, 0x00000000 }, /* Erratum 608: [bit 16]=1 */
{ 0xc0011000, AMD_OR_C0, AMD_PTYPE_ALL,
1 << 15, 0x00000000,
1 << 15, 0x00000000 }, /* Erratum 727: [bit 15]=1 */
{ MC4_CTL_MASK, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL,
0xF << 19, 0x00000000,
0xF << 19, 0x00000000 }, /* [RtryHt[0..3]]=1 */
{ MC4_CTL_MASK, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL,
1 << 10, 0x00000000,
1 << 10, 0x00000000 }, /* [GartTblWkEn]=1 */
{ DC_CFG, AMD_FAM10_ALL, AMD_PTYPE_SVR,
0x00000000, 0x00000004,
0x00000000, 0x0000000C }, /* [REQ_CTR] = 1 for Server */
0x00000000, 0x0000000C }, /* Family 10h: [REQ_CTR] = 1 for Server */
{ DC_CFG, AMD_DR_Bx, AMD_PTYPE_SVR,
0x00000000, 0x00000000,
0x00000000, 0x00000C00 }, /* Erratum 326 */
{ NB_CFG, AMD_FAM10_ALL, AMD_PTYPE_DC | AMD_PTYPE_MC,
{ NB_CFG, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_DC | AMD_PTYPE_MC,
0x00000000, 1 << 22,
0x00000000, 1 << 22 }, /* [ApicInitIDLo]=1 */
{ NB_CFG, AMD_FAM15_ALL, AMD_PTYPE_DC | AMD_PTYPE_MC,
1 << 23, 0x00000000,
1 << 23, 0x00000000 }, /* Erratum 663: [bit 23]=1 */
{ BU_CFG2, AMD_DR_Bx, AMD_PTYPE_ALL,
1 << 29, 0x00000000,
1 << 29, 0x00000000 }, /* For Bx Smash1GPages=1 */
@@ -68,6 +93,14 @@ static const struct {
0 << 1, 0x00000000,
1 << 1, 0x00000000 }, /* IDX_MATCH_ALL=0 */
{ IC_CFG, AMD_OR_C0, AMD_PTYPE_ALL,
0x00000000, 1 << (39-32),
0x00000000, 1 << (39-32)}, /* C0 or above [DisLoopPredictor]=1 */
{ IC_CFG, AMD_OR_C0, AMD_PTYPE_ALL,
0xf << 1, 0x00000000,
0xf << 1, 0x00000000}, /* C0 or above [DisIcWayFilter]=0xf */
{ BU_CFG, AMD_DR_LT_B3, AMD_PTYPE_ALL,
1 << 21, 0x00000000,
1 << 21, 0x00000000 }, /* Erratum #254 DR B1 BU_CFG[21]=1 */
@@ -76,19 +109,51 @@ static const struct {
1 << 23, 0x00000000,
1 << 23, 0x00000000 }, /* Erratum #309 BU_CFG[23]=1 */
{ BU_CFG, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0 << 10, 0x00000000,
1 << 10, 0x00000000 }, /* [DcacheAgressivePriority]=0 */
/* CPUID_EXT_FEATURES */
{ CPUIDFEATURES, AMD_FAM10_ALL, AMD_PTYPE_DC | AMD_PTYPE_MC,
{ CPUIDFEATURES, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_DC | AMD_PTYPE_MC,
1 << 28, 0x00000000,
1 << 28, 0x00000000 }, /* [HyperThreadFeatEn]=1 */
{ CPUIDFEATURES, AMD_FAM10_ALL, AMD_PTYPE_DC,
{ CPUIDFEATURES, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_DC,
0x00000000, 1 << (33-32),
0x00000000, 1 << (33-32) }, /* [ExtendedFeatEn]=1 */
{ DE_CFG, AMD_OR_B2, AMD_PTYPE_ALL,
1 << 10, 0x00000000,
1 << 10, 0x00000000 }, /* Bx [ResyncPredSingleDispDis]=1 */
{ BU_CFG2, AMD_DRBH_Cx, AMD_PTYPE_ALL,
0x00000000, 1 << (35-32),
0x00000000, 1 << (35-32) }, /* Erratum 343 (set to 0 after CAR, in post_cache_as_ram()/model_10xxx_init() ) */
{ BU_CFG3, AMD_OR_B2, AMD_PTYPE_ALL,
0x00000000, 1 << (42-32),
0x00000000, 1 << (42-32)}, /* Bx [PwcDisableWalkerSharing]=1 */
{ BU_CFG3, AMD_OR_C0, AMD_PTYPE_ALL,
1 << 22, 0x00000000,
1 << 22, 0x00000000}, /* C0 or above [PfcDoubleStride]=1 */
{ EX_CFG, AMD_OR_C0, AMD_PTYPE_ALL,
0x00000000, 1 << (54-32),
0x00000000, 1 << (54-32)}, /* C0 or above [LateSbzResync]=1 */
{ LS_CFG2, AMD_OR_C0, AMD_PTYPE_ALL,
1 << 23, 0x00000000,
1 << 23, 0x00000000}, /* C0 or above [DisScbThreshold]=1 */
{ LS_CFG2, AMD_OR_C0, AMD_PTYPE_ALL,
1 << 14, 0x00000000,
1 << 14, 0x00000000}, /* C0 or above [ForceSmcCheckFlowStDis]=1 */
{ LS_CFG2, AMD_OR_C0, AMD_PTYPE_ALL,
1 << 12, 0x00000000,
1 << 12, 0x00000000}, /* C0 or above [ForceBusLockDis]=1 */
{ OSVW_ID_Length, AMD_DR_Bx | AMD_DR_Cx | AMD_DR_Dx, AMD_PTYPE_ALL,
0x00000004, 0x00000000,
0x00000004, 0x00000000}, /* B0 or Above, OSVW_ID_Length is 0004h */
@@ -101,9 +166,45 @@ static const struct {
0x00000000, 1 << (50-32),
0x00000000, 1 << (50-32)}, /* D0 or Above, RdMmExtCfgQwEn*/
{ BU_CFG2, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0x00000000, 0x0 << (36-32),
0x00000000, 0x3 << (36-32)}, /* [ThrottleNbInterface]=0 */
{ BU_CFG2, AMD_FAM15_ALL, AMD_PTYPE_ALL,
1 << 10, 0x00000000,
1 << 10, 0x00000000}, /* [VicResyncChkEn]=1 */
{ BU_CFG2, AMD_FAM15_ALL, AMD_PTYPE_ALL,
1 << 11, 0x00000000,
1 << 11, 0x00000000}, /* Erratum 503: [bit 11]=1 */
{ CPU_ID_EXT_FEATURES_MSR, AMD_DR_Dx, AMD_PTYPE_ALL,
0x00000000, 1 << (51 - 32),
0x00000000, 1 << (51 - 32)}, /* G34_PKG | C32_PKG | S1G4_PKG | ASB2_PKG */
{ CPU_ID_EXT_FEATURES_MSR, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0x00000000, 1 << (56 - 32),
0x00000000, 1 << (56 - 32)}, /* [PerfCtrExtNB]=1 */
{ CPU_ID_EXT_FEATURES_MSR, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0x00000000, 1 << (55 - 32),
0x00000000, 1 << (55 - 32)}, /* [PerfCtrExtCore]=1 */
{ IBS_OP_DATA3, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0 << 16, 0x00000000,
1 << 16, 0x00000000}, /* [IbsDcMabHit]=0 */
{ MC4_MISC0, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0x00000000, 0x1 << (52-32),
0x00000000, 0xf << (52-32)}, /* [LvtOffset]=1 */
{ MC4_MISC1, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0x00000000, 0x1 << (52-32),
0x00000000, 0xf << (52-32)}, /* [LvtOffset]=1 */
{ MC4_MISC2, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0x00000000, 0x1 << (52-32),
0x00000000, 0xf << (52-32)}, /* [LvtOffset]=1 */
};
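For context, a hedged sketch of how entries in this table are consumed; the matching loop lives in cpuSetAMDMSR() (shown further down in this commit) and follows the same clear-mask-then-set-data pattern, so an entry can force a field to 0 as well as to 1:

/* Sketch only -- names mirror the table above and coreboot's MSR helpers. */
static void apply_msr_default_entry(int i, uint64_t revision, u32 platform)
{
	msr_t msr;

	if (!(fam10_msr_default[i].revision & revision) ||
	    !(fam10_msr_default[i].platform & platform))
		return;

	msr = rdmsr(fam10_msr_default[i].msr);
	msr.lo &= ~fam10_msr_default[i].mask_lo;
	msr.lo |= fam10_msr_default[i].data_lo;
	msr.hi &= ~fam10_msr_default[i].mask_hi;
	msr.hi |= fam10_msr_default[i].data_hi;
	wrmsr(fam10_msr_default[i].msr, msr);
}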
@@ -113,37 +214,46 @@ static const struct {
static const struct {
u8 function;
u16 offset;
u32 revision;
uint64_t revision;
u32 platform;
u32 data;
u32 mask;
} fam10_pci_default[] = {
/* Function 0 - HT Config */
{ 0, 0x68, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL,
0x000e0000, 0x000e0000 }, /* [19:17] for 8bit APIC config */
{ 0, 0x68, AMD_FAM10_ALL, AMD_PTYPE_ALL,
0x004E4800, 0x006E6800 }, /* [19:17] for 8bit APIC config,
[14:13] BufPriRel = 2h [11] RspPassPW set,
[22:21] DsNpReqLmt = 10b */
{ 0, 0x68, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL,
0x00400000, 0x00600000 }, /* [22:21] DsNpReqLmt = 10b */
{ 0, 0x68, AMD_FAM10_LT_D, AMD_PTYPE_ALL,
0x00004000, 0x00006000 }, /* [14:13] BufRelPri = 2h */
{ 0, 0x68, (AMD_FAM10_REV_D | AMD_FAM15_ALL), AMD_PTYPE_ALL,
0x00002000, 0x00006000 }, /* [14:13] BufRelPri = 1h */
{ 0, 0x68, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL,
0x00000800, 0x00000800 }, /* [11] RspPassPW = 1 */
/* Errata 281 Workaround */
{ 0, 0x68, (AMD_DR_B0 | AMD_DR_B1),
AMD_PTYPE_SVR, 0x00200000, 0x00600000 }, /* [22:21] DsNpReqLmt0 = 01b */
{ 0, 0x84, AMD_FAM10_ALL, AMD_PTYPE_ALL,
{ 0, 0x84, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL,
0x00002000, 0x00002000 }, /* [13] LdtStopTriEn = 1 */
{ 0, 0xA4, AMD_FAM10_ALL, AMD_PTYPE_ALL,
{ 0, 0xA4, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL,
0x00002000, 0x00002000 }, /* [13] LdtStopTriEn = 1 */
{ 0, 0xC4, AMD_FAM10_ALL, AMD_PTYPE_ALL,
{ 0, 0xC4, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL,
0x00002000, 0x00002000 }, /* [13] LdtStopTriEn = 1 */
{ 0, 0xE4, AMD_FAM10_ALL, AMD_PTYPE_ALL,
{ 0, 0xE4, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL,
0x00002000, 0x00002000 }, /* [13] LdtStopTriEn = 1 */
/* Link Global Retry Control Register */
{ 0, 0x150, AMD_FAM10_ALL, AMD_PTYPE_ALL,
{ 0, 0x150, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL,
0x00073900, 0x00073F00 },
/* Errata 351
@@ -168,14 +278,40 @@ static const struct {
0x00000000, 0x00000100 },
{ 0, 0x18C, AMD_FAM10_ALL, AMD_PTYPE_ALL,
0x00000000, 0x00000100 },
{ 0, 0x170, AMD_FAM10_ALL, AMD_PTYPE_ALL,
0x00000000, 0x00000100 },
/* Link Global Extended Control Register */
{ 0, 0x16C, AMD_FAM10_ALL, AMD_PTYPE_ALL,
0x00000014, 0x0000003F }, /* [15:13] ForceFullT0 = 0b,
* Set T0Time 14h per BKDG */
{ 0, 0x170, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0x00000100, 0x00000100 },
{ 0, 0x174, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0x00000100, 0x00000100 },
{ 0, 0x178, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0x00000100, 0x00000100 },
{ 0, 0x17C, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0x00000100, 0x00000100 },
{ 0, 0x180, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0x00000100, 0x00000100 },
{ 0, 0x184, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0x00000100, 0x00000100 },
{ 0, 0x188, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0x00000100, 0x00000100 },
{ 0, 0x18C, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0x00000100, 0x00000100 },
/* Link Global Extended Control Register */
{ 0, 0x16C, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0x00000014, 0x0000003F }, /* [15:13] ForceFullT0 = 111b,
* Set T0Time 26h per BKDG */
{ 0, 0x16C, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0x7 << 13, 0x7 << 13 }, /* [15:13] ForceFullT0 = 7h */
{ 0, 0x16C, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0x26, 0x3f }, /* [5:0] T0Time = 26h */
/* Function 1 - Map Init */
@@ -201,10 +337,10 @@ static const struct {
/* Function 2 - DRAM Controller */
/* Function 3 - Misc. Control */
{ 3, 0x40, AMD_FAM10_ALL, AMD_PTYPE_ALL,
{ 3, 0x40, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL,
0x00000100, 0x00000100 }, /* [8] MstrAbrtEn */
{ 3, 0x44, AMD_FAM10_ALL, AMD_PTYPE_ALL,
{ 3, 0x44, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL,
0x4A30005C, 0x4A30005C }, /* [30] SyncOnDramAdrParErrEn = 1,
[27] NbMcaToMstCpuEn = 1,
[25] DisPciCfgCpuErrRsp = 1,
@@ -216,8 +352,12 @@ static const struct {
[2] SyncOnUcEccEn = 1 */
/* XBAR buffer settings */
{ 3, 0x6C, AMD_FAM10_ALL, AMD_PTYPE_ALL,
0x00018052, 0x700780F7 },
{ 3, 0x6c, AMD_FAM10_ALL, AMD_PTYPE_ALL,
0x00018052, 0x700780f7 },
/* XBAR buffer settings */
{ 3, 0x6c, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0x10010052, 0x700700f7 },
/* Errata 281 Workaround */
{ 3, 0x6C, ( AMD_DR_B0 | AMD_DR_B1),
@@ -229,12 +369,18 @@ static const struct {
{ 3, 0x70, AMD_FAM10_ALL, AMD_PTYPE_ALL,
0x00041153, 0x777777F7 },
{ 3, 0x70, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0x10171155, 0x777777f7 },
{ 3, 0x70, AMD_FAM10_ALL, AMD_PTYPE_UMA,
0x61221151, 0x777777F7 },
{ 3, 0x74, AMD_FAM10_ALL, AMD_PTYPE_UMA,
0x00080101, 0x000F7777 },
{ 3, 0x74, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0x00172111, 0x77ff7777 },
{ 3, 0x7C, AMD_FAM10_ALL, AMD_PTYPE_ALL,
0x00090914, 0x707FFF1F },
@@ -242,12 +388,18 @@ static const struct {
{ 3, 0x7C, ( AMD_DR_B0 | AMD_DR_B1),
AMD_PTYPE_SVR, 0x00144514, 0x707FFF1F },
{ 3, 0x7C, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0x040d0f16, 0x07ffff1f },
{ 3, 0x7C, AMD_FAM10_ALL, AMD_PTYPE_UMA,
0x00070814, 0x007FFF1F },
{ 3, 0x140, AMD_FAM10_ALL, AMD_PTYPE_ALL,
0x00800756, 0x00F3FFFF },
{ 3, 0x140, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0x00a11755, 0x00f3ffff },
{ 3, 0x140, AMD_FAM10_ALL, AMD_PTYPE_UMA,
0x00C37756, 0x00F3FFFF },
@@ -259,6 +411,9 @@ static const struct {
AMD_PTYPE_SVR, 0x00000001, 0x0000000F },
/* [3:0] RspTok = 0001b */
{ 3, 0x144, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0x00000028, 0x000000ff },
{ 3, 0x148, AMD_FAM10_ALL, AMD_PTYPE_UMA,
0x8000052A, 0xD5FFFFFF },
@@ -266,41 +421,53 @@ static const struct {
{ 3, 0x80, AMD_FAM10_ALL, AMD_PTYPE_ALL,
0xE6002200, 0xFFFFFFFF },
/* ACPI Power State Control Reg1 */
{ 3, 0x80, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0xe20be200, 0xefefef00 },
/* ACPI Power State Control Reg2 */
{ 3, 0x84, AMD_FAM10_ALL, AMD_PTYPE_ALL,
0xA0E641E6, 0xFFFFFFFF },
/* ACPI Power State Control Reg2 */
{ 3, 0x84, AMD_FAM15_ALL, AMD_PTYPE_ALL,
0x01e200e2, 0xefef00ef },
{ 3, 0xA0, AMD_FAM10_ALL, AMD_PTYPE_MOB | AMD_PTYPE_DSK,
0x00000080, 0x00000080 }, /* [7] PSIVidEnable */
{ 3, 0xA0, AMD_DR_Bx, AMD_PTYPE_ALL,
0x00002800, 0x000003800 }, /* [13:11] PllLockTime = 5 */
{ 3, 0xA0, (AMD_FAM10_ALL & ~(AMD_DR_Bx)), AMD_PTYPE_ALL,
{ 3, 0xA0, ((AMD_FAM10_ALL | AMD_FAM15_ALL) & ~(AMD_DR_Bx)), AMD_PTYPE_ALL,
0x00000800, 0x000003800 }, /* [13:11] PllLockTime = 1 */
/* Reported Temp Control Register */
{ 3, 0xA4, AMD_FAM10_ALL, AMD_PTYPE_ALL,
{ 3, 0xA4, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL,
0x00000080, 0x00000080 }, /* [7] TempSlewDnEn = 1 */
/* Clock Power/Timing Control 0 Register */
{ 3, 0xD4, AMD_FAM10_ALL, AMD_PTYPE_ALL,
{ 3, 0xD4, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL,
0xC0000F00, 0xF0000F00 }, /* [31] NbClkDivApplyAll = 1,
[30:28] NbClkDiv = 100b,[11:8] ClkRampHystSel = 1111b */
/* Clock Power/Timing Control 1 Register */
{ 3, 0xD8, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL,
0x03000010, 0x0F000070 }, /* [6:4] VSRampTime = 1,
* [27:24] ReConDel = 3 */
/* Clock Power/Timing Control 1 Register */
{ 3, 0xD8, AMD_FAM10_ALL, AMD_PTYPE_ALL,
0x03000016, 0x0F000077 }, /* [6:4] VSRampTime = 1,
[2:0] VSSlamTime = 6, [27:24] ReConDel = 3 */
0x00000006, 0x00000007 }, /* [2:0] VSSlamTime = 6 */
/* Clock Power/Timing Control 2 Register */
{ 3, 0xDC, AMD_FAM10_ALL, AMD_PTYPE_ALL,
{ 3, 0xDC, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL,
0x00005000, 0x00007000 }, /* [14:12] NbsynPtrAdj = 5 */
/* Extended NB MCA Config Register */
{ 3, 0x180, AMD_FAM10_ALL, AMD_PTYPE_ALL,
{ 3, 0x180, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL,
0x007003E2, 0x007003E2 }, /* [22:20] = SyncFloodOn_Err = 7,
[9] SyncOnUncNbAryEn = 1 ,
[8] SyncOnProtEn = 1,
@@ -315,12 +482,17 @@ static const struct {
0x00400000, 0x00400000 },
/* L3 Control Register */
{ 3, 0x1B8, AMD_FAM10_ALL, AMD_PTYPE_ALL,
{ 3, 0x1b8, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL,
0x00001000, 0x00001000 }, /* [12] = L3PrivReplEn */
/* IBS Control Register */
{ 3, 0x1CC, AMD_FAM10_ALL, AMD_PTYPE_ALL,
{ 3, 0x1cc, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL,
0x00000100, 0x00000100 }, /* [8] = LvtOffsetVal */
/* Erratum 619 - Family 15h Bx
* System software should set F5x88[14] to 1b. */
{ 5, 0x88, AMD_OR_B2, AMD_PTYPE_ALL,
1 << 14, 1 << 14 },
};
@@ -329,7 +501,7 @@ static const struct {
*/
static const struct {
u16 htreg; /* HT Phy Register index */
u32 revision;
uint64_t revision;
u32 platform;
u32 linktype;
u32 data;
@@ -438,38 +610,38 @@ static const struct {
{ 0x530A, AMD_DR_ALL, AMD_PTYPE_ALL, HTPHY_LINKTYPE_ALL,
0x00004400, 0x00006400 }, /* HT_PHY_DLL_REG */
{ 0xCF, AMD_FAM10_ALL, AMD_PTYPE_ALL, HTPHY_LINKTYPE_HT3,
{ 0xCF, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL, HTPHY_LINKTYPE_HT3,
0x00000000, 0x000000FF }, /* Provide clear setting for logical
completeness */
{ 0xDF, AMD_FAM10_ALL, AMD_PTYPE_ALL, HTPHY_LINKTYPE_HT3,
{ 0xDF, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL, HTPHY_LINKTYPE_HT3,
0x00000000, 0x000000FF }, /* Provide clear setting for logical
completeness */
{ 0xCF, AMD_FAM10_ALL, AMD_PTYPE_ALL, HTPHY_LINKTYPE_HT1,
{ 0xCF, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL, HTPHY_LINKTYPE_HT1,
0x0000006D, 0x000000FF }, /* HT_PHY_HT1_FIFO_PTR_OPT_VALUE */
{ 0xDF, AMD_FAM10_ALL, AMD_PTYPE_ALL, HTPHY_LINKTYPE_HT1,
{ 0xDF, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL, HTPHY_LINKTYPE_HT1,
0x0000006D, 0x000000FF }, /* HT_PHY_HT1_FIFO_PTR_OPT_VALUE */
/* Link Phy Receiver Loop Filter Registers */
{ 0xD1, AMD_FAM10_ALL, AMD_PTYPE_ALL, HTPHY_LINKTYPE_HT3,
{ 0xD1, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL, HTPHY_LINKTYPE_HT3,
0x08040000, 0x3FFFC000 }, /* [29:22] LfcMax = 20h,
[21:14] LfcMin = 10h */
{ 0xC1, AMD_FAM10_ALL, AMD_PTYPE_ALL, HTPHY_LINKTYPE_HT3,
{ 0xC1, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL, HTPHY_LINKTYPE_HT3,
0x08040000, 0x3FFFC000 }, /* [29:22] LfcMax = 20h,
[21:14] LfcMin = 10h */
{ 0xD1, AMD_FAM10_ALL, AMD_PTYPE_ALL, HTPHY_LINKTYPE_HT1,
{ 0xD1, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL, HTPHY_LINKTYPE_HT1,
0x04020000, 0x3FFFC000 }, /* [29:22] LfcMax = 10h,
[21:14] LfcMin = 08h */
{ 0xC1, AMD_FAM10_ALL, AMD_PTYPE_ALL, HTPHY_LINKTYPE_HT1,
{ 0xC1, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL, HTPHY_LINKTYPE_HT1,
0x04020000, 0x3FFFC000 }, /* [29:22] LfcMax = 10h,
[21:14] LfcMin = 08h */
{ 0xC0, AMD_FAM10_ALL, AMD_PTYPE_ALL, HTPHY_LINKTYPE_ALL,
{ 0xC0, (AMD_FAM10_ALL | AMD_FAM15_ALL), AMD_PTYPE_ALL, HTPHY_LINKTYPE_ALL,
0x40040000, 0xe01F0000 }, /* [31:29] RttCtl = 02h,
[20:16] RttIndex = 04h */
};

View File

@@ -2,6 +2,7 @@
* This file is part of the coreboot project.
*
* Copyright (C) 2007 Advanced Micro Devices, Inc.
* Copyright (C) 2015 Timothy Pearson <tpearson@raptorengineeringinc.com>, Raptor Engineering
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -39,7 +40,7 @@ Fam10 Bios and Kernel Development Guide #31116, rev 3.48, April 22, 2010
3.- 2.4.2.7 dualPlaneOnly(dev)
4.- 2.4.2.8 applyBoostFIDOffset(dev)
4.- 2.4.2.8 applyBoostFIDOffset(dev, nodeid)
5.- enableNbPState1(dev)
@@ -138,9 +139,9 @@ static void enable_fid_change(u8 fid)
}
}
static void applyBoostFIDOffset( device_t dev ) {
static void applyBoostFIDOffset(device_t dev, uint32_t nodeid) {
// BKDG 2.4.2.8
// revision E only, but E is apparently not supported yet, therefore untested
// Fam10h revision E only, but E is apparently not supported yet, therefore untested
if ((cpuid_edx(0x80000007) & CPB_MASK)
&& ((cpuid_ecx(0x80000008) & NC_MASK) == 5) ) {
u32 core = get_node_core_id_x().coreid;
@@ -151,12 +152,20 @@ static void applyBoostFIDOffset( device_t dev ) {
msr.lo &= ~PS_CPU_FID_MASK;
msr.lo |= cpuFid ;
wrmsr(PS_REG_BASE , msr);
} else if (is_fam15h()) {
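/* Assumed from the Fam15h BKDG: D18F4x15C has BoostSrc in bits [1:0] and
 * NumBoostStates in bits [4:2]; setting BoostSrc to 1 enables boosted
 * P-states whenever at least one boost state is fused on. */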
uint32_t dword = pci_read_config32(NODE_PCI(nodeid, 4), 0x15c);
uint8_t boost_count = (dword >> 2) & 0x7;
if (boost_count > 0) {
/* Enable boost */
dword &= ~0x3;
dword |= 0x1;
pci_write_config32(NODE_PCI(nodeid, 4), 0x15c, dword);
}
}
}
static void enableNbPState1( device_t dev ) {
u32 cpuRev = mctGetLogicalCPUID(0xFF);
uint64_t cpuRev = mctGetLogicalCPUID(0xFF);
if (cpuRev & AMD_FAM10_C3) {
u32 nbPState = (pci_read_config32(dev, 0x1F0) & NB_PSTATE_MASK);
if ( nbPState){
@@ -198,7 +207,7 @@ static u8 setPStateMaxVal( device_t dev ) {
static void dualPlaneOnly( device_t dev ) {
// BKDG 2.4.2.7
u32 cpuRev = mctGetLogicalCPUID(0xFF);
uint64_t cpuRev = mctGetLogicalCPUID(0xFF);
if ((mctGetProcessorPackageType() == AMD_PKGTYPE_AM3_2r2)
&& (cpuRev & AMD_DR_Cx)) { // should be rev C or rev E but there's no constant for E
if ( (pci_read_config32(dev, 0x1FC) & DUAL_PLANE_ONLY_MASK)
@@ -278,12 +287,16 @@ static void recalculateVsSlamTimeSettingOnCorePre(device_t dev)
*/
/* Determine if this is a PVI or SVI system */
dtemp = pci_read_config32(dev, 0xA0);
if (is_fam15h()) {
pviModeFlag = 0;
} else {
dtemp = pci_read_config32(dev, 0xa0);
if (dtemp & PVI_MODE)
pviModeFlag = 1;
else
pviModeFlag = 0;
}
/* Get P0's voltage */
/* MSRC001_00[68:64] are not programmed yet when called from
@@ -510,6 +523,13 @@ static void config_nb_syn_ptr_adj(device_t dev, u32 cpuRev) {
}
static void config_acpi_pwr_state_ctrl_regs(device_t dev, u32 cpuRev, u8 procPkg) {
if (is_fam15h()) {
/* Family 15h BKDG Rev. 3.14 D18F3x80 recommended settings */
pci_write_config32(dev, 0x80, 0xe20be281);
/* Family 15h BKDG Rev. 3.14 D18F3x84 recommended settings */
pci_write_config32(dev, 0x84, 0x01e200e2);
} else {
/* step 1, chapter 2.4.2.6 of AMD Fam 10 BKDG #31116 Rev 3.48 22.4.2010 */
u32 dword;
u32 c1= 1;
@@ -537,13 +557,13 @@ static void config_acpi_pwr_state_ctrl_regs(device_t dev, u32 cpuRev, u8 procPkg
dword = (c1 << 24) | (0xE641E6);
pci_write_config32(dev, 0x84, dword);
/* FIXME: BKDG Table 100 says if the link is at a Gen1
frequency and the chipset does not support a 10us minimum LDTSTOP
assertion time, then { If ASB2 && SVI then smaf001 = F6h else
smaf001=87h. } else ... I hardly know what it means or how to check
it from here, so I bluntly assume it is false and code here the else,
which is easier */
* frequency and the chipset does not support a 10us minimum LDTSTOP
* assertion time, then { If ASB2 && SVI then smaf001 = F6h else
* smaf001=87h. } else ... I hardly know what it means or how to check
* it from here, so I bluntly assume it is false and code here the else,
* which is easier
*/
u32 smaf001 = 0xE6;
if (cpuRev & AMD_DR_Bx ) {
@@ -564,6 +584,7 @@ which is easier */
| (smaf001 << 8) | 0x81;
pci_write_config32(dev, 0x80, dword);
}
}
static void prep_fid_change(void)
{
@@ -579,7 +600,7 @@ static void prep_fid_change(void)
for (i = 0; i < nodes; i++) {
printk(BIOS_DEBUG, "Prep FID/VID Node:%02x\n", i);
dev = NODE_PCI(i, 3);
u32 cpuRev = mctGetLogicalCPUID(0xFF) ;
uint64_t cpuRev = mctGetLogicalCPUID(0xFF) ;
u8 procPkg = mctGetProcessorPackageType();
setVSRamp(dev);
@@ -640,7 +661,7 @@ static void waitCurrentPstate(u32 target_pstate){
if (pstate_msr.lo != target_pstate) {
msr_t limit_msr = rdmsr(0xc0010061);
printk(BIOS_ERR, "*** Time out waiting for P-state %01x. Current P-state %01x P-state current limit MSRC001_0061=%02x\n", target_pstate, pstate_msr.lo, limit_msr.lo);
printk(BIOS_ERR, "*** Time out waiting for P-state %01x. Current P-state %01x P-state current limit MSRC001_0061=%08x %08x\n", target_pstate, pstate_msr.lo, limit_msr.hi, limit_msr.lo);
do { // should we just go on instead ?
pstate_msr = rdmsr(CUR_PSTATE_MSR);
@@ -650,6 +671,7 @@ static void waitCurrentPstate(u32 target_pstate){
static void set_pstate(u32 nonBoostedPState) {
msr_t msr;
uint8_t skip_wait;
// Transition P0 for calling core.
msr = rdmsr(0xC0010062);
@@ -657,12 +679,21 @@ static void set_pstate(u32 nonBoostedPState) {
msr.lo = nonBoostedPState;
wrmsr(0xC0010062, msr);
/* Wait for P0 to set. */
waitCurrentPstate(nonBoostedPState);
if (is_fam15h()) {
/* Do not wait for the first (even) set of cores to transition on Family 15h systems */
if ((cpuid_ebx(0x00000001) & 0x01000000))
skip_wait = 0;
else
skip_wait = 1;
} else {
skip_wait = 0;
}
if (!skip_wait) {
/* Wait for core to transition to P0 */
waitCurrentPstate(nonBoostedPState);
}
}
static void UpdateSinglePlaneNbVid(void)
{
@@ -752,11 +783,14 @@ static u32 needs_NB_COF_VID_update(void)
u8 nodes;
u8 i;
if (is_fam15h())
return 0;
/* If any node has nb_cof_vid_update set all nodes need an update. */
nodes = get_nodes();
nb_cof_vid_update = 0;
for (i = 0; i < nodes; i++) {
u32 cpuRev = mctGetLogicalCPUID(i) ;
uint64_t cpuRev = mctGetLogicalCPUID(i);
u32 nbCofVidUpdateDefined = (cpuRev & (AMD_FAM10_LT_D));
if (nbCofVidUpdateDefined
&& (pci_read_config32(NODE_PCI(i, 3), 0x1FC)
@@ -780,8 +814,10 @@ static u32 init_fidvid_core(u32 nodeid, u32 coreid)
/* Steps 1-6 of BIOS NB COF and VID Configuration
* for SVI and Single-Plane PVI Systems. BKDG 2.4.2.9 #31116 rev 3.48
*/
dev = NODE_PCI(nodeid, 3);
if (is_fam15h())
pvimode = 0;
else
pvimode = pci_read_config32(dev, PW_CTL_MISC) & PVI_MODE;
reg1fc = pci_read_config32(dev, 0x1FC);
@@ -856,7 +892,8 @@ static void init_fidvid_bsp_stage1(u32 ap_apicid, void *gp)
while (--loop > 0) {
if (lapic_remote_read(ap_apicid, LAPIC_MSG_REG, &readback) != 0)
continue;
if ((readback & 0x3f) == F10_APSTATE_RESET) {
if (((readback & 0x3f) == F10_APSTATE_RESET)
|| (is_fam15h() && ((readback & 0x3f) == F10_APSTATE_ASLEEP))) {
timeout = 0;
break; /* target ap is in stage 1 */
}
@@ -944,6 +981,9 @@ static void init_fidvid_stage2(u32 apicid, u32 nodeid)
/* If any node has nb_cof_vid_update set all nodes need an update. */
dev = NODE_PCI(nodeid, 3);
if (is_fam15h())
pvimode = 0;
else
pvimode = (pci_read_config32(dev, 0xA0) >> 8) & 1;
reg1fc = pci_read_config32(dev, 0x1FC);
nbvid = (reg1fc >> 7) & 0x7F;
@@ -965,16 +1005,18 @@ static void init_fidvid_stage2(u32 apicid, u32 nodeid)
pci_write_config32(dev, 0xA0, dtemp);
dualPlaneOnly(dev);
applyBoostFIDOffset(dev);
applyBoostFIDOffset(dev, nodeid);
enableNbPState1(dev);
finalPstateChange();
if (!is_fam15h()) {
/* Set TSC to tick at the P0 ndfid rate */
msr = rdmsr(HWCR);
msr.lo |= 1 << 24;
wrmsr(HWCR, msr);
}
}
#if CONFIG_SET_FIDVID_STORE_AP_APICID_AT_FIRST
@@ -1007,7 +1049,6 @@ static int init_fidvid_bsp(u32 bsp_apicid, u32 nodes)
/* Steps 1-6 of BIOS NB COF and VID Configuration
* for SVI and Single-Plane PVI Systems.
*/
fv.common_fid = init_fidvid_core(0, 0);
print_debug_fv("BSP fid = ", fv.common_fid);

View File

@@ -26,9 +26,12 @@
#include <northbridge/amd/amdfam10/raminit_amdmct.c>
#include <reset.h>
#if IS_ENABLED(CONFIG_SET_FIDVID)
static void prep_fid_change(void);
static void init_fidvid_stage2(u32 apicid, u32 nodeid);
void cpuSetAMDMSR(void);
#endif
void cpuSetAMDMSR(uint8_t node_id);
#if CONFIG_PCI_IO_CFG_EXT
static void set_EnableCf8ExtCfg(void)
@@ -47,43 +50,38 @@ static void set_EnableCf8ExtCfg(void) { }
typedef void (*process_ap_t) (u32 apicid, void *gp);
//core_range = 0 : all cores
//core range = 1 : core 0 only
//core range = 2 : cores other than core0
uint32_t get_boot_apic_id(uint8_t node, uint32_t core) {
uint32_t ap_apicid;
static void for_each_ap(u32 bsp_apicid, u32 core_range, process_ap_t process_ap,
void *gp)
{
// here assume the OS don't change our apicid
u32 ap_apicid;
uint32_t nb_cfg_54;
uint32_t siblings;
uint32_t cores_found;
u32 nodes;
u32 siblings;
u32 disable_siblings;
u32 cores_found;
u32 nb_cfg_54;
int i, j;
u32 ApicIdCoreIdSize;
uint8_t fam15h = 0;
uint8_t rev_gte_d = 0;
uint8_t dual_node = 0;
uint32_t f3xe8;
uint32_t family;
uint32_t model;
/* get_nodes define in ht_wrapper.c */
nodes = get_nodes();
if (!CONFIG_LOGICAL_CPUS ||
read_option(multi_core, 0) != 0) { // 0 means multi core
disable_siblings = 1;
} else {
disable_siblings = 0;
}
uint32_t ApicIdCoreIdSize;
/* Assume that all nodes are the same stepping, otherwise we can use
nb_cfg_54 from the BSP for all nodes */
nb_cfg_54 = read_nb_cfg_54();
f3xe8 = pci_read_config32(NODE_PCI(0, 3), 0xe8);
if (cpuid_eax(0x80000001) >= 0x8)
family = model = cpuid_eax(0x80000001);
model = ((model & 0xf0000) >> 12) | ((model & 0xf0) >> 4);
family = ((family & 0xf00000) >> 16) | ((family & 0xf00) >> 8);
if (family >= 0x6f) {
/* Family 15h or later */
fam15h = 1;
nb_cfg_54 = 1;
}
if ((model >= 0x8) || fam15h)
/* Revision D or later */
rev_gte_d = 1;
@@ -99,11 +97,64 @@ static void for_each_ap(u32 bsp_apicid, u32 core_range, process_ap_t process_ap,
siblings = 3; //quad core
}
for (i = 0; i < nodes; i++) {
cores_found = get_core_num_in_bsp(i);
cores_found = get_core_num_in_bsp(node);
if (siblings > cores_found)
siblings = cores_found;
if (dual_node) {
ap_apicid = 0;
if (fam15h) {
ap_apicid |= ((node >> 1) & 0x3) << 5; /* Node ID */
ap_apicid |= ((node & 0x1) * (siblings + 1)) + core; /* Core ID */
} else {
if (nb_cfg_54) {
ap_apicid |= ((node >> 1) & 0x3) << 4; /* Node ID */
ap_apicid |= ((node & 0x1) * (siblings + 1)) + core; /* Core ID */
} else {
ap_apicid |= node & 0x3; /* Node ID */
ap_apicid |= (((node & 0x1) * (siblings + 1)) + core) << 4; /* Core ID */
}
}
} else {
if (fam15h) {
ap_apicid = (node * (siblings + 1)) + core;
} else {
ap_apicid = node * (nb_cfg_54 ? (siblings + 1) : 1) +
core * (nb_cfg_54 ? 1 : 64);
}
}
return ap_apicid;
}
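A quick host-side check of the Family 15h dual-node branch above, using the board from the commit message (ASUS KGPE-D16 with an Opteron 6380: two internal nodes per package, 8 cores per internal node, so siblings = 7 is assumed here):

#include <assert.h>
#include <stdint.h>

static uint32_t fam15h_dual_node_apicid(uint32_t node, uint32_t core, uint32_t siblings)
{
	uint32_t apicid = 0;
	apicid |= ((node >> 1) & 0x3) << 5;			/* Node ID */
	apicid |= ((node & 0x1) * (siblings + 1)) + core;	/* Core ID */
	return apicid;
}

int main(void)
{
	assert(fam15h_dual_node_apicid(0, 0, 7) == 0x00);	/* BSP */
	assert(fam15h_dual_node_apicid(0, 7, 7) == 0x07);
	assert(fam15h_dual_node_apicid(1, 0, 7) == 0x08);	/* second internal node, same die */
	assert(fam15h_dual_node_apicid(2, 0, 7) == 0x20);	/* a second package would start here */
	return 0;
}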
//core_range = 0 : all cores
//core range = 1 : core 0 only
//core range = 2 : cores other than core0
static void for_each_ap(u32 bsp_apicid, u32 core_range, process_ap_t process_ap,
void *gp)
{
// here we assume the OS doesn't change our APIC ID
u32 ap_apicid;
u32 nodes;
u32 disable_siblings;
u32 cores_found;
int i, j;
/* get_nodes define in ht_wrapper.c */
nodes = get_nodes();
if (!CONFIG_LOGICAL_CPUS ||
read_option(multi_core, 0) != 0) { // 0 means multi core
disable_siblings = 1;
} else {
disable_siblings = 0;
}
for (i = 0; i < nodes; i++) {
cores_found = get_core_num_in_bsp(i);
u32 jstart, jend;
if (core_range == 2) {
@@ -119,21 +170,7 @@ static void for_each_ap(u32 bsp_apicid, u32 core_range, process_ap_t process_ap,
}
for (j = jstart; j <= jend; j++) {
if (dual_node) {
ap_apicid = 0;
if (nb_cfg_54) {
ap_apicid |= ((i >> 1) & 0x3) << 4; /* Node ID */
ap_apicid |= ((i & 0x1) * (siblings + 1)) + j; /* Core ID */
} else {
ap_apicid |= i & 0x3; /* Node ID */
ap_apicid |= (((i & 0x1) * (siblings + 1)) + j) << 4; /* Core ID */
}
} else {
ap_apicid =
i * (nb_cfg_54 ? (siblings + 1) : 1) +
j * (nb_cfg_54 ? 1 : 64);
}
ap_apicid = get_boot_apic_id(i, j);
#if CONFIG_ENABLE_APIC_EXT_ID && (CONFIG_APIC_ID_OFFSET > 0)
#if !CONFIG_LIFT_BSP_APIC_ID
@@ -193,7 +230,7 @@ void print_apicid_nodeid_coreid(u32 apicid, struct node_core_id id,
apicid, id.nodeid, id.coreid);
}
static u32 wait_cpu_state(u32 apicid, u32 state)
uint32_t wait_cpu_state(uint32_t apicid, uint32_t state, uint32_t state2)
{
u32 readback = 0;
u32 timeout = 1;
@@ -201,7 +238,7 @@ static u32 wait_cpu_state(u32 apicid, u32 state)
while (--loop > 0) {
if (lapic_remote_read(apicid, LAPIC_MSG_REG, &readback) != 0)
continue;
if ((readback & 0x3f) == state || (readback & 0x3f) == F10_APSTATE_RESET) {
if ((readback & 0x3f) == state || (readback & 0x3f) == state2 || (readback & 0x3f) == F10_APSTATE_RESET) {
timeout = 0;
break; //target cpu is in stage started
}
@@ -218,7 +255,7 @@ static u32 wait_cpu_state(u32 apicid, u32 state)
static void wait_ap_started(u32 ap_apicid, void *gp)
{
u32 timeout;
timeout = wait_cpu_state(ap_apicid, F10_APSTATE_STARTED);
timeout = wait_cpu_state(ap_apicid, F10_APSTATE_STARTED, F10_APSTATE_ASLEEP);
printk(BIOS_DEBUG, "* AP %02x", ap_apicid);
if (timeout) {
printk(BIOS_DEBUG, " timed out:%08x\n", timeout);
@@ -254,16 +291,27 @@ static void enable_apic_ext_id(u32 node)
pci_write_config32(NODE_HT(node), 0x68, val);
}
static void STOP_CAR_AND_CPU(void)
static void STOP_CAR_AND_CPU(uint8_t skip_sharedc_config, uint32_t apicid)
{
msr_t msr;
uint32_t family;
family = amd_fam1x_cpu_family(); // inline
if (family < 0x6f) {
/* Family 10h or earlier */
/* Disable L2 IC to L3 connection (Only for CAR) */
msr = rdmsr(BU_CFG2);
msr.lo &= ~(1 << ClLinesToNbDis);
wrmsr(BU_CFG2, msr);
}
disable_cache_as_ram(skip_sharedc_config); // inline
/* Mark the core as sleeping */
lapic_write(LAPIC_MSG_REG, (apicid << 24) | F10_APSTATE_ASLEEP);
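/* This ASLEEP marker is what real_start_other_core() (changed later in
 * this commit) polls for via wait_cpu_state() before it releases the
 * second core of each Fam15h compute unit. */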
disable_cache_as_ram(); // inline
/* stop all cores except node0/core0 the bsp .... */
stop_this_cpu();
}
@@ -272,6 +320,7 @@ static u32 init_cpus(u32 cpu_init_detectedx, struct sys_info *sysinfo)
{
u32 bsp_apicid = 0;
u32 apicid;
uint8_t set_mtrrs;
struct node_core_id id;
/* Please refer to the calculations and explanation in cache_as_ram.inc before modifying these values */
@@ -358,7 +407,7 @@ static u32 init_cpus(u32 cpu_init_detectedx, struct sys_info *sysinfo)
*/
update_microcode(cpuid_eax(1));
cpuSetAMDMSR();
cpuSetAMDMSR(id.nodeid);
#if CONFIG_SET_FIDVID
#if CONFIG_LOGICAL_CPUS && CONFIG_SET_FIDVID_CORE0_ONLY
@@ -381,10 +430,29 @@ static u32 init_cpus(u32 cpu_init_detectedx, struct sys_info *sysinfo)
}
#endif
if (is_fam15h()) {
/* core 1 on node 0 is special; to avoid corrupting the
* BSP do not alter MTRRs on that core */
if (apicid == 1)
set_mtrrs = 0;
else
set_mtrrs = !!(apicid & 0x1);
} else {
set_mtrrs = 1;
}
/* AP is ready, configure MTRRs and go to sleep */
if (set_mtrrs)
set_var_mtrr(0, 0x00000000, CONFIG_RAMTOP, MTRR_TYPE_WRBACK);
STOP_CAR_AND_CPU();
printk(BIOS_DEBUG, "Disabling CAR on AP %02x\n", apicid);
if (is_fam15h()) {
/* Only modify the MSRs on the odd cores (the last cores to finish booting) */
STOP_CAR_AND_CPU(!set_mtrrs, apicid);
} else {
/* Modify MSRs on all cores */
STOP_CAR_AND_CPU(0, apicid);
}
printk(BIOS_DEBUG,
"\nAP %02x should be halted but you are reading this....\n",
@@ -492,7 +560,7 @@ static void setup_remote_node(u8 node)
}
#endif /* CONFIG_MAX_PHYSICAL_CPUS > 1 */
static void AMD_Errata281(u8 node, u32 revision, u32 platform)
static void AMD_Errata281(u8 node, uint64_t revision, u32 platform)
{
/* Workaround for Transaction Scheduling Conflict in
* Northbridge Cross Bar. Implement XCS Token adjustment
@@ -790,7 +858,7 @@ static void AMD_SetHtPhyRegister(u8 node, u8 link, u8 entry)
} while (!(val & HTPHY_IS_COMPLETE_MASK));
}
void cpuSetAMDMSR(void)
void cpuSetAMDMSR(uint8_t node_id)
{
/* This routine loads the CPU with default settings in fam10_msr_default
* table. It must be run after Cache-As-RAM has been enabled, and
@@ -800,7 +868,8 @@ void cpuSetAMDMSR(void)
*/
msr_t msr;
u8 i;
u32 revision, platform;
u32 platform;
uint64_t revision;
printk(BIOS_DEBUG, "cpuSetAMDMSR ");
@@ -820,6 +889,49 @@ void cpuSetAMDMSR(void)
}
AMD_Errata298();
if (revision & AMD_FAM15_ALL) {
uint32_t f5x80;
uint8_t enabled;
uint8_t compute_unit_count = 0;
f5x80 = pci_read_config32(NODE_PCI(node_id, 5), 0x80);
enabled = f5x80 & 0xf;
if (enabled == 0x1)
compute_unit_count = 1;
if (enabled == 0x3)
compute_unit_count = 2;
if (enabled == 0x7)
compute_unit_count = 3;
if (enabled == 0xf)
compute_unit_count = 4;
msr = rdmsr(BU_CFG2);
msr.lo &= ~(0x3 << 6); /* ThrottleNbInterface[1:0] */
msr.lo |= (((compute_unit_count - 1) & 0x3) << 6);
wrmsr(BU_CFG2, msr);
}
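The enabled-unit decode above only ever sees the contiguous patterns 0x1/0x3/0x7/0xf, for which it is simply a population count of F5x80[3:0]; a hedged equivalent for reference:

/* Sketch: count enabled compute units from D18F5x80[3:0]. */
static uint8_t fam15h_compute_unit_count(uint32_t f5x80)
{
	uint8_t enabled = f5x80 & 0xf;
	uint8_t count = 0;

	while (enabled) {
		count += enabled & 1;
		enabled >>= 1;
	}
	return count;
}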
/* Revision C0 and above */
if (revision & AMD_OR_C0) {
uint32_t f3x1fc = pci_read_config32(NODE_PCI(node_id, 3), 0x1fc);
msr = rdmsr(FP_CFG);
msr.hi &= ~(0x7 << (42-32)); /* DiDtCfg4 */
msr.hi |= (((f3x1fc >> 17) & 0x7) << (42-32));
msr.hi &= ~(0x1 << (41-32)); /* DiDtCfg5 */
msr.hi |= (((f3x1fc >> 22) & 0x1) << (41-32));
msr.hi &= ~(0x1 << (40-32)); /* DiDtCfg3 */
msr.hi |= (((f3x1fc >> 16) & 0x1) << (40-32));
msr.hi &= ~(0x7 << (32-32)); /* DiDtCfg1 (1) */
msr.hi |= (((f3x1fc >> 11) & 0x7) << (32-32));
msr.lo &= ~(0x1f << 27); /* DiDtCfg1 (2) */
msr.lo |= (((f3x1fc >> 6) & 0x1f) << 27);
msr.lo &= ~(0x3 << 25); /* DiDtCfg2 */
msr.lo |= (((f3x1fc >> 14) & 0x3) << 25);
msr.lo &= ~(0x1f << 18); /* DiDtCfg0 */
msr.lo |= (((f3x1fc >> 1) & 0x1f) << 18);
msr.lo &= ~(0x1 << 16); /* DiDtMode */
msr.lo |= ((f3x1fc & 0x1) << 16);
wrmsr(FP_CFG, msr);
}
printk(BIOS_DEBUG, " done\n");
}
@@ -831,9 +943,10 @@ static void cpuSetAMDPCI(u8 node)
* that it is run for the first core on each node
*/
u8 i, j;
u32 revision, platform;
u32 platform;
u32 val;
u8 offset;
uint64_t revision;
printk(BIOS_DEBUG, "cpuSetAMDPCI %02d", node);
@@ -895,6 +1008,7 @@ static void cpuSetAMDPCI(u8 node)
}
#ifdef UNUSED_CODE
/* Clearing the MCA registers is apparently handled in the ramstage CPU Function 3 driver */
static void cpuInitializeMCA(void)
{
/* Clears Machine Check Architecture (MCA) registers, which power on

View File

@@ -35,6 +35,23 @@
#define MCI_STATUS 0x401
static inline uint8_t is_fam15h(void)
{
uint8_t fam15h = 0;
uint32_t family;
family = cpuid_eax(0x80000001);
family = ((family & 0xf00000) >> 16) | ((family & 0xf00) >> 8);
if (family >= 0x6f)
/* Family 15h or later */
fam15h = 1;
return fam15h;
}
static volatile uint8_t fam15h_startup_flags[MAX_NODES_SUPPORTED][MAX_CORES_SUPPORTED] = {{ 0 }};
static void model_10xxx_init(device_t dev)
{
u8 i;
@@ -43,13 +60,44 @@ static void model_10xxx_init(device_t dev)
#if CONFIG_LOGICAL_CPUS
u32 siblings;
#endif
uint8_t delay_start;
id = get_node_core_id(read_nb_cfg_54()); /* nb_cfg_54 can not be set */
printk(BIOS_DEBUG, "nodeid = %02d, coreid = %02d\n", id.nodeid, id.coreid);
if (is_fam15h())
delay_start = !!(id.coreid & 0x1);
else
delay_start = 0;
/* Turn on caching if we haven't already */
x86_enable_cache();
if (!delay_start) {
/* Initialize all variable MTRRs except the first pair.
* This prevents Linux from having to correct an inconsistent
* MTRR setup, which would crash Family 15h CPUs due to the
* compute unit structure sharing MTRR MSRs between AP cores.
*/
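/* For reference (standard x86 MTRR layout, not specific to this commit):
 * MSRs 0x200..0x20f are MTRRphysBase0/Mask0 through MTRRphysBase7/Mask7 in
 * base/mask pairs, so starting the loop below at 0x2 leaves only the first
 * variable MTRR pair untouched. */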
msr.hi = 0x00000000;
msr.lo = 0x00000000;
disable_cache();
for (i = 0x2; i < 0x10; i++) {
wrmsr(0x00000200 | i, msr);
}
enable_cache();
/* Set up other MTRRs */
amd_setup_mtrrs();
} else {
while (!fam15h_startup_flags[id.nodeid][id.coreid - 1]) {
/* Wait for CU first core startup */
}
}
x86_mtrr_check();
disable_cache();
@@ -89,12 +137,19 @@ static void model_10xxx_init(device_t dev)
msr.hi &= ~(1 << (46 - 32));
wrmsr(NB_CFG_MSR, msr);
if (is_fam15h()) {
msr = rdmsr(BU_CFG3_MSR);
/* Set CombineCr0Cd */
msr.hi |= (1 << (49-32));
wrmsr(BU_CFG3_MSR, msr);
} else {
msr = rdmsr(BU_CFG2_MSR);
/* Clear ClLinesToNbDis */
msr.lo &= ~(1 << 15);
/* Clear bit 35 as per Erratum 343 */
msr.hi &= ~(1 << (35-32));
wrmsr(BU_CFG2_MSR, msr);
}
if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER)) {
printk(BIOS_DEBUG, "Initializing SMM ASeg memory\n");
@@ -127,6 +182,7 @@ static void model_10xxx_init(device_t dev)
msr.lo |= (1 << 0);
wrmsr(HWCR_MSR, msr);
fam15h_startup_flags[id.nodeid][id.coreid] = 1;
}
static struct device_operations cpu_dev_ops = {
@@ -143,15 +199,17 @@ static struct cpu_device_id cpu_table[] = {
{ X86_VENDOR_AMD, 0x100f22 },
{ X86_VENDOR_AMD, 0x100f23 },
{ X86_VENDOR_AMD, 0x100f40 }, /* RB-C0 */
{ X86_VENDOR_AMD, 0x100F42 }, /* RB-C2 */
{ X86_VENDOR_AMD, 0x100F43 }, /* RB-C3 */
{ X86_VENDOR_AMD, 0x100F52 }, /* BL-C2 */
{ X86_VENDOR_AMD, 0x100F62 }, /* DA-C2 */
{ X86_VENDOR_AMD, 0x100F63 }, /* DA-C3 */
{ X86_VENDOR_AMD, 0x100F80 }, /* HY-D0 */
{ X86_VENDOR_AMD, 0x100F81 }, /* HY-D1 */
{ X86_VENDOR_AMD, 0x100F91 }, /* HY-D1 */
{ X86_VENDOR_AMD, 0x100FA0 }, /* PH-E0 */
{ X86_VENDOR_AMD, 0x100f42 }, /* RB-C2 */
{ X86_VENDOR_AMD, 0x100f43 }, /* RB-C3 */
{ X86_VENDOR_AMD, 0x100f52 }, /* BL-C2 */
{ X86_VENDOR_AMD, 0x100f62 }, /* DA-C2 */
{ X86_VENDOR_AMD, 0x100f63 }, /* DA-C3 */
{ X86_VENDOR_AMD, 0x100f80 }, /* HY-D0 */
{ X86_VENDOR_AMD, 0x100f81 }, /* HY-D1 */
{ X86_VENDOR_AMD, 0x100f91 }, /* HY-D1 */
{ X86_VENDOR_AMD, 0x100fa0 }, /* PH-E0 */
{ X86_VENDOR_AMD, 0x600f12 }, /* OR-B2 */
{ X86_VENDOR_AMD, 0x600f20 }, /* OR-C0 */
{ 0, 0 },
};

View File

@@ -70,8 +70,7 @@ static void write_pstates_for_core(u8 pstate_num, u16 *pstate_feq, u32 *pstate_p
/* Revision C or greater single-link processor */
cpuid1 = cpuid(0x80000008);
acpigen_write_PSD_package(0, (cpuid1.ecx & 0xff) + 1, SW_ALL);
}
else {
} else {
/* Find the local APIC ID for the specified core ID */
struct device* cpu;
int cpu_index = 0;
@@ -95,7 +94,9 @@ static void write_pstates_for_core(u8 pstate_num, u16 *pstate_feq, u32 *pstate_p
}
/*
* For details of this algorithm, please refer to the BDKG 3.62 page 69
* For details of this algorithm, please refer to:
* Family 10h BKDG 3.62 page 69
* Family 15h BKDG 3.14 page 74
*
* WARNING: The core count algorithm below assumes that all processors
* are identical, with the same number of active cores. While the BKDG
@@ -145,6 +146,13 @@ void amd_generate_powernow(u32 pcontrol_blk, u8 plen, u8 onlyBSP)
uint8_t node_count;
uint8_t cores_per_node;
uint8_t total_core_count;
uint8_t fam15h;
uint8_t fam10h_rev_e = 0;
/* Detect Revision E processors via method used in fidvid.c */
if ((cpuid_edx(0x80000007) & CPB_MASK)
&& ((cpuid_ecx(0x80000008) & NC_MASK) == 5))
fam10h_rev_e = 1;
/*
* Based on the CPU socket type, cmp_cap and pwr_lmt, get the power limit.
@@ -152,11 +160,17 @@ void amd_generate_powernow(u32 pcontrol_blk, u8 plen, u8 onlyBSP)
* cmp_cap : 0x0 SingleCore ; 0x1 DualCore ; 0x2 TripleCore ; 0x3 QuadCore ; 0x4 QuintupleCore ; 0x5 HexCore
*/
printk(BIOS_INFO, "Pstates algorithm ...\n");
fam15h = !!(mctGetLogicalCPUID(0) & AMD_FAM15_ALL);
/* Get number of cores */
dtemp = pci_read_config32(dev_find_slot(0, PCI_DEVFN(0x18, 3)), 0xE8);
if (fam15h) {
cmp_cap = pci_read_config32(dev_find_slot(0, PCI_DEVFN(0x18, 5)), 0x84) & 0xff;
} else {
dtemp = pci_read_config32(dev_find_slot(0, PCI_DEVFN(0x18, 3)), 0xe8);
cmp_cap = (dtemp & 0x3000) >> 12;
if (mctGetLogicalCPUID(0) & AMD_FAM10_REV_D) /* revision D */
if (mctGetLogicalCPUID(0) & (AMD_FAM10_REV_D | AMD_FAM15_ALL)) /* revision D or higher */
cmp_cap |= (dtemp & 0x8000) >> 13;
}
/* Get number of nodes */
dtemp = pci_read_config32(dev_find_slot(0, PCI_DEVFN(0x18, 0)), 0x60);
node_count = ((dtemp & 0x70) >> 4) + 1;
@@ -165,6 +179,14 @@ void amd_generate_powernow(u32 pcontrol_blk, u8 plen, u8 onlyBSP)
/* Compute total number of cores installed in system */
total_core_count = cores_per_node * node_count;
/* Get number of boost states */
uint8_t boost_count = 0;
dtemp = pci_read_config32(dev_find_slot(0, PCI_DEVFN(0x18, 4)), 0x15c);
if (fam10h_rev_e)
boost_count = (dtemp >> 2) & 0x1;
else if (mctGetLogicalCPUID(0) & AMD_FAM15_ALL)
boost_count = (dtemp >> 2) & 0x7;
Pstate_num = 0;
/* See if the CPUID(0x80000007) returned EDX[7]==1b */
@@ -201,7 +223,7 @@ void amd_generate_powernow(u32 pcontrol_blk, u8 plen, u8 onlyBSP)
/* Get PSmax's index */
msr = rdmsr(0xC0010061);
Pstate_max = (uint8_t) ((msr.lo >> PS_MAX_VAL_SHFT) & BIT_MASK_3);
Pstate_max = (uint8_t) ((msr.lo >> PS_MAX_VAL_SHFT) & (fam15h ? BIT_MASK_7 : BIT_MASK_3));
/* Determine if all enabled Pstates have the same fidvid */
uint8_t i;
@@ -215,10 +237,14 @@ void amd_generate_powernow(u32 pcontrol_blk, u8 plen, u8 onlyBSP)
}
}
/* Family 15h uses slightly different PSmax numbering */
if (fam15h)
Pstate_max++;
/* Populate tables with all Pstate information */
for (Pstate_num = 0; Pstate_num < Pstate_max; Pstate_num++) {
/* Get power state information */
msr = rdmsr(0xC0010064 + Pstate_num);
msr = rdmsr(0xC0010064 + Pstate_num + boost_count);
cpufid = (msr.lo & 0x3f);
cpudid = (msr.lo & 0x1c0) >> 6;
cpuvid = (msr.lo & 0xfe00) >> 9;
@@ -228,12 +254,10 @@ void amd_generate_powernow(u32 pcontrol_blk, u8 plen, u8 onlyBSP)
if (pviModeFlag) {
if (cpuvid >= 0x20) {
core_voltage = 7625 - (((cpuvid - 0x20) * 10000) / 80);
}
else {
} else {
core_voltage = 15500 - ((cpuvid * 10000) / 40);
}
}
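/* Worked example of the parallel-VID (PVI) decode above, in units of
 * 0.1 mV: VID 0x10 -> 15500 - 0x10 * 250 = 11500 (1.15 V, 25 mV steps);
 * VID 0x24 -> 7625 - 4 * 125 = 7125 (0.7125 V, 12.5 mV steps). */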
else {
} else {
cpuvid = cpuvid & 0x7f;
if (cpuvid >= 0x7c)
core_voltage = 0;

View File

@@ -29,6 +29,10 @@
#include <cpu/amd/mtrr.h>
#include <cpu/cpu.h>
#include <cpu/amd/model_10xxx_rev.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pnp.h>
#include <device/pci_ops.h>
/* The maximum length of CPU names is 48 bytes, including the final NULL byte.
* If you change these names your BIOS will _NOT_ pass the AMD validation and
@@ -208,9 +212,50 @@ static int strcpymax(char *dst, const char *src, int buflen)
return i;
}
#define NAME_STRING_MAXLEN 48
int init_processor_name(void)
{
msr_t msr;
ssize_t i;
char program_string[NAME_STRING_MAXLEN];
u32 *p_program_string = (u32 *)program_string;
uint8_t fam15h = 0;
uint32_t family;
family = cpuid_eax(0x80000001);
family = ((family & 0xf00000) >> 16) | ((family & 0xf00) >> 8);
if (family >= 0x6f)
/* Family 15h or later */
fam15h = 1;
/* null the string */
memset(program_string, 0, sizeof(program_string));
if (fam15h) {
/* Family 15h or later */
uint32_t dword;
device_t cpu_fn5_dev = dev_find_slot(0, PCI_DEVFN(0x18, 5));
pci_write_config32(cpu_fn5_dev, 0x194, 0);
dword = pci_read_config32(cpu_fn5_dev, 0x198);
if (dword == 0) {
strcpymax(program_string, sample, sizeof(program_string));
} else {
/* Assemble the string from PCI configuration register contents */
for (i = 0; i < 12; i++) {
pci_write_config32(cpu_fn5_dev, 0x194, i);
p_program_string[i] = pci_read_config32(cpu_fn5_dev, 0x198);
}
/* Correctly place the null terminator */
for (i = (NAME_STRING_MAXLEN - 2); i > 0; i--) {
if (program_string[i] != 0x20)
break;
}
program_string[i + 1] = 0;
}
} else {
/* variable names taken from fam10 revision guide for clarity */
u32 BrandId; /* CPUID Fn8000_0001_EBX */
u8 String1; /* BrandID[14:11] */
@@ -220,13 +265,9 @@ int init_processor_name(void)
u8 PkgTyp; /* BrandID[31:28] */
u8 NC; /* CPUID Fn8000_0008_ECX */
const char *processor_name_string = unknown;
char program_string[48];
u32 *p_program_string = (u32 *)program_string;
msr_t msr;
int i, j = 0, str2_checkNC = 1;
int j = 0, str2_checkNC = 1;
const struct str_s *str, *str2;
/* Find out which CPU brand it is */
BrandId = cpuid_ebx(0x80000001);
String1 = (u8)((BrandId >> 11) & 0x0F);
@@ -236,9 +277,6 @@ int init_processor_name(void)
PkgTyp = (u8)((BrandId >> 28) & 0x0F);
NC = (u8)(cpuid_ecx(0x80000008) & 0xFF);
/* null the string */
memset(program_string, 0, sizeof(program_string));
if (!Model) {
processor_name_string = Pg ? thermal : sample;
goto done;
@@ -302,10 +340,10 @@ int init_processor_name(void)
}
}
done:
strcpymax(&program_string[j], processor_name_string,
sizeof(program_string) - j);
}
printk(BIOS_DEBUG, "CPU model: %s\n", program_string);

View File

@@ -24,6 +24,7 @@ struct id_mapping {
static u16 get_equivalent_processor_rev_id(u32 orig_id) {
static const struct id_mapping id_mapping_table[] = {
/* Family 10h */
{ 0x100f00, 0x1000 },
{ 0x100f01, 0x1000 },
{ 0x100f02, 0x1000 },
@@ -38,8 +39,13 @@ static u16 get_equivalent_processor_rev_id(u32 orig_id) {
{ 0x100f62, 0x1062 }, /* DA-C2 */
{ 0x100f63, 0x1043 }, /* DA-C3 */
{ 0x100f81, 0x1081 }, /* HY-D1 */
{ 0x100f91, 0x1081 }, /* HY-D1 */
{ 0x100fa0, 0x10A0 }, /* PH-E0 */
/* Family 15h */
{ 0x600f12, 0x6012 }, /* OR-B2 */
{ 0x600f20, 0x6020 }, /* OR-C0 */
/* Array terminator */
{ 0xffffff, 0x0000 },
};

View File

@@ -199,7 +199,7 @@ static void enable_apic_ext_id(u32 node)
static void STOP_CAR_AND_CPU(void)
{
disable_cache_as_ram(); // inline
disable_cache_as_ram(0); // inline
/* stop all cores except node0/core0 the bsp .... */
stop_this_cpu();
}

View File

@@ -2,6 +2,7 @@
* This file is part of the coreboot project.
*
* Copyright (C) 2007 Advanced Micro Devices, Inc.
* Copyright (C) 2015 Timothy Pearson <tpearson@raptorengineeringinc.com>, Raptor Engineering
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -22,9 +23,33 @@
#include "cpu/amd/quadcore/quadcore_id.c"
/* get_boot_apic_id and wait_cpu_state located in init_cpus.c */
uint32_t get_boot_apic_id(uint8_t node, uint32_t core);
uint32_t wait_cpu_state(uint32_t apicid, uint32_t state, uint32_t state2);
static inline uint8_t is_fam15h(void)
{
uint8_t fam15h = 0;
uint32_t family;
family = cpuid_eax(0x80000001);
family = ((family & 0xf00000) >> 16) | ((family & 0xf00) >> 8);
if (family >= 0x6f)
/* Family 15h or later */
fam15h = 1;
return fam15h;
}
static u32 get_core_num_in_bsp(u32 nodeid)
{
u32 dword;
if (is_fam15h()) {
/* Family 15h moved CmpCap to F5x84 [7:0] */
dword = pci_read_config32(NODE_PCI(nodeid, 5), 0x84);
dword &= 0xff;
} else {
dword = pci_read_config32(NODE_PCI(nodeid, 3), 0xe8);
dword >>= 12;
/* Bit 15 is CmpCap[2] since Revision D. */
@@ -32,6 +57,7 @@ static u32 get_core_num_in_bsp(u32 nodeid)
dword = ((dword & 8) >> 1) | (dword & 3);
else
dword &= 3;
}
return dword;
}
@@ -46,17 +72,56 @@ static u8 set_apicid_cpuid_lo(void)
return 1;
}
static void real_start_other_core(u32 nodeid, u32 cores)
static void real_start_other_core(uint32_t nodeid, uint32_t cores)
{
u32 dword, i;
ssize_t i;
uint32_t dword;
printk(BIOS_DEBUG, "Start other core - nodeid: %02x cores: %02x\n", nodeid, cores);
/* set PCI_DEV(0, 0x18+nodeid, 3), 0x44 bit 27 to redirect all MC4
accesses and error logging to core0 */
dword = pci_read_config32(NODE_PCI(nodeid, 3), 0x44);
dword |= 1 << 27; // NbMcaToMstCpuEn bit
dword |= 1 << 30; /* SyncFloodOnDramAdrParErr=1 */
dword |= 1 << 27; /* NbMcaToMstCpuEn=1 */
dword |= 1 << 21; /* SyncFloodOnAnyUcErr=1 */
dword |= 1 << 20; /* SyncFloodOnWDT=1 */
dword |= 1 << 2; /* SyncFloodOnDramUcEcc=1 */
pci_write_config32(NODE_PCI(nodeid, 3), 0x44, dword);
if (is_fam15h()) {
uint32_t core_activation_flags = 0;
uint32_t active_cores = 0;
/* Set PCI_DEV(0, 0x18+nodeid, 0), 0x1dc bits 7:1 to start cores */
dword = pci_read_config32(NODE_PCI(nodeid, 0), 0x1dc);
for (i = 1; i < cores + 1; i++) {
core_activation_flags |= 1 << i;
}
/* Start the first core of each compute unit */
active_cores |= core_activation_flags & 0x55;
pci_write_config32(NODE_PCI(nodeid, 0), 0x1dc, dword | active_cores);
/* Each core shares a single set of MTRR registers with
* another core in the same compute unit, therefore, it
* is important that one core in each CU starts in advance
* of the other in order to avoid one core stomping all over
* the other core's settings.
*/
/* Wait for the first core of each compute unit to start... */
uint32_t timeout;
for (i = 1; i < cores + 1; i++) {
if (!(i & 0x1)) {
uint32_t ap_apicid = get_boot_apic_id(nodeid, i);
timeout = wait_cpu_state(ap_apicid, F10_APSTATE_ASLEEP, F10_APSTATE_ASLEEP);
}
}
/* Start the second core of each compute unit */
active_cores |= core_activation_flags & 0xaa;
pci_write_config32(NODE_PCI(nodeid, 0), 0x1dc, dword | active_cores);
} else {
// set PCI_DEV(0, 0x18+nodeid, 0), 0x68 bit 5 to start core1
dword = pci_read_config32(NODE_PCI(nodeid, 0), 0x68);
dword |= 1 << 5;
@@ -70,6 +135,7 @@ static void real_start_other_core(u32 nodeid, u32 cores)
pci_write_config32(NODE_PCI(nodeid, 0), 0x168, dword);
}
}
}
//it is running on core0 of node0
static void start_other_cores(void)
@@ -87,10 +153,9 @@ static void start_other_cores(void)
for (nodeid = 0; nodeid < nodes; nodeid++) {
u32 cores = get_core_num_in_bsp(nodeid);
printk(BIOS_DEBUG, "init node: %02x cores: %02x \n", nodeid, cores);
printk(BIOS_DEBUG, "init node: %02x cores: %02x pass 1 \n", nodeid, cores);
if (cores > 0) {
real_start_other_core(nodeid, cores);
}
}
}
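A standalone sketch, not coreboot code, of how the two activation waves in real_start_other_core() decompose for a hypothetical eight-core node (cores = 7, four compute units):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cores = 7;		/* non-BSP cores reported for the node */
	uint32_t flags = 0, i;

	for (i = 1; i < cores + 1; i++)	/* same loop as the patch */
		flags |= 1 << i;	/* -> 0xfe */

	/* Wave 1: first core of each compute unit (even core numbers) */
	printf("wave 1 mask: 0x%02x\n", flags & 0x55);	/* -> 0x54 */
	/* Wave 2: second core of each compute unit (odd core numbers)  */
	printf("wave 2 mask: 0x%02x\n", flags & 0xaa);	/* -> 0xaa */
	return 0;
}
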


@@ -39,9 +39,12 @@ struct node_core_id get_node_core_id(u32 nb_cfg_54)
{
struct node_core_id id;
uint8_t apicid;
uint8_t fam15h = 0;
uint8_t rev_gte_d = 0;
uint8_t dual_node = 0;
uint32_t f3xe8;
uint32_t family;
uint32_t model;
#ifdef __PRE_RAM__
f3xe8 = pci_read_config32(NODE_PCI(0, 3), 0xe8);
@@ -49,7 +52,17 @@ struct node_core_id get_node_core_id(u32 nb_cfg_54)
f3xe8 = pci_read_config32(get_node_pci(0, 3), 0xe8);
#endif
if (cpuid_eax(0x80000001) >= 0x8)
family = model = cpuid_eax(0x80000001);
model = ((model & 0xf0000) >> 12) | ((model & 0xf0) >> 4);
family = ((family & 0xf00000) >> 16) | ((family & 0xf00) >> 8);
if (family >= 0x6f) {
/* Family 15h or later */
fam15h = 1;
nb_cfg_54 = 1;
}
if ((model >= 0x8) || fam15h)
/* Revision D or later */
rev_gte_d = 1;
@@ -63,7 +76,13 @@ struct node_core_id get_node_core_id(u32 nb_cfg_54)
*/
apicid = (cpuid_ebx(1) >> 24) & 0xff;
if( nb_cfg_54) {
if (rev_gte_d && dual_node) {
if (fam15h && dual_node) {
id.coreid = apicid & 0x1f;
id.nodeid = (apicid & 0x60) >> 5;
} else if (fam15h && !dual_node) {
id.coreid = apicid & 0xf;
id.nodeid = (apicid & 0x70) >> 4;
} else if (rev_gte_d && dual_node) {
id.coreid = apicid & 0xf;
id.nodeid = (apicid & 0x30) >> 4;
} else if (rev_gte_d && !dual_node) {
@@ -86,7 +105,25 @@ struct node_core_id get_node_core_id(u32 nb_cfg_54)
}
}
if (rev_gte_d && dual_node) {
if (fam15h && dual_node) {
/* Coreboot expects each separate processor die to be on a different nodeid.
* Since the code above returns nodeid 0 even on internal node 1 some fixup is needed...
*/
uint32_t f5x84;
uint8_t core_count;
#ifdef __PRE_RAM__
f5x84 = pci_read_config32(NODE_PCI(0, 5), 0x84);
#else
f5x84 = pci_read_config32(get_node_pci(0, 5), 0x84);
#endif
core_count = (f5x84 & 0xff) + 1;
id.nodeid = id.nodeid * 2;
if (id.coreid >= core_count) {
id.nodeid += 1;
id.coreid = id.coreid - core_count;
}
} else if (rev_gte_d && dual_node) {
/* Coreboot expects each separate processor die to be on a different nodeid.
* Since the code above returns nodeid 0 even on internal node 1 some fixup is needed...
*/
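Putting the Fam15h dual-node decode and the nodeid fixup above together, a standalone worked example (the APIC ID and core count values are hypothetical):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t apicid = 0x29;		/* hypothetical initial APIC ID */
	uint8_t core_count = 8;		/* F5x84[7:0] + 1, hypothetical */
	uint8_t coreid = apicid & 0x1f;
	uint8_t nodeid = (apicid & 0x60) >> 5;

	/* Each die must appear as its own nodeid, so split the package. */
	nodeid = nodeid * 2;
	if (coreid >= core_count) {
		nodeid += 1;
		coreid = coreid - core_count;
	}
	printf("node %u core %u\n", nodeid, coreid);	/* -> node 3 core 1 */
	return 0;
}
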


@@ -2,6 +2,7 @@
* This file is part of the coreboot project.
*
* Copyright (C) 2007 Advanced Micro Devices, Inc.
* Copyright (C) 2015 Timothy Pearson <tpearson@raptorengineeringinc.com>, Raptor Engineering
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -28,7 +29,13 @@
#define IC_CFG_MSR 0xC0011021
#define DC_CFG_MSR 0xC0011022
#define BU_CFG_MSR 0xC0011023
#define FP_CFG_MSR 0xC0011028
#define DE_CFG_MSR 0xC0011029
#define BU_CFG2_MSR 0xC001102A
#define BU_CFG3_MSR 0xC001102B
#define EX_CFG_MSR 0xC001102C
#define LS_CFG2_MSR 0xC001102D
#define IBS_OP_DATA3_MSR 0xC0011037
#define CPU_ID_FEATURES_MSR 0xC0011004
#define CPU_ID_HYPER_EXT_FEATURES 0xC001100d


@@ -127,7 +127,7 @@ void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
post_code(0x33);
cpuSetAMDMSR();
cpuSetAMDMSR(0);
post_code(0x34);
amd_ht_init(sysinfo);


@@ -119,7 +119,7 @@ void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
post_code(0x33);
cpuSetAMDMSR();
cpuSetAMDMSR(0);
post_code(0x34);
amd_ht_init(sysinfo);


@@ -121,7 +121,7 @@ void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
post_code(0x33);
cpuSetAMDMSR();
cpuSetAMDMSR(0);
post_code(0x34);
amd_ht_init(sysinfo);


@@ -227,7 +227,7 @@ void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
post_code(0x33);
cpuSetAMDMSR();
cpuSetAMDMSR(0);
post_code(0x34);
amd_ht_init(sysinfo);


@@ -121,7 +121,7 @@ void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
post_code(0x33);
cpuSetAMDMSR();
cpuSetAMDMSR(0);
post_code(0x34);
amd_ht_init(sysinfo);


@@ -241,7 +241,7 @@ void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
post_code(0x33);
cpuSetAMDMSR();
cpuSetAMDMSR(0);
post_code(0x34);
amd_ht_init(sysinfo);


@@ -358,7 +358,7 @@ void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
post_code(0x33);
cpuSetAMDMSR();
cpuSetAMDMSR(0);
post_code(0x34);
amd_ht_init(sysinfo);


@@ -123,7 +123,7 @@ void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
post_code(0x33);
cpuSetAMDMSR();
cpuSetAMDMSR(0);
post_code(0x34);
amd_ht_init(sysinfo);


@@ -123,7 +123,7 @@ void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
post_code(0x33);
cpuSetAMDMSR();
cpuSetAMDMSR(0);
post_code(0x34);
amd_ht_init(sysinfo);


@@ -124,7 +124,7 @@ void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
post_code(0x33);
cpuSetAMDMSR();
cpuSetAMDMSR(0);
post_code(0x34);
amd_ht_init(sysinfo);


@@ -128,7 +128,7 @@ void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
post_code(0x33);
cpuSetAMDMSR();
cpuSetAMDMSR(0);
post_code(0x34);
amd_ht_init(sysinfo);


@@ -118,7 +118,7 @@ void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
post_code(0x33);
cpuSetAMDMSR();
cpuSetAMDMSR(0);
post_code(0x34);
amd_ht_init(sysinfo);


@@ -118,7 +118,7 @@ void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
post_code(0x33);
cpuSetAMDMSR();
cpuSetAMDMSR(0);
post_code(0x34);
amd_ht_init(sysinfo);


@@ -121,7 +121,7 @@ void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
post_code(0x33);
cpuSetAMDMSR();
cpuSetAMDMSR(0);
post_code(0x34);
amd_ht_init(sysinfo);


@@ -133,7 +133,7 @@ void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
post_code(0x33);
cpuSetAMDMSR();
cpuSetAMDMSR(0);
post_code(0x34);
amd_ht_init(sysinfo);


@@ -121,7 +121,7 @@ void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
post_code(0x33);
cpuSetAMDMSR();
cpuSetAMDMSR(0);
post_code(0x34);
amd_ht_init(sysinfo);


@@ -126,7 +126,7 @@ void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
post_code(0x33);
cpuSetAMDMSR();
cpuSetAMDMSR(0);
post_code(0x34);
amd_ht_init(sysinfo);


@@ -146,7 +146,7 @@ void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
post_code(0x33);
cpuSetAMDMSR();
cpuSetAMDMSR(0);
post_code(0x34);
amd_ht_init(sysinfo);


@@ -142,7 +142,7 @@ void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
post_code(0x33);
cpuSetAMDMSR();
cpuSetAMDMSR(0);
post_code(0x34);
amd_ht_init(sysinfo);


@@ -210,7 +210,7 @@ void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
post_code(0x33);
cpuSetAMDMSR();
cpuSetAMDMSR(0);
post_code(0x34);
amd_ht_init(sysinfo);


@@ -132,7 +132,7 @@ void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
post_code(0x33);
cpuSetAMDMSR();
cpuSetAMDMSR(0);
post_code(0x34);
/* TODO: The Kernel must support 12 processor, otherwise the interrupt


@@ -145,7 +145,7 @@ void cache_as_ram_main(unsigned long bist, unsigned long cpu_init_detectedx)
post_code(0x33);
cpuSetAMDMSR();
cpuSetAMDMSR(0);
post_code(0x34);
amd_ht_init(sysinfo);


@@ -95,7 +95,7 @@ endif
config S3_DATA_SIZE
int
default 16384
default 32768
depends on (HAVE_ACPI_RESUME)
config S3_DATA_POS


@@ -2,6 +2,8 @@ ifeq ($(CONFIG_NORTHBRIDGE_AMD_AMDFAM10),y)
ramstage-y += northbridge.c
ramstage-y += misc_control.c
ramstage-y += link_control.c
ramstage-y += nb_control.c
romstage-y += amdfam10_util.c
ramstage-y += amdfam10_util.c


@@ -958,9 +958,12 @@ that are corresponding to 0x01, 0x02, 0x03, 0x05, 0x06, 0x07
#define LAPIC_MSG_REG 0x380
#define F10_APSTATE_STARTED 0x13 // start of AP execution
#define F10_APSTATE_STOPPED 0x14 // allow AP to stop
#define F10_APSTATE_ASLEEP 0x14 // AP sleeping
#define F10_APSTATE_STOPPED 0x15 // allow AP to stop
#define F10_APSTATE_RESET 0x01 // waiting for warm reset
#define MAX_CORES_SUPPORTED 128
#include "nums.h"
#ifdef __PRE_RAM__
@@ -1034,7 +1037,6 @@ struct sys_info {
struct MCTStatStruc MCTstat;
struct DCTStatStruc DCTstatA[NODE_NUMS];
} __attribute__((packed));
#ifdef __PRE_RAM__


@@ -30,14 +30,14 @@ u32 Get_NB32(u32 dev, u32 reg)
}
#endif
u32 mctGetLogicalCPUID(u32 Node)
uint64_t mctGetLogicalCPUID(u32 Node)
{
/* Converts the CPUID to a logical ID MASK that is used to check
CPU version support versions */
u32 dev;
u32 val, valx;
u32 family, model, stepping;
u32 ret;
uint64_t ret;
if (Node == 0xFF) { /* current node */
val = cpuid_eax(0x80000001);
@@ -96,9 +96,16 @@ u32 mctGetLogicalCPUID(u32 Node)
case 0x100a0:
ret = AMD_PH_E0;
break;
case 0x15012:
case 0x1501f:
ret = AMD_OR_B2;
break;
case 0x15020:
ret = AMD_OR_C0;
break;
default:
/* FIXME: maybe we should die() here. */
printk(BIOS_ERR, "FIXME! CPU Version unknown or not supported! \n");
printk(BIOS_ERR, "FIXME! CPU Version unknown or not supported! %08x\n", valx);
ret = 0;
}


@@ -0,0 +1,86 @@
/*
* This file is part of the coreboot project.
*
* Copyright (C) 2015 Timothy Pearson <tpearson@raptorengineeringinc.com>, Raptor Engineering
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
/* Configure various power control registers, including processor
* boost support.
*/
#include <console/console.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <device/pci_ops.h>
#include <pc80/mc146818rtc.h>
#include <lib.h>
#include <cpu/amd/model_10xxx_rev.h>
#include "amdfam10.h"
static inline uint8_t is_fam15h(void)
{
uint8_t fam15h = 0;
uint32_t family;
family = cpuid_eax(0x80000001);
family = ((family & 0xf00000) >> 16) | ((family & 0xf00) >> 8);
if (family >= 0x6f)
/* Family 15h or later */
fam15h = 1;
return fam15h;
}
static void nb_control_init(struct device *dev)
{
uint32_t dword;
printk(BIOS_DEBUG, "NB: Function 4 Link Control.. ");
if (is_fam15h()) {
/* Enable APM */
dword = pci_read_config32(dev, 0x15c);
dword |= (0x1 << 7); /* ApmMasterEn = 1 */
pci_write_config32(dev, 0x15c, dword);
}
printk(BIOS_DEBUG, "done.\n");
}
static struct device_operations mcf4_ops = {
.read_resources = pci_dev_read_resources,
.set_resources = pci_dev_set_resources,
.enable_resources = pci_dev_enable_resources,
.init = nb_control_init,
.scan_bus = 0,
.ops_pci = 0,
};
static const struct pci_driver mcf4_driver_fam10 __pci_driver = {
.ops = &mcf4_ops,
.vendor = PCI_VENDOR_ID_AMD,
.device = 0x1204,
};
static const struct pci_driver mcf4_driver_fam15 __pci_driver = {
.ops = &mcf4_ops,
.vendor = PCI_VENDOR_ID_AMD,
.device = 0x1604,
};


@@ -4,6 +4,7 @@
* Copyright (C) 2003 by Eric Biederman
* Copyright (C) Stefan Reinauer
* Copyright (C) 2007 Advanced Micro Devices, Inc.
* Copyright (C) 2015 Timothy Pearson <tpearson@raptorengineeringinc.com>, Raptor Engineering
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -148,3 +149,9 @@ static const struct pci_driver mcf3_driver __pci_driver = {
.vendor = PCI_VENDOR_ID_AMD,
.device = 0x1203,
};
static const struct pci_driver mcf3_driver_fam15 __pci_driver = {
.ops = &mcf3_ops,
.vendor = PCI_VENDOR_ID_AMD,
.device = 0x1603,
};


@@ -0,0 +1,85 @@
/*
* This file is part of the coreboot project.
*
* Copyright (C) 2015 Timothy Pearson <tpearson@raptorengineeringinc.com>, Raptor Engineering
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
/* Configure various power control registers, including processor boost
* and TDP monitoring support.
*/
#include <console/console.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <device/pci_ops.h>
#include <pc80/mc146818rtc.h>
#include <lib.h>
#include <cpu/amd/model_10xxx_rev.h>
#include "amdfam10.h"
static void nb_control_init(struct device *dev)
{
uint32_t dword;
uint32_t f5x80;
uint8_t cu_enabled;
uint8_t compute_unit_count = 0;
printk(BIOS_DEBUG, "NB: Function 5 Northbridge Control.. ");
/* Determine the number of active compute units on this node */
f5x80 = pci_read_config32(dev, 0x80);
cu_enabled = f5x80 & 0xf;
if (cu_enabled == 0x1)
compute_unit_count = 1;
if (cu_enabled == 0x3)
compute_unit_count = 2;
if (cu_enabled == 0x7)
compute_unit_count = 3;
if (cu_enabled == 0xf)
compute_unit_count = 4;
/* Configure Processor TDP Running Average */
dword = pci_read_config32(dev, 0xe0);
dword &= ~0xf; /* RunAvgRange = 0x9 */
dword |= 0x9;
pci_write_config32(dev, 0xe0, dword);
/* Configure northbridge P-states */
dword = pci_read_config32(dev, 0xe0);
dword &= ~(0x7 << 9); /* NbPstateThreshold = compute_unit_count */
dword |= (compute_unit_count & 0x7) << 9;
pci_write_config32(dev, 0xe0, dword);
printk(BIOS_DEBUG, "done.\n");
}
static struct device_operations mcf5_ops = {
.read_resources = pci_dev_read_resources,
.set_resources = pci_dev_set_resources,
.enable_resources = pci_dev_enable_resources,
.init = nb_control_init,
.scan_bus = 0,
.ops_pci = 0,
};
static const struct pci_driver mcf5_driver_fam15 __pci_driver = {
.ops = &mcf5_ops,
.vendor = PCI_VENDOR_ID_AMD,
.device = 0x1605,
};
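The if chain above enumerates the contiguous values of F5x80[3:0] (0x1, 0x3, 0x7, 0xf), so for those values it is equivalent to a population count. A standalone sketch of that alternative (the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>

static uint8_t count_compute_units(uint32_t f5x80)
{
	uint8_t cu_enabled = f5x80 & 0xf;
	uint8_t count = 0;

	while (cu_enabled) {		/* population count of the low nibble */
		count += cu_enabled & 1;
		cu_enabled >>= 1;
	}
	return count;
}

int main(void)
{
	printf("%u compute units\n", count_compute_units(0x7));	/* -> 3 */
	return 0;
}
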


@@ -77,6 +77,21 @@ device_t get_node_pci(u32 nodeid, u32 fn)
#endif
}
static inline uint8_t is_fam15h(void)
{
uint8_t fam15h = 0;
uint32_t family;
family = cpuid_eax(0x80000001);
family = ((family & 0xf00000) >> 16) | ((family & 0xf00) >> 8);
if (family >= 0x6f)
/* Family 15h or later */
fam15h = 1;
return fam15h;
}
static void get_fx_devs(void)
{
int i;
@@ -198,7 +213,7 @@ static void amd_g34_fixup(struct bus *link, device_t dev)
/* Revision D or later */
rev_gte_d = 1;
if (rev_gte_d) {
if (rev_gte_d || is_fam15h()) {
f3xe8 = pci_read_config32(get_node_pci(0, 3), 0xe8);
/* Check for dual node capability */
@@ -211,6 +226,15 @@ static void amd_g34_fixup(struct bus *link, device_t dev)
*/
f3xe8 = pci_read_config32(get_node_pci(nodeid, 3), 0xe8);
uint8_t internal_node_number = ((f3xe8 & 0xc0000000) >> 30);
uint8_t defective_link_number_1;
uint8_t defective_link_number_2;
if (is_fam15h()) {
defective_link_number_1 = 4; /* Link 0 Sublink 1 */
defective_link_number_2 = 7; /* Link 3 Sublink 1 */
} else {
defective_link_number_1 = 6; /* Link 2 Sublink 1 */
defective_link_number_2 = 5; /* Link 1 Sublink 1 */
}
if (internal_node_number == 0) {
/* Node 0 */
if (link->link_num == 6) /* Link 2 Sublink 1 */
@@ -310,6 +334,46 @@ static void amdfam10_scan_chains(device_t dev)
{
struct bus *link;
#if CONFIG_CPU_AMD_SOCKET_G34_NON_AGESA
if (is_fam15h()) {
uint8_t current_link_number = 0;
for (link = dev->link_list; link; link = link->next) {
/* The following links have changed position in Fam15h G34 processors:
* Fam10 Fam15
* Node 0
* L3 --> L1
* L0 --> L3
* L1 --> L2
* L2 --> L0
* Node 1
* L0 --> L0
* L1 --> L3
* L2 --> L1
* L3 --> L2
*/
if (link->link_num == 0)
link->link_num = 3;
else if (link->link_num == 1)
link->link_num = 2;
else if (link->link_num == 2)
link->link_num = 0;
else if (link->link_num == 3)
link->link_num = 1;
else if (link->link_num == 5)
link->link_num = 7;
else if (link->link_num == 6)
link->link_num = 5;
else if (link->link_num == 7)
link->link_num = 6;
current_link_number++;
if (current_link_number > 3)
current_link_number = 0;
}
}
#endif
/* Do sb ht chain at first, in case s2885 put sb chain (8131/8111) on link2, but put 8151 on link0 */
trim_ht_chain(dev);
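The renumbering above is a fixed permutation of the Fam10h link numbers; the same mapping expressed as a lookup table in a standalone sketch (illustrative only; link 4 is unchanged):

#include <stdint.h>
#include <stdio.h>

/* Index: Fam10h-style link number, value: Fam15h link number. */
static const uint8_t fam15h_g34_link_remap[8] = { 3, 2, 0, 1, 4, 7, 5, 6 };

int main(void)
{
	unsigned link;

	for (link = 0; link < 8; link++)
		printf("link %u -> %u\n", link, fam15h_g34_link_remap[link]);
	return 0;
}
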
@@ -616,13 +680,21 @@ static const struct pci_driver mcf0_driver __pci_driver = {
.device = 0x1200,
};
static void amdfam10_nb_init(void *chip_info)
{
relocate_sb_ht_chain();
}
static const struct pci_driver mcf0_driver_fam15 __pci_driver = {
.ops = &northbridge_operations,
.vendor = PCI_VENDOR_ID_AMD,
.device = 0x1600,
};
struct chip_operations northbridge_amd_amdfam10_ops = {
CHIP_NAME("AMD FAM10 Northbridge")
CHIP_NAME("AMD Family 10h/15h Northbridge")
.enable_dev = 0,
.init = amdfam10_nb_init,
};
@@ -946,6 +1018,28 @@ static int amdfam10_get_smbios_data16(int* count, int handle, unsigned long *cur
static uint16_t amdmct_mct_speed_enum_to_mhz(uint8_t speed)
{
if (is_fam15h()) {
if (IS_ENABLED(CONFIG_DIMM_DDR3)) {
switch (speed) {
case 0x4:
return 333;
case 0x6:
return 400;
case 0xa:
return 533;
case 0xe:
return 667;
case 0x12:
return 800;
case 0x16:
return 933;
default:
return 0;
}
} else {
return 0;
}
} else {
if (IS_ENABLED(CONFIG_DIMM_DDR2)) {
switch (speed) {
case 1:
@@ -980,6 +1074,7 @@ static uint16_t amdmct_mct_speed_enum_to_mhz(uint8_t speed)
return 0;
}
}
}
static int amdfam10_get_smbios_data17(int* count, int handle, int parent_handle, unsigned long *current)
{
@@ -1072,6 +1167,8 @@ static int amdfam10_get_smbios_data17(int* count, int handle, int parent_handle,
#if IS_ENABLED(CONFIG_DIMM_DDR3)
/* Find the maximum and minimum supported voltages */
uint8_t supported_voltages = mem_info->dct_stat[node].DimmSupportedVoltages[slot];
uint8_t configured_voltage = mem_info->dct_stat[node].DimmConfiguredVoltage[slot];
if (supported_voltages & 0x8)
t->minimum_voltage = 1150;
else if (supported_voltages & 0x4)
@@ -1090,7 +1187,14 @@ static int amdfam10_get_smbios_data17(int* count, int handle, int parent_handle,
else if (supported_voltages & 0x8)
t->maximum_voltage = 1150;
t->configured_voltage = mem_info->dct_stat[node].DimmConfiguredVoltage[slot];
if (configured_voltage & 0x8)
t->configured_voltage = 1150;
else if (configured_voltage & 0x4)
t->configured_voltage = 1250;
else if (configured_voltage & 0x2)
t->configured_voltage = 1350;
else if (configured_voltage & 0x1)
t->configured_voltage = 1500;
#endif
}
t->memory_error_information_handle = 0xFFFE; /* no error information handle available */
@@ -1229,12 +1333,14 @@ static void cpu_bus_scan(device_t dev)
#if CONFIG_CBB
device_t pci_domain;
#endif
int nvram = 0;
int i,j;
int nodes;
unsigned nb_cfg_54;
unsigned siblings;
int cores_found;
int disable_siblings;
uint8_t disable_cu_siblings = 0;
unsigned ApicIdCoreIdSize;
nb_cfg_54 = 0;
@@ -1321,14 +1427,23 @@ static void cpu_bus_scan(device_t dev)
/* Always use the devicetree node with lapic_id 0 for BSP. */
remap_bsp_lapic(cpu_bus);
if (get_option(&nvram, "compute_unit_siblings") == CB_SUCCESS)
disable_cu_siblings = !!nvram;
if (disable_cu_siblings)
printk(BIOS_DEBUG, "Disabling siblings on each compute unit as requested\n");
for(i = 0; i < nodes; i++) {
device_t cdb_dev;
unsigned busn, devn;
struct bus *pbus;
uint8_t fam15h = 0;
uint8_t rev_gte_d = 0;
uint8_t dual_node = 0;
uint32_t f3xe8;
uint32_t family;
uint32_t model;
busn = CONFIG_CBB;
devn = CONFIG_CDB+i;
@@ -1368,7 +1483,16 @@ static void cpu_bus_scan(device_t dev)
f3xe8 = pci_read_config32(get_node_pci(0, 3), 0xe8);
if (cpuid_eax(0x80000001) >= 0x8)
family = model = cpuid_eax(0x80000001);
model = ((model & 0xf0000) >> 12) | ((model & 0xf0) >> 4);
if (is_fam15h()) {
/* Family 15h or later */
fam15h = 1;
nb_cfg_54 = 1;
}
if ((model >= 0x8) || fam15h)
/* Revision D or later */
rev_gte_d = 1;
@@ -1378,13 +1502,20 @@ static void cpu_bus_scan(device_t dev)
dual_node = 1;
cores_found = 0; // one core
if (fam15h)
cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 5));
else
cdb_dev = dev_find_slot(busn, PCI_DEVFN(devn, 3));
int enable_node = cdb_dev && cdb_dev->enabled;
if (enable_node) {
if (fam15h) {
cores_found = pci_read_config32(cdb_dev, 0x84) & 0xff;
} else {
j = pci_read_config32(cdb_dev, 0xe8);
cores_found = (j >> 12) & 3; // dev is func 3
if (siblings > 3)
cores_found |= (j >> 13) & 4;
}
printk(BIOS_DEBUG, " %s siblings=%d\n", dev_path(cdb_dev), cores_found);
}
@@ -1404,6 +1535,10 @@ static void cpu_bus_scan(device_t dev)
if (dual_node) {
apic_id = 0;
if (fam15h) {
apic_id |= ((i >> 1) & 0x3) << 5; /* Node ID */
apic_id |= ((i & 0x1) * (siblings + 1)) + j; /* Core ID */
} else {
if (nb_cfg_54) {
apic_id |= ((i >> 1) & 0x3) << 4; /* Node ID */
apic_id |= ((i & 0x1) * (siblings + 1)) + j; /* Core ID */
@@ -1411,9 +1546,14 @@ static void cpu_bus_scan(device_t dev)
apic_id |= i & 0x3; /* Node ID */
apic_id |= (((i & 0x1) * (siblings + 1)) + j) << 4; /* Core ID */
}
}
} else {
if (fam15h) {
apic_id = (i * (siblings + 1)) + j;
} else {
apic_id = i * (nb_cfg_54?(siblings+1):1) + j * (nb_cfg_54?1:64); // ?
}
}
#if CONFIG_ENABLE_APIC_EXT_ID && (CONFIG_APIC_ID_OFFSET>0)
if(sysconf.enabled_apic_ext_id) {
@@ -1422,6 +1562,9 @@ static void cpu_bus_scan(device_t dev)
}
}
#endif
if (disable_cu_siblings && (j & 0x1))
continue;
device_t cpu = add_cpu_device(cpu_bus, apic_id, enable_node);
if (cpu)
amd_cpu_topology(cpu, i, j);
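A standalone worked example of the Fam15h dual-node APIC ID construction above, using hypothetical values (node index i = 3, siblings = 7, core j = 2):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned i = 3, j = 2, siblings = 7;	/* hypothetical inputs */
	uint32_t apic_id = 0;

	apic_id |= ((i >> 1) & 0x3) << 5;		/* package (node pair) ID  */
	apic_id |= ((i & 0x1) * (siblings + 1)) + j;	/* core within the package */

	printf("apic_id = 0x%02x\n", apic_id);		/* -> 0x2a */
	return 0;
}
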
@@ -1480,6 +1623,6 @@ static void root_complex_enable_dev(struct device *dev)
}
struct chip_operations northbridge_amd_amdfam10_root_complex_ops = {
CHIP_NAME("AMD FAM10 Root Complex")
CHIP_NAME("AMD Family 10h/15h Root Complex")
.enable_dev = root_complex_enable_dev,
};


@@ -38,8 +38,120 @@ static void print_tf(const char *func, const char *strval)
#endif
}
static uint16_t mct_MaxLoadFreq(uint8_t count, uint8_t registered, uint16_t freq)
static inline void fam15h_switch_dct(uint32_t dev, uint8_t dct)
{
uint32_t dword;
dword = Get_NB32(dev, 0x10c);
dword &= ~0x1;
dword |= (dct & 0x1);
Set_NB32(dev, 0x10c, dword);
}
static inline void fam15h_switch_nb_pstate_config_reg(uint32_t dev, uint8_t nb_pstate)
{
uint32_t dword;
dword = Get_NB32(dev, 0x10c);
dword &= ~(0x3 << 4);
dword |= (nb_pstate & 0x3) << 4;
Set_NB32(dev, 0x10c, dword);
}
static inline uint32_t Get_NB32_DCT(uint32_t dev, uint8_t dct, uint32_t reg)
{
if (is_fam15h()) {
/* Obtain address of function 0x1 */
uint32_t dev_map = (dev & (~(0x7 << 12))) | (0x1 << 12);
fam15h_switch_dct(dev_map, dct);
return Get_NB32(dev, reg);
} else {
return Get_NB32(dev, (0x100 * dct) + reg);
}
}
static inline void Set_NB32_DCT(uint32_t dev, uint8_t dct, uint32_t reg, uint32_t val)
{
if (is_fam15h()) {
/* Obtain address of function 0x1 */
uint32_t dev_map = (dev & (~(0x7 << 12))) | (0x1 << 12);
fam15h_switch_dct(dev_map, dct);
Set_NB32(dev, reg, val);
} else {
Set_NB32(dev, (0x100 * dct) + reg, val);
}
}
static inline uint32_t Get_NB32_DCT_NBPstate(uint32_t dev, uint8_t dct, uint8_t nb_pstate, uint32_t reg)
{
if (is_fam15h()) {
/* Obtain address of function 0x1 */
uint32_t dev_map = (dev & (~(0x7 << 12))) | (0x1 << 12);
fam15h_switch_dct(dev_map, dct);
fam15h_switch_nb_pstate_config_reg(dev_map, nb_pstate);
return Get_NB32(dev, reg);
} else {
return Get_NB32(dev, (0x100 * dct) + reg);
}
}
static inline void Set_NB32_DCT_NBPstate(uint32_t dev, uint8_t dct, uint8_t nb_pstate, uint32_t reg, uint32_t val)
{
if (is_fam15h()) {
/* Obtain address of function 0x1 */
uint32_t dev_map = (dev & (~(0x7 << 12))) | (0x1 << 12);
fam15h_switch_dct(dev_map, dct);
fam15h_switch_nb_pstate_config_reg(dev_map, nb_pstate);
Set_NB32(dev, reg, val);
} else {
Set_NB32(dev, (0x100 * dct) + reg, val);
}
}
static inline uint32_t Get_NB32_index_wait_DCT(uint32_t dev, uint8_t dct, uint32_t index_reg, uint32_t index)
{
if (is_fam15h()) {
/* Obtain address of function 0x1 */
uint32_t dev_map = (dev & (~(0x7 << 12))) | (0x1 << 12);
fam15h_switch_dct(dev_map, dct);
return Get_NB32_index_wait(dev, index_reg, index);
} else {
return Get_NB32_index_wait(dev, (0x100 * dct) + index_reg, index);
}
}
static inline void Set_NB32_index_wait_DCT(uint32_t dev, uint8_t dct, uint32_t index_reg, uint32_t index, uint32_t data)
{
if (is_fam15h()) {
/* Obtain address of function 0x1 */
uint32_t dev_map = (dev & (~(0x7 << 12))) | (0x1 << 12);
fam15h_switch_dct(dev_map, dct);
Set_NB32_index_wait(dev, index_reg, index, data);
} else {
Set_NB32_index_wait(dev, (0x100 * dct) + index_reg, index, data);
}
}
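The wrappers above select the register view through F1x10C before issuing a plain access; a simplified standalone model of those bitfield updates (a plain variable stands in for the PCI register):

#include <stdint.h>
#include <stdio.h>

static uint32_t f1x10c;	/* stand-in for the real config register */

static void switch_dct(uint8_t dct)
{
	f1x10c = (f1x10c & ~0x1u) | (dct & 0x1);	/* bit 0: DCT select */
}

static void switch_nb_pstate(uint8_t nb_pstate)
{
	f1x10c = (f1x10c & ~(0x3u << 4)) | ((nb_pstate & 0x3) << 4);	/* bits 5:4 */
}

int main(void)
{
	switch_dct(1);
	switch_nb_pstate(2);
	printf("F1x10C = 0x%02x\n", f1x10c);	/* -> 0x21 */
	return 0;
}
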
static uint16_t voltage_index_to_mv(uint8_t index)
{
if (index & 0x8)
return 1150;
if (index & 0x4)
return 1250;
else if (index & 0x2)
return 1350;
else
return 1500;
}
static uint16_t mct_MaxLoadFreq(uint8_t count, uint8_t highest_rank_count, uint8_t registered, uint8_t voltage, uint16_t freq)
{
/* FIXME
* Mainboards need to be able to specify the maximum number of DIMMs installable per channel
* For now assume a maximum of 2 DIMMs per channel can be installed
*/
uint8_t MaxDimmsInstallable = 2;
/* Return limited maximum RAM frequency */
if (IS_ENABLED(CONFIG_DIMM_DDR2)) {
if (IS_ENABLED(CONFIG_DIMM_REGISTERED) && registered) {
@@ -62,25 +174,168 @@ static uint16_t mct_MaxLoadFreq(uint8_t count, uint8_t registered, uint16_t freq
}
}
} else if (IS_ENABLED(CONFIG_DIMM_DDR3)) {
if (voltage == 0) {
printk(BIOS_DEBUG, "%s: WARNING: Mainboard DDR3 voltage unknown, assuming 1.5V!\n", __func__);
voltage = 0x1;
}
if (is_fam15h()) {
if (IS_ENABLED(CONFIG_DIMM_REGISTERED) && registered) {
/* Fam15h BKDG Rev. 3.14 Table 27 */
if (voltage & 0x4) {
/* 1.25V */
if (count > 1) {
if (highest_rank_count > 1) {
/* Limit to DDR3-1066 */
if (freq > 533) {
freq = 533;
printk(BIOS_DEBUG, "%s: More than 1 registered DIMM on %dmV channel; limiting to DDR3-1066\n", __func__, voltage_index_to_mv(voltage));
}
} else {
/* Limit to DDR3-1333 */
if (freq > 666) {
freq = 666;
printk(BIOS_DEBUG, "%s: More than 1 registered DIMM on %dmV channel; limiting to DDR3-1333\n", __func__, voltage_index_to_mv(voltage));
}
}
} else {
/* Limit to DDR3-1333 */
if (freq > 666) {
freq = 666;
printk(BIOS_DEBUG, "%s: 1 registered DIMM on %dmV channel; limiting to DDR3-1333\n", __func__, voltage_index_to_mv(voltage));
}
}
} else if (voltage & 0x2) {
/* 1.35V */
if (count > 1) {
/* Limit to DDR3-1333 */
if (freq > 666) {
freq = 666;
printk(BIOS_DEBUG, "%s: More than 1 registered DIMM on %dmV channel; limiting to DDR3-1333\n", __func__, voltage_index_to_mv(voltage));
}
} else {
/* Limit to DDR3-1600 */
if (freq > 800) {
freq = 800;
printk(BIOS_DEBUG, "%s: 1 registered DIMM on %dmV channel; limiting to DDR3-1600\n", __func__, voltage_index_to_mv(voltage));
}
}
} else if (voltage & 0x1) {
/* 1.50V */
if (count > 1) {
/* Limit to DDR3-1600 */
if (freq > 800) {
freq = 800;
printk(BIOS_DEBUG, "%s: More than 1 registered DIMM on %dmV channel; limiting to DDR3-1600\n", __func__, voltage_index_to_mv(voltage));
}
} else {
/* Limit to DDR3-1866 */
if (freq > 933) {
freq = 933;
printk(BIOS_DEBUG, "%s: 1 registered DIMM on %dmV channel; limiting to DDR3-1866\n", __func__, voltage_index_to_mv(voltage));
}
}
}
} else {
/* Fam15h BKDG Rev. 3.14 Table 26 */
if (voltage & 0x4) {
/* 1.25V */
if (count > 1) {
if (highest_rank_count > 1) {
/* Limit to DDR3-1066 */
if (freq > 533) {
freq = 533;
printk(BIOS_DEBUG, "%s: More than 1 unbuffered DIMM on %dmV channel; limiting to DDR3-1066\n", __func__, voltage_index_to_mv(voltage));
}
} else {
/* Limit to DDR3-1333 */
if (freq > 666) {
freq = 666;
printk(BIOS_DEBUG, "%s: More than 1 unbuffered DIMM on %dmV channel; limiting to DDR3-1333\n", __func__, voltage_index_to_mv(voltage));
}
}
} else {
/* Limit to DDR3-1333 */
if (freq > 666) {
freq = 666;
printk(BIOS_DEBUG, "%s: 1 unbuffered DIMM on %dmV channel; limiting to DDR3-1333\n", __func__, voltage_index_to_mv(voltage));
}
}
} else if (voltage & 0x2) {
/* 1.35V */
if (MaxDimmsInstallable > 1) {
/* Limit to DDR3-1333 */
if (freq > 666) {
freq = 666;
printk(BIOS_DEBUG, "%s: More than 1 unbuffered DIMM on %dmV channel; limiting to DDR3-1333\n", __func__, voltage_index_to_mv(voltage));
}
} else {
/* Limit to DDR3-1600 */
if (freq > 800) {
freq = 800;
printk(BIOS_DEBUG, "%s: 1 unbuffered DIMM on %dmV channel; limiting to DDR3-1600\n", __func__, voltage_index_to_mv(voltage));
}
}
} else if (voltage & 0x1) {
if (MaxDimmsInstallable == 1) {
if (count > 1) {
/* Limit to DDR3-1600 */
if (freq > 800) {
freq = 800;
printk(BIOS_DEBUG, "%s: More than 1 unbuffered DIMM on %dmV channel; limiting to DDR3-1600\n", __func__, voltage_index_to_mv(voltage));
}
} else {
/* Limit to DDR3-1866 */
if (freq > 933) {
freq = 933;
printk(BIOS_DEBUG, "%s: 1 unbuffered DIMM on %dmV channel; limiting to DDR3-1866\n", __func__, voltage_index_to_mv(voltage));
}
}
} else {
if (count > 1) {
if (highest_rank_count > 1) {
/* Limit to DDR3-1333 */
if (freq > 666) {
freq = 666;
printk(BIOS_DEBUG, "%s: More than 1 unbuffered DIMM on %dmV channel; limiting to DDR3-1333\n", __func__, voltage_index_to_mv(voltage));
}
} else {
/* Limit to DDR3-1600 */
if (freq > 800) {
freq = 800;
printk(BIOS_DEBUG, "%s: More than 1 unbuffered DIMM on %dmV channel; limiting to DDR3-1600\n", __func__, voltage_index_to_mv(voltage));
}
}
} else {
/* Limit to DDR3-1600 */
if (freq > 800) {
freq = 800;
printk(BIOS_DEBUG, "%s: 1 unbuffered DIMM on %dmV channel; limiting to DDR3-1600\n", __func__, voltage_index_to_mv(voltage));
}
}
}
}
}
} else {
if (IS_ENABLED(CONFIG_DIMM_REGISTERED) && registered) {
/* K10 BKDG Rev. 3.62 Table 34 */
if (count > 2) {
/* Limit to DDR3-800 */
if (freq > 400) {
freq = 400;
print_tf(__func__, ": More than 2 registered DIMMs on channel; limiting to DDR3-800\n");
printk(BIOS_DEBUG, "%s: More than 2 registered DIMMs on %dmV channel; limiting to DDR3-800\n", __func__, voltage_index_to_mv(voltage));
}
} else if (count == 2) {
/* Limit to DDR3-1066 */
if (freq > 533) {
freq = 533;
print_tf(__func__, ": 2 registered DIMMs on channel; limiting to DDR3-1066\n");
printk(BIOS_DEBUG, "%s: 2 registered DIMMs on %dmV channel; limiting to DDR3-1066\n", __func__, voltage_index_to_mv(voltage));
}
} else {
/* Limit to DDR3-1333 */
if (freq > 666) {
freq = 666;
print_tf(__func__, ": 1 registered DIMM on channel; limiting to DDR3-1333\n");
printk(BIOS_DEBUG, "%s: 1 registered DIMM on %dmV channel; limiting to DDR3-1333\n", __func__, voltage_index_to_mv(voltage));
}
}
} else {
@@ -88,7 +343,8 @@ static uint16_t mct_MaxLoadFreq(uint8_t count, uint8_t registered, uint16_t freq
/* Limit to DDR3-1333 */
if (freq > 666) {
freq = 666;
print_tf(__func__, ": unbuffered DIMMs on channel; limiting to DDR3-1333\n");
printk(BIOS_DEBUG, "%s: unbuffered DIMMs on %dmV channel; limiting to DDR3-1333\n", __func__, voltage_index_to_mv(voltage));
}
}
}
}
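Not part of the patch: the Fam15h registered-DIMM branch of mct_MaxLoadFreq() above, restated compactly (per the cited BKDG Rev. 3.14 Table 27) as a cross-check; the function name is illustrative:

#include <stdint.h>
#include <stdio.h>

/* voltage bit: 0x4 = 1.25V, 0x2 = 1.35V, 0x1 = 1.50V (as in the patch). */
static uint16_t fam15h_rdimm_max_memclk(uint8_t voltage, uint8_t count,
					uint8_t highest_rank_count)
{
	if (voltage & 0x4)	/* 1.25V */
		return (count > 1 && highest_rank_count > 1) ? 533 : 666;
	if (voltage & 0x2)	/* 1.35V */
		return (count > 1) ? 666 : 800;
	return (count > 1) ? 800 : 933;	/* 1.50V */
}

int main(void)
{
	/* Two single-rank RDIMMs on a 1.35V channel: limited to DDR3-1333. */
	printf("%u MHz\n", fam15h_rdimm_max_memclk(0x2, 2, 1));	/* -> 666 */
	return 0;
}
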
@@ -219,11 +475,13 @@ void mctGet_DIMMAddr(struct DCTStatStruc *pDCTstat, u32 node)
}
#if IS_ENABLED(CONFIG_SET_FIDVID)
static u8 mctGetProcessorPackageType(void) {
/* FIXME: I guess this belongs wherever mctGetLogicalCPUID ends up ? */
u32 BrandId = cpuid_ebx(0x80000001);
return (u8)((BrandId >> 28) & 0x0F);
}
#endif
static void raminit_amdmct(struct sys_info *sysinfo)
{


@@ -39,6 +39,7 @@
#define CPU_HTNB_FUNC_04 4
#define CPU_ADDR_FUNC_01 1
#define CPU_NB_FUNC_03 3
#define CPU_NB_FUNC_05 5
/* Function 0 registers */
#define REG_ROUTE0_0X40 0x40
@@ -66,6 +67,7 @@
#define REG_NB_CPUID_3XFC 0xFC
#define REG_NB_LINK_XCS_TOKEN0_3X148 0x148
#define REG_NB_DOWNCORE_3X190 0x190
#define REG_NB_CAPABILITY_5X84 0x84
/* Function 4 registers */
@@ -551,9 +553,10 @@ static u8 fam10GetNumCoresOnNode(u8 node, cNorthBridge *nb)
15, 12, &temp);
/* bits[15,13,12] specify the cores */
/* Support Downcoring */
temp = ((temp & 8) >> 1) + (temp & 3);
cores = temp + 1;
/* Support Downcoring */
AmdPCIReadBits (MAKE_SBDFO(makePCISegmentFromNode(node),
makePCIBusFromNode(node),
makePCIDeviceFromNode(node),
@@ -570,6 +573,56 @@ static u8 fam10GetNumCoresOnNode(u8 node, cNorthBridge *nb)
return (u8)(temp+1);
}
/***************************************************************************//**
*
* static u8
* fam15GetNumCoresOnNode(u8 node, cNorthBridge *nb)
*
* Description:
* Return the number of cores (1 based count) on node.
*
* Parameters:
* @param[in] node = the node that will be examined
* @param[in] *nb = this northbridge
* @return = the number of cores
*
*
*/
static u8 fam15GetNumCoresOnNode(u8 node, cNorthBridge *nb)
{
u32 temp, leveling, cores;
u8 i;
ASSERT((node < nb->maxNodes));
/* Read CmpCap [7:0] */
AmdPCIReadBits(MAKE_SBDFO(makePCISegmentFromNode(node),
makePCIBusFromNode(node),
makePCIDeviceFromNode(node),
CPU_NB_FUNC_05,
REG_NB_CAPABILITY_5X84),
7, 0, &temp);
/* bits[7:0] specify the cores */
temp = temp & 0xff;
cores = temp + 1;
/* Support Downcoring */
AmdPCIReadBits (MAKE_SBDFO(makePCISegmentFromNode(node),
makePCIBusFromNode(node),
makePCIDeviceFromNode(node),
CPU_NB_FUNC_03,
REG_NB_DOWNCORE_3X190),
31, 0, &leveling);
for (i=0; i<cores; i++)
{
if (leveling & ((u32) 1 << i))
{
temp--;
}
}
return (u8)(temp+1);
}
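A standalone worked example of the core count math in fam15GetNumCoresOnNode() above (CmpCap and downcore mask values are hypothetical):

#include <stdint.h>
#include <stdio.h>

static uint8_t active_cores(uint8_t cmp_cap, uint32_t downcore_mask)
{
	uint32_t temp = cmp_cap;
	uint32_t cores = cmp_cap + 1;
	uint8_t i;

	for (i = 0; i < cores; i++)	/* subtract downcored cores, as above */
		if (downcore_mask & ((uint32_t)1 << i))
			temp--;
	return temp + 1;
}

int main(void)
{
	/* Hypothetical 8-core node (CmpCap = 7) with cores 2 and 3 downcored. */
	printf("%u active cores\n", active_cores(7, 0x0c));	/* -> 6 */
	return 0;
}
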
/***************************************************************************//**
*
* static void
@@ -848,6 +901,69 @@ static BOOL fam10IsCapable(u8 node, sMainData *pDat, cNorthBridge *nb)
#endif
}
/***************************************************************************//**
*
* static BOOL
* fam15IsCapable(u8 node, sMainData *pDat, cNorthBridge *nb)
*
* Description:
* Get node capability and update the minimum supported system capability.
* Return whether the current configuration exceeds the capability.
*
* Parameters:
* @param[in] node = the node
* @param[in,out] *pDat = sysMpCap (updated) and NodesDiscovered
* @param[in] *nb = this northbridge
* @return true: system is capable of current config.
* false: system is not capable of current config.
*
* ---------------------------------------------------------------------------------------
*/
static BOOL fam15IsCapable(u8 node, sMainData *pDat, cNorthBridge *nb)
{
#ifndef HT_BUILD_NC_ONLY
u32 temp;
u8 maxNodes;
ASSERT(node < nb->maxNodes);
AmdPCIReadBits(MAKE_SBDFO(makePCISegmentFromNode(node),
makePCIBusFromNode(node),
makePCIDeviceFromNode(node),
CPU_NB_FUNC_03,
REG_NB_CAPABILITY_3XE8),
18, 16, &temp);
if (temp != 0)
{
maxNodes = (1 << (~temp & 0x3)); /* That is, 1, 2, 4, or 8 */
}
else
{
/* Check if CPU package is dual node */
AmdPCIReadBits(MAKE_SBDFO(makePCISegmentFromNode(node),
makePCIBusFromNode(node),
makePCIDeviceFromNode(node),
CPU_NB_FUNC_03,
REG_NB_CAPABILITY_3XE8),
29, 29, &temp);
if (temp)
maxNodes = 4;
else
maxNodes = 8;
}
if (pDat->sysMpCap > maxNodes)
{
pDat->sysMpCap = maxNodes;
}
/* Note since sysMpCap is one based and NodesDiscovered is zero based, equal is false */
return (pDat->sysMpCap > pDat->NodesDiscovered);
#else
return 1;
#endif
}
/***************************************************************************//**
*
* static void
@@ -2064,6 +2180,49 @@ void newNorthBridge(u8 node, cNorthBridge *nb)
u32 match;
u32 extFam, baseFam, model;
cNorthBridge fam15 =
{
#ifdef HT_BUILD_NC_ONLY
8,
1,
12,
#else
8,
8,
64,
#endif /* HT_BUILD_NC_ONLY*/
writeRoutingTable,
writeNodeID,
readDefLnk,
enableRoutingTables,
verifyLinkIsCoherent,
readTrueLinkFailStatus,
readToken,
writeToken,
fam15GetNumCoresOnNode,
setTotalNodesAndCores,
limitNodes,
writeFullRoutingTable,
isCompatible,
fam15IsCapable,
(void (*)(u8, u8, cNorthBridge*))commonVoid,
(BOOL (*)(u8, u8, sMainData*, cNorthBridge*))commonReturnFalse,
readSbLink,
verifyLinkIsNonCoherent,
ht3SetCFGAddrMap,
convertBitsToWidth,
convertWidthToBits,
fam10NorthBridgeFreqMask,
gatherLinkData,
setLinkData,
ht3WriteTrafficDistribution,
fam10BufferOptimizations,
0x00000001,
0x00000200,
18,
0x00000f06
};
cNorthBridge fam10 =
{
#ifdef HT_BUILD_NC_ONLY
@@ -2171,8 +2330,14 @@ void newNorthBridge(u8 node, cNorthBridge *nb)
7, 4, &model);
match = (u32)((baseFam << 8) | extFam);
/* Test each in turn looking for a match. Init the struct if found */
if (match == fam10.compatibleKey)
/* Test each in turn looking for a match.
* Initialize the struct if found.
*/
if (match == fam15.compatibleKey)
{
Amdmemcpy((void *)nb, (const void *)&fam15, (u32) sizeof(cNorthBridge));
}
else if (match == fam10.compatibleKey)
{
Amdmemcpy((void *)nb, (const void *)&fam10, (u32) sizeof(cNorthBridge));
}
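A trivial standalone check of the key matching above: with baseFam = 0xf and extFam = 0x06 the computed match is 0xf06, which is exactly fam15.compatibleKey (values taken from the hunk above):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t base_fam = 0xf, ext_fam = 0x06;	/* Family 15h */
	uint32_t match = (base_fam << 8) | ext_fam;

	printf("match = 0x%03x\n", match);	/* -> 0xf06 */
	return 0;
}
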


@@ -170,16 +170,22 @@ void amd_ht_fixup(struct sys_info *sysinfo) {
printk(BIOS_DEBUG, "amd_ht_fixup()\n");
if (IS_ENABLED(CONFIG_CPU_AMD_MODEL_10XXX)) {
uint8_t rev_gte_d = 0;
uint8_t fam15h = 0;
uint8_t dual_node = 0;
uint32_t f3xe8;
uint32_t family;
uint32_t model;
family = model = cpuid_eax(0x80000001);
model = ((model & 0xf0000) >> 16) | ((model & 0xf0) >> 4);
model = ((model & 0xf0000) >> 12) | ((model & 0xf0) >> 4);
family = ((family & 0xf00000) >> 16) | ((family & 0xf00) >> 8);
if (model >= 0x8)
/* Revision D or later */
if (family >= 0x6f)
/* Family 15h or later */
fam15h = 1;
if ((model >= 0x8) || fam15h)
/* Family 10h Revision D or later */
rev_gte_d = 1;
if (rev_gte_d) {
@@ -191,7 +197,8 @@ void amd_ht_fixup(struct sys_info *sysinfo) {
if (dual_node) {
/* Each G34 processor contains a defective HT link.
* See the BKDG Rev 3.62 section 2.7.1.5 for details.
* See the Family 10h BKDG Rev 3.62 section 2.7.1.5 for details
* For Family 15h see the BKDG Rev. 3.14 section 2.12.1.5 for details.
*/
uint8_t node;
uint8_t node_count = get_nodes();
@@ -201,46 +208,46 @@ void amd_ht_fixup(struct sys_info *sysinfo) {
uint8_t internal_node_number = ((f3xe8 & 0xc0000000) >> 30);
printk(BIOS_DEBUG, "amd_ht_fixup(): node %d (internal node ID %d): disabling defective HT link\n", node, internal_node_number);
if (internal_node_number == 0) {
uint8_t package_link_3_connected = pci_read_config32(NODE_PCI(node, 0), 0xd8) & 0x1;
uint8_t package_link_3_connected = pci_read_config32(NODE_PCI(node, 0), (fam15h)?0x98:0xd8) & 0x1;
if (package_link_3_connected) {
/* Set WidthIn and WidthOut to 0 */
dword = pci_read_config32(NODE_PCI(node, 0), 0xc4);
dword = pci_read_config32(NODE_PCI(node, 0), (fam15h)?0x84:0xc4);
dword &= ~0x77000000;
pci_write_config32(NODE_PCI(node, 0), 0xc4, dword);
pci_write_config32(NODE_PCI(node, 0), (fam15h)?0x84:0xc4, dword);
/* Set Ganged to 1 */
dword = pci_read_config32(NODE_PCI(node, 0), 0x178);
dword = pci_read_config32(NODE_PCI(node, 0), (fam15h)?0x170:0x178);
dword |= 0x00000001;
pci_write_config32(NODE_PCI(node, 0), 0x178, dword);
pci_write_config32(NODE_PCI(node, 0), (fam15h)?0x170:0x178, dword);
} else {
/* Set ConnDly to 1 */
dword = pci_read_config32(NODE_PCI(node, 0), 0x16c);
dword |= 0x00000100;
pci_write_config32(NODE_PCI(node, 0), 0x16c, dword);
/* Set TransOff and EndOfChain to 1 */
dword = pci_read_config32(NODE_PCI(node, 4), 0xc4);
dword = pci_read_config32(NODE_PCI(node, 4), (fam15h)?0x84:0xc4);
dword |= 0x000000c0;
pci_write_config32(NODE_PCI(node, 4), 0xc4, dword);
pci_write_config32(NODE_PCI(node, 4), (fam15h)?0x84:0xc4, dword);
}
} else if (internal_node_number == 1) {
uint8_t package_link_3_connected = pci_read_config32(NODE_PCI(node, 0), 0xb8) & 0x1;
uint8_t package_link_3_connected = pci_read_config32(NODE_PCI(node, 0), (fam15h)?0xf8:0xb8) & 0x1;
if (package_link_3_connected) {
/* Set WidthIn and WidthOut to 0 */
dword = pci_read_config32(NODE_PCI(node, 0), 0xa4);
dword = pci_read_config32(NODE_PCI(node, 0), (fam15h)?0xe4:0xa4);
dword &= ~0x77000000;
pci_write_config32(NODE_PCI(node, 0), 0xa4, dword);
pci_write_config32(NODE_PCI(node, 0), (fam15h)?0xe4:0xa4, dword);
/* Set Ganged to 1 */
dword = pci_read_config32(NODE_PCI(node, 0), 0x174);
dword = pci_read_config32(NODE_PCI(node, 0), (fam15h)?0x18c:0x174);
dword |= 0x00000001;
pci_write_config32(NODE_PCI(node, 0), 0x174, dword);
pci_write_config32(NODE_PCI(node, 0), (fam15h)?0x18c:0x174, dword);
} else {
/* Set ConnDly to 1 */
dword = pci_read_config32(NODE_PCI(node, 0), 0x16c);
dword |= 0x00000100;
pci_write_config32(NODE_PCI(node, 0), 0x16c, dword);
/* Set TransOff and EndOfChain to 1 */
dword = pci_read_config32(NODE_PCI(node, 4), 0xa4);
dword = pci_read_config32(NODE_PCI(node, 4), (fam15h)?0xe4:0xa4);
dword |= 0x000000c0;
pci_write_config32(NODE_PCI(node, 4), 0xa4, dword);
pci_write_config32(NODE_PCI(node, 4), (fam15h)?0xe4:0xa4, dword);
}
}
}
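For reference only, the Fam10h and Fam15h register offsets selected by the (fam15h)?x:y expressions above when disabling the defective G34 sublink, collected into a standalone table (the struct and array names are illustrative; offsets are copied from the patch):

#include <stdint.h>
#include <stdio.h>

struct ht_fixup_regs {
	uint16_t connected;	/* F0: package link 3 connected bit */
	uint16_t width;		/* F0: WidthIn/WidthOut             */
	uint16_t ganged;	/* F0: Ganged                       */
	uint16_t transoff;	/* F4: TransOff/EndOfChain          */
};

/* Indexed by internal node number (0 or 1). */
static const struct ht_fixup_regs fam10h_regs[2] = {
	{ 0xd8, 0xc4, 0x178, 0xc4 },
	{ 0xb8, 0xa4, 0x174, 0xa4 },
};
static const struct ht_fixup_regs fam15h_regs[2] = {
	{ 0x98, 0x84, 0x170, 0x84 },
	{ 0xf8, 0xe4, 0x18c, 0xe4 },
};

int main(void)
{
	printf("Fam15h internal node 1 width register: 0x%x\n",
	       fam15h_regs[1].width);	/* -> 0xe4 */
	(void)fam10h_regs;
	return 0;
}
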


@@ -16,33 +16,35 @@
/* FIXME: this file should be moved to include/cpu/amd/amddefs.h */
/* Public Revisions - USE THESE VERSIONS TO MAKE COMPARE WITH CPULOGICALID RETURN VALUE*/
#define AMD_SAFEMODE 0x80000000 /* Unknown future revision - SAFE MODE */
#define AMD_NPT_F0 0x00000001 /* F0 stepping */
#define AMD_NPT_F1 0x00000002 /* F1 stepping */
#define AMD_NPT_F2C 0x00000004
#define AMD_NPT_F2D 0x00000008
#define AMD_NPT_F2E 0x00000010 /* F2 stepping E */
#define AMD_NPT_F2G 0x00000020 /* F2 stepping G */
#define AMD_NPT_F2J 0x00000040
#define AMD_NPT_F2K 0x00000080
#define AMD_NPT_F3L 0x00000100 /* F3 Stepping */
#define AMD_NPT_G0A 0x00000200 /* G0 stepping */
#define AMD_NPT_G1B 0x00000400 /* G1 stepping */
#define AMD_DR_A0A 0x00010000 /* Barcelona A0 */
#define AMD_DR_A1B 0x00020000 /* Barcelona A1 */
#define AMD_DR_A2 0x00040000 /* Barcelona A2 */
#define AMD_DR_B0 0x00080000 /* Barcelona B0 */
#define AMD_DR_B1 0x00100000 /* Barcelona B1 */
#define AMD_DR_B2 0x00200000 /* Barcelona B2 */
#define AMD_DR_BA 0x00400000 /* Barcelona BA */
#define AMD_DR_B3 0x00800000 /* Barcelona B3 */
#define AMD_RB_C2 0x01000000 /* Shanghai C2 */
#define AMD_DA_C2 0x02000000 /* XXXX C2 */
#define AMD_HY_D0 0x04000000 /* Istanbul D0 */
#define AMD_RB_C3 0x08000000 /* ??? C3 */
#define AMD_DA_C3 0x10000000 /* XXXX C3 */
#define AMD_HY_D1 0x20000000 /* Istanbul D1 */
#define AMD_PH_E0 0x40000000 /* Phenom II X4 X6 */
#define AMD_SAFEMODE 0x8000000000000000 /* Unknown future revision - SAFE MODE */
#define AMD_NPT_F0 0x0000000000000001 /* F0 stepping */
#define AMD_NPT_F1 0x0000000000000002 /* F1 stepping */
#define AMD_NPT_F2C 0x0000000000000004
#define AMD_NPT_F2D 0x0000000000000008
#define AMD_NPT_F2E 0x0000000000000010 /* F2 stepping E */
#define AMD_NPT_F2G 0x0000000000000020 /* F2 stepping G */
#define AMD_NPT_F2J 0x0000000000000040
#define AMD_NPT_F2K 0x0000000000000080
#define AMD_NPT_F3L 0x0000000000000100 /* F3 Stepping */
#define AMD_NPT_G0A 0x0000000000000200 /* G0 stepping */
#define AMD_NPT_G1B 0x0000000000000400 /* G1 stepping */
#define AMD_DR_A0A 0x0000000000010000 /* Barcelona A0 */
#define AMD_DR_A1B 0x0000000000020000 /* Barcelona A1 */
#define AMD_DR_A2 0x0000000000040000 /* Barcelona A2 */
#define AMD_DR_B0 0x0000000000080000 /* Barcelona B0 */
#define AMD_DR_B1 0x0000000000100000 /* Barcelona B1 */
#define AMD_DR_B2 0x0000000000200000 /* Barcelona B2 */
#define AMD_DR_BA 0x0000000000400000 /* Barcelona BA */
#define AMD_DR_B3 0x0000000000800000 /* Barcelona B3 */
#define AMD_RB_C2 0x0000000001000000 /* Shanghai C2 */
#define AMD_DA_C2 0x0000000002000000 /* XXXX C2 */
#define AMD_HY_D0 0x0000000004000000 /* Istanbul D0 */
#define AMD_RB_C3 0x0000000008000000 /* ??? C3 */
#define AMD_DA_C3 0x0000000010000000 /* XXXX C3 */
#define AMD_HY_D1 0x0000000020000000 /* Istanbul D1 */
#define AMD_PH_E0 0x0000000040000000 /* Phenom II X4 X6 */
#define AMD_OR_B2 0x0000000080000000 /* Interlagos */
#define AMD_OR_C0 0x0000000100000000 /* Abu Dhabi */
/*
* Groups - Create as many as you wish, from the above public values
@@ -72,6 +74,7 @@
#define AMD_DRBH_Cx (AMD_DR_Cx | AMD_HY_D0 )
#define AMD_DRBA23_RBC2 (AMD_DR_BA | AMD_DR_B2 | AMD_DR_B3 | AMD_RB_C2 )
#define AMD_DR_DAC2_OR_C3 (AMD_DA_C2 | AMD_DA_C3 | AMD_RB_C3)
#define AMD_FAM15_ALL (AMD_OR_B2 | AMD_OR_C0)
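These masks were widened to 64 bits because AMD_OR_C0 is the first revision flag above bit 31; a minimal standalone illustration of why a u32 can no longer hold them:

#include <stdint.h>
#include <stdio.h>

#define AMD_OR_B2 0x0000000080000000ULL	/* Interlagos */
#define AMD_OR_C0 0x0000000100000000ULL	/* Abu Dhabi  */

int main(void)
{
	uint32_t truncated = (uint32_t)AMD_OR_C0;	/* bit 32 is lost -> 0 */
	uint64_t full = AMD_OR_C0;

	printf("as u32: 0x%08x\n", truncated);
	printf("as u64: 0x%016llx\n", (unsigned long long)full);
	printf("matches AMD_FAM15_ALL: %d\n",
	       (full & (AMD_OR_B2 | AMD_OR_C0)) != 0);	/* -> 1 */
	return 0;
}
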
/*
* Public Platforms - USE THESE VERSIONS TO MAKE COMPARE WITH CPUPLATFORMTYPE RETURN VALUE
@@ -121,20 +124,31 @@
#define MCG_CAP 0x00000179
#define MCG_CTL_P 8
#define MC0_CTL 0x00000400
#define MC0_STA MC0_CTL + 1
#define MC0_STA (MC0_CTL + 1)
#define MC4_MISC0 0x00000413
#define MC4_MISC1 0xC0000408
#define MC4_MISC2 0xC0000409
#define FS_Base 0xC0000100
#define SYSCFG 0xC0010010
#define HWCR 0xC0010015
#define NB_CFG 0xC001001F
#define FidVidStatus 0xC0010042
#define MC1_CTL_MASK 0xC0010045
#define MC4_CTL_MASK 0xC0010048
#define OSVW_ID_Length 0xC0010140
#define OSVW_Status 0xC0010141
#define CPUIDFEATURES 0xC0011004
#define LS_CFG 0xC0011020
#define IC_CFG 0xC0011021
#define DC_CFG 0xC0011022
#define BU_CFG 0xC0011023
#define FP_CFG 0xC0011028
#define DE_CFG 0xC0011029
#define BU_CFG2 0xC001102A
#define BU_CFG3 0xC001102B
#define EX_CFG 0xC001102C
#define LS_CFG2 0xC001102D
#define IBS_OP_DATA3 0xC0011037
/*
* Processor package types


@@ -2185,6 +2185,7 @@ static u8 DIMMPresence_D(struct MCTStatStruc *pMCTstat,
pDCTstat->DimmManufacturerID[i] |= ((uint64_t)mctRead_SPD(smbaddr, SPD_MANID_START + k)) << (k * 8);
for (k = 0; k < SPD_PARTN_LENGTH; k++)
pDCTstat->DimmPartNumber[i][k] = mctRead_SPD(smbaddr, SPD_PARTN_START + k);
pDCTstat->DimmPartNumber[i][SPD_PARTN_LENGTH] = 0;
pDCTstat->DimmRevisionNumber[i] = 0;
for (k = 0; k < 2; k++)
pDCTstat->DimmRevisionNumber[i] |= ((uint16_t)mctRead_SPD(smbaddr, SPD_REVNO_START + k)) << (k * 8);
@@ -2202,8 +2203,7 @@ static u8 DIMMPresence_D(struct MCTStatStruc *pMCTstat,
if (byte & JED_REGADCMSK) {
RegDIMMPresent |= 1 << i;
pDCTstat->DimmRegistered[i] = 1;
}
else {
} else {
pDCTstat->DimmRegistered[i] = 0;
}
/* Check ECC capable */


@@ -430,7 +430,7 @@ struct DCTStatStruc { /* A per Node structure*/
/* CH A byte lane 0 - 7 maximum filtered window passing DQS delay value*/
/* CH B byte lane 0 - 7 minimum filtered window passing DQS delay value*/
/* CH B byte lane 0 - 7 maximum filtered window passing DQS delay value*/
u32 LogicalCPUID; /* The logical CPUID of the node*/
uint64_t LogicalCPUID; /* The logical CPUID of the node*/
u16 HostBiosSrvc1; /* Word sized general purpose field for use by host BIOS. Scratch space.*/
u32 HostBiosSrvc2; /* Dword sized general purpose field for use by host BIOS. Scratch space.*/
u16 DimmQRPresent; /* QuadRank DIMM present?*/
@@ -525,7 +525,7 @@ struct DCTStatStruc { /* A per Node structure*/
uint8_t DimmRegistered[MAX_DIMMS_SUPPORTED];
uint64_t DimmManufacturerID[MAX_DIMMS_SUPPORTED];
char DimmPartNumber[MAX_DIMMS_SUPPORTED][SPD_PARTN_LENGTH];
char DimmPartNumber[MAX_DIMMS_SUPPORTED][SPD_PARTN_LENGTH+1];
uint16_t DimmRevisionNumber[MAX_DIMMS_SUPPORTED];
uint32_t DimmSerialNumber[MAX_DIMMS_SUPPORTED];
} __attribute__((packed));
@@ -594,17 +594,18 @@ struct DCTStatStruc { /* A per Node structure*/
266=266MHz (DDR533)
333=333MHz (DDR667)
400=400MHz (DDR800)*/
#define NV_ECC_CAP 4 /* Bus ECC capable (1-bits)
#define NV_MIN_MEMCLK 4 /* Minimum platform demonstrated Memclock (10-bits) */
#define NV_ECC_CAP 5 /* Bus ECC capable (1-bits)
0=Platform not capable
1=Platform is capable*/
#define NV_4RANKType 5 /* Quad Rank DIMM slot type (2-bits)
#define NV_4RANKType 6 /* Quad Rank DIMM slot type (2-bits)
0=Normal
1=R4 (4-Rank Registered DIMMs in AMD server configuration)
2=S4 (Unbuffered SO-DIMMs)*/
#define NV_BYPMAX 6 /* Value to set DcqBypassMax field (See Function 2, Offset 94h, [27:24] of BKDG for field definition).
#define NV_BYPMAX 7 /* Value to set DcqBypassMax field (See Function 2, Offset 94h, [27:24] of BKDG for field definition).
4=4 times bypass (normal for non-UMA systems)
7=7 times bypass (normal for UMA systems)*/
#define NV_RDWRQBYP 7 /* Value to set RdWrQByp field (See Function 2, Offset A0h, [3:2] of BKDG for field definition).
#define NV_RDWRQBYP 8 /* Value to set RdWrQByp field (See Function 2, Offset A0h, [3:2] of BKDG for field definition).
2=8 times (normal for non-UMA systems)
3=16 times (normal for UMA systems)*/
@@ -667,8 +668,9 @@ struct DCTStatStruc { /* A per Node structure*/
#define NV_ECCRedir 54 /* Dram ECC Redirection enable*/
#define NV_DramBKScrub 55 /* Dram ECC Background Scrubber CTL*/
#define NV_L2BKScrub 56 /* L2 ECC Background Scrubber CTL*/
#define NV_DCBKScrub 57 /* DCache ECC Background Scrubber CTL*/
#define NV_CS_SpareCTL 58 /* Chip Select Spare Control bit 0:
#define NV_L3BKScrub 57 /* L3 ECC Background Scrubber CTL*/
#define NV_DCBKScrub 58 /* DCache ECC Background Scrubber CTL*/
#define NV_CS_SpareCTL 59 /* Chip Select Spare Control bit 0:
0=disable Spare
1=enable Spare */
/* Chip Select Spare Control bit 1-4:
@@ -708,7 +710,7 @@ u8 mct_Get_Start_RcvrEnDly_1Pass(u8 Pass);
u8 mct_Average_RcvrEnDly_Pass(struct DCTStatStruc *pDCTstat, u8 RcvrEnDly, u8 RcvrEnDlyLimit, u8 Channel, u8 Receiver, u8 Pass);
void CPUMemTyping_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA);
void UMAMemTyping_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA);
u32 mctGetLogicalCPUID(u32 Node);
uint64_t mctGetLogicalCPUID(u32 Node);
u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA);
void TrainReceiverEn_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA, u8 Pass);
void mct_TrainDQSPos_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA);


@@ -2,6 +2,7 @@
* This file is part of the coreboot project.
*
* Copyright (C) 2007 Advanced Micro Devices, Inc.
* Copyright (C) 2015 Timothy Pearson <tpearson@raptorengineeringinc.com>, Raptor Engineering
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,7 +20,7 @@ void EarlySampleSupport_D(void)
u32 procOdtWorkaround(struct DCTStatStruc *pDCTstat, u32 dct, u32 val)
{
u32 tmp;
uint64_t tmp;
tmp = pDCTstat->LogicalCPUID;
if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) {
val &= 0x0FFFFFFF;
@@ -38,7 +39,7 @@ u32 OtherTiming_A_D(struct DCTStatStruc *pDCTstat, u32 val)
* ( F2x[1, 0]8C[1:0] > 00b). Silicon Status: Fixed in Rev B
* FIXME: check if this is still required.
*/
u32 tmp;
uint64_t tmp;
tmp = pDCTstat->LogicalCPUID;
if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) {
if(!(val & (3<<12) ))
@@ -50,7 +51,7 @@ u32 OtherTiming_A_D(struct DCTStatStruc *pDCTstat, u32 val)
void mct_ForceAutoPrecharge_D(struct DCTStatStruc *pDCTstat, u32 dct)
{
u32 tmp;
uint64_t tmp;
u32 reg;
u32 reg_off;
u32 dev;
@@ -92,7 +93,7 @@ void mct_EndDQSTraining_D(struct MCTStatStruc *pMCTstat,
* FIXME: check this.
*/
u32 tmp;
uint64_t tmp;
u32 dev;
u32 reg;
u32 val;
@@ -139,10 +140,9 @@ void mct_BeforeDQSTrain_Samp_D(struct MCTStatStruc *pMCTstat,
u32 index;
u32 reg;
u32 val;
u32 tmp;
uint64_t tmp;
u32 Channel;
tmp = pDCTstat->LogicalCPUID;
if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) {
@@ -202,7 +202,7 @@ u32 Modify_D3CMP(struct DCTStatStruc *pDCTstat, u32 dct, u32 value)
u32 index_reg;
u32 index;
u32 val;
u32 tmp;
uint64_t tmp;
tmp = pDCTstat->LogicalCPUID;
if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) {
@@ -233,7 +233,7 @@ void SyncSetting(struct DCTStatStruc *pDCTstat)
* Silicon Status: Fix TBD
*/
u32 tmp;
uint64_t tmp;
tmp = pDCTstat->LogicalCPUID;
if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) {
pDCTstat->CH_ODC_CTL[1] = pDCTstat->CH_ODC_CTL[0];
@@ -274,7 +274,7 @@ u32 CheckNBCOFAutoPrechg(struct DCTStatStruc *pDCTstat, u32 dct)
void mct_BeforeDramInit_D(struct DCTStatStruc *pDCTstat, u32 dct)
{
u32 tmp;
uint64_t tmp;
u32 Speed;
u32 ch, ch_start, ch_end;
u32 index_reg;
@@ -282,7 +282,6 @@ void mct_BeforeDramInit_D(struct DCTStatStruc *pDCTstat, u32 dct)
u32 dev;
u32 val;
tmp = pDCTstat->LogicalCPUID;
if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) {
Speed = pDCTstat->Speed;
@@ -327,7 +326,7 @@ static u8 mct_checkFenceHoleAdjust_D(struct MCTStatStruc *pMCTstat,
u8 ChipSel, u8 *result)
{
u8 ByteLane;
u32 tmp;
uint64_t tmp;
tmp = pDCTstat->LogicalCPUID;
if ((tmp == AMD_DR_A0A) || (tmp == AMD_DR_A1B) || (tmp == AMD_DR_A2)) {

File diff suppressed because it is too large.


@@ -72,6 +72,8 @@
/* #define PA_EXT_DCTADDL (((00 << 3)+5) << 8) */ /*Node x DCT function, Additional Registers PCI Address bits [15:0]*/
#define PA_NBMISC(Node) ((((0x18+Node) << 3)+3) << 12) /*Node 0 Misc PCI Address bits [15:0]*/
#define PA_LINK(Node) ((((0x18+Node) << 3)+4) << 12) /*Node 0 Link Control bits [15:0]*/
#define PA_NBCTL(Node) ((((0x18+Node) << 3)+5) << 12) /*Node 0 NB Control PCI Address bits [15:0]*/
/* #define PA_NBDEVOP (((00 << 3)+3) << 8) */ /*Node 0 Misc PCI Address bits [15:0]*/
#define DCC_EN 1 /* X:2:0x94[19]*/
@@ -125,7 +127,7 @@
#define X4Dimm 12 /* func 2, offset 90h, bit 12*/
#define UnBuffDimm 16 /* func 2, offset 90h, bit 16*/
#define DimmEcEn 19 /* func 2, offset 90h, bit 19*/
#define MemClkFreqVal 3 /* func 2, offset 94h, bit 3*/
#define MemClkFreqVal ((is_fam15h())?7:3) /* func 2, offset 94h, bit 3 or 7*/
#define RDqsEn 12 /* func 2, offset 94h, bit 12*/
#define DisDramInterface 14 /* func 2, offset 94h, bit 14*/
#define PowerDownEn 15 /* func 2, offset 94h, bit 15*/
@@ -200,6 +202,7 @@
#define JED_PROBEMSK 0x40 /*Analysis Probe installed*/
#define JED_RDIMM 0x1 /* RDIMM */
#define JED_MiniRDIMM 0x5 /* Mini-RDIMM */
#define JED_LRDIMM 0xb /* Load-reduced DIMM */
#define SPD_Density 4 /* Bank address bits,SDRAM capacity */
#define SPD_Addressing 5 /* Row/Column address bits */
#define SPD_Voltage 6 /* Supported voltage bitfield */
@@ -293,6 +296,7 @@ struct MCTStatStruc {
of sub 4GB dram hole for HW remapping.*/
u32 Sub4GCacheTop; /* If not zero, the 32-bit top of cacheable memory.*/
u32 SysLimit; /* LIMIT[39:8] (system address)*/
uint32_t TSCFreq;
} __attribute__((packed));
/*=============================================================================
@@ -317,6 +321,7 @@ struct MCTStatStruc {
struct DCTStatStruc { /* A per Node structure*/
/* DCTStatStruct_F - start */
u8 Node_ID; /* Node ID of current controller */
uint8_t stopDCT; /* Set if the DCT will be stopped */
u8 ErrCode; /* Current error condition of Node
0= no error
1= Variance Error, DCT is running but not in an optimal configuration.
@@ -460,7 +465,7 @@ struct DCTStatStruc { /* A per Node structure*/
/* CH A byte lane 0 - 7 maximum filtered window passing DQS delay value*/
/* CH B byte lane 0 - 7 minimum filtered window passing DQS delay value*/
/* CH B byte lane 0 - 7 maximum filtered window passing DQS delay value*/
u32 LogicalCPUID; /* The logical CPUID of the node*/
uint64_t LogicalCPUID; /* The logical CPUID of the node*/
u16 HostBiosSrvc1; /* Word sized general purpose field for use by host BIOS. Scratch space.*/
u32 HostBiosSrvc2; /* Dword sized general purpose field for use by host BIOS. Scratch space.*/
u16 DimmQRPresent; /* QuadRank DIMM present?*/
@@ -554,12 +559,20 @@ struct DCTStatStruc { /* A per Node structure*/
u8 ClToNB_flag; /* is used to restore ClLinesToNbDis bit after memory */
u32 NodeSysBase; /* for channel interleave usage */
/* Fam15h specific backup variables */
uint8_t SwNbPstateLoDis;
uint8_t NbPstateDisOnP0;
uint8_t NbPstateThreshold;
uint8_t NbPstateHi;
/* New for LB Support */
u8 NodePresent;
u32 dev_host;
u32 dev_map;
u32 dev_dct;
u32 dev_nbmisc;
u32 dev_link;
u32 dev_nbctl;
u8 TargetFreq;
u8 TargetCASL;
u8 CtrlWrd3;
@@ -592,9 +605,10 @@ struct DCTStatStruc { /* A per Node structure*/
uint8_t DimmBanks[MAX_DIMMS_SUPPORTED];
uint8_t DimmWidth[MAX_DIMMS_SUPPORTED];
uint8_t DimmRegistered[MAX_DIMMS_SUPPORTED];
uint8_t DimmLoadReduced[MAX_DIMMS_SUPPORTED];
uint64_t DimmManufacturerID[MAX_DIMMS_SUPPORTED];
char DimmPartNumber[MAX_DIMMS_SUPPORTED][SPD_PARTN_LENGTH];
char DimmPartNumber[MAX_DIMMS_SUPPORTED][SPD_PARTN_LENGTH+1];
uint16_t DimmRevisionNumber[MAX_DIMMS_SUPPORTED];
uint32_t DimmSerialNumber[MAX_DIMMS_SUPPORTED];
} __attribute__((packed));
@@ -697,7 +711,64 @@ struct amd_s3_persistent_mct_channel_data {
/* Other (1 dword) */
uint32_t f3x58;
/* TOTAL: 250 dwords */
/* Family 15h-specific registers (90 dwords) */
uint32_t f2x200;
uint32_t f2x204;
uint32_t f2x208;
uint32_t f2x20c;
uint32_t f2x210[4]; /* [nb pstate] */
uint32_t f2x214;
uint32_t f2x218;
uint32_t f2x21c;
uint32_t f2x22c;
uint32_t f2x230;
uint32_t f2x234;
uint32_t f2x238;
uint32_t f2x23c;
uint32_t f2x240;
uint32_t f2x9cx0d0fe003;
uint32_t f2x9cx0d0fe013;
uint32_t f2x9cx0d0f0_8_0_1f[9]; /* [lane]*/
uint32_t f2x9cx0d0f201f;
uint32_t f2x9cx0d0f211f;
uint32_t f2x9cx0d0f221f;
uint32_t f2x9cx0d0f801f;
uint32_t f2x9cx0d0f811f;
uint32_t f2x9cx0d0f821f;
uint32_t f2x9cx0d0fc01f;
uint32_t f2x9cx0d0fc11f;
uint32_t f2x9cx0d0fc21f;
uint32_t f2x9cx0d0f4009;
uint32_t f2x9cx0d0f0_8_0_02[9]; /* [lane]*/
uint32_t f2x9cx0d0f0_8_0_06[9]; /* [lane]*/
uint32_t f2x9cx0d0f0_8_0_0a[9]; /* [lane]*/
uint32_t f2x9cx0d0f2002;
uint32_t f2x9cx0d0f2102;
uint32_t f2x9cx0d0f2202;
uint32_t f2x9cx0d0f8002;
uint32_t f2x9cx0d0f8006;
uint32_t f2x9cx0d0f800a;
uint32_t f2x9cx0d0f8102;
uint32_t f2x9cx0d0f8106;
uint32_t f2x9cx0d0f810a;
uint32_t f2x9cx0d0fc002;
uint32_t f2x9cx0d0fc006;
uint32_t f2x9cx0d0fc00a;
uint32_t f2x9cx0d0fc00e;
uint32_t f2x9cx0d0fc012;
uint32_t f2x9cx0d0f2031;
uint32_t f2x9cx0d0f2131;
uint32_t f2x9cx0d0f2231;
uint32_t f2x9cx0d0f8031;
uint32_t f2x9cx0d0f8131;
uint32_t f2x9cx0d0f8231;
uint32_t f2x9cx0d0fc031;
uint32_t f2x9cx0d0fc131;
uint32_t f2x9cx0d0fc231;
uint32_t f2x9cx0d0f0_0_f_31[9]; /* [lane] */
uint32_t f2x9cx0d0f8021;
/* TOTAL: 340 dwords */
} __attribute__((packed));
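The f2x9cx... field names above follow the indirect DRAM phy register convention used throughout this patch: D18F2x98 is the index/status port and D18F2x9C the data port, so f2x9cx0d0fe013, for example, holds the value read back through index 0x0d0fe013. A minimal sketch of capturing one such register into this structure, reusing the Get_NB32_index_wait_DCT() accessor that appears later in the diff (the helper name and pointer wiring here are illustrative, not part of the patch):
/* Illustration only: persist one DRAM phy register for S3 resume. */
static void save_phy_reg_example(struct DCTStatStruc *pDCTstat,
		struct amd_s3_persistent_mct_channel_data *ch, uint8_t dct)
{
	/* Read D18F2x9C index 0x0D0FE013 through the F2x98 index port */
	ch->f2x9cx0d0fe013 =
		Get_NB32_index_wait_DCT(pDCTstat->dev_dct, dct, 0x98, 0x0d0fe013);
}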
struct amd_s3_persistent_node_data {
@@ -742,18 +813,19 @@ struct amd_s3_persistent_data {
Local Configuration Status (DCTStatStruc.Status[31:0])
===============================================================================*/
#define SB_Registered 0 /* All DIMMs are Registered*/
#define SB_ECCDIMMs 1 /* All banks ECC capable*/
#define SB_PARDIMMs 2 /* All banks Addr/CMD Parity capable*/
#define SB_DiagClks 3 /* Jedec ALL slots clock enable diag mode*/
#define SB_128bitmode 4 /* DCT in 128-bit mode operation*/
#define SB_64MuxedMode 5 /* DCT in 64-bit mux'ed mode.*/
#define SB_2TMode 6 /* 2T CMD timing mode is enabled.*/
#define SB_SWNodeHole 7 /* Remapping of Node Base on this Node to create a gap.*/
#define SB_HWHole 8 /* Memory Hole created on this Node using HW remapping.*/
#define SB_Over400MHz 9 /* DCT freq >= 400MHz flag*/
#define SB_DQSPos_Pass2 10 /* Used for TrainDQSPos DIMM0/1, when freq >= 400MHz*/
#define SB_DQSRcvLimit 11 /* Used for DQSRcvEnTrain to know we have reached the upper bound.*/
#define SB_ExtConfig 12 /* Indicates the default setting for extended PCI configuration support*/
#define SB_LoadReduced 1 /* All DIMMs are Load-Reduced*/
#define SB_ECCDIMMs 2 /* All banks ECC capable*/
#define SB_PARDIMMs 3 /* All banks Addr/CMD Parity capable*/
#define SB_DiagClks 4 /* Jedec ALL slots clock enable diag mode*/
#define SB_128bitmode 5 /* DCT in 128-bit mode operation*/
#define SB_64MuxedMode 6 /* DCT in 64-bit mux'ed mode.*/
#define SB_2TMode 7 /* 2T CMD timing mode is enabled.*/
#define SB_SWNodeHole 8 /* Remapping of Node Base on this Node to create a gap.*/
#define SB_HWHole 9 /* Memory Hole created on this Node using HW remapping.*/
#define SB_Over400MHz 10 /* DCT freq >= 400MHz flag*/
#define SB_DQSPos_Pass2 11 /* Used for TrainDQSPos DIMM0/1, when freq >= 400MHz*/
#define SB_DQSRcvLimit 12 /* Used for DQSRcvEnTrain to know we have reached the upper bound.*/
#define SB_ExtConfig 13 /* Indicates the default setting for extended PCI configuration support*/
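Note that the SB_* constants are bit positions within DCTStatStruc.Status rather than masks, so adding SB_LoadReduced at position 1 renumbers every flag after it; callers always form the mask with a shift. A standalone illustration of the convention (values copied from the defines above):
#include <stdint.h>
#include <stdio.h>

#define SB_Registered	0	/* All DIMMs are Registered */
#define SB_LoadReduced	1	/* All DIMMs are Load-Reduced */

int main(void)
{
	uint32_t status = 0;

	status |= (1 << SB_LoadReduced);	/* record an LRDIMM channel */

	if (status & (1 << SB_LoadReduced))	/* same shift to test the flag */
		printf("load-reduced DIMMs present\n");

	return 0;
}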
/*===============================================================================
@@ -771,17 +843,18 @@ struct amd_s3_persistent_data {
266=266MHz (DDR533)
333=333MHz (DDR667)
400=400MHz (DDR800)*/
#define NV_ECC_CAP 4 /* Bus ECC capable (1-bits)
#define NV_MIN_MEMCLK 4 /* Minimum platform demonstrated Memclock (10-bits) */
#define NV_ECC_CAP 5 /* Bus ECC capable (1-bits)
0=Platform not capable
1=Platform is capable*/
#define NV_4RANKType 5 /* Quad Rank DIMM slot type (2-bits)
#define NV_4RANKType 6 /* Quad Rank DIMM slot type (2-bits)
0=Normal
1=R4 (4-Rank Registered DIMMs in AMD server configuration)
2=S4 (Unbuffered SO-DIMMs)*/
#define NV_BYPMAX 6 /* Value to set DcqBypassMax field (See Function 2, Offset 94h, [27:24] of BKDG for field definition).
#define NV_BYPMAX 7 /* Value to set DcqBypassMax field (See Function 2, Offset 94h, [27:24] of BKDG for field definition).
4=4 times bypass (normal for non-UMA systems)
7=7 times bypass (normal for UMA systems)*/
#define NV_RDWRQBYP 7 /* Value to set RdWrQByp field (See Function 2, Offset A0h, [3:2] of BKDG for field definition).
#define NV_RDWRQBYP 8 /* Value to set RdWrQByp field (See Function 2, Offset A0h, [3:2] of BKDG for field definition).
2=8 times (normal for non-UMA systems)
3=16 times (normal for UMA systems)*/
@@ -844,8 +917,9 @@ struct amd_s3_persistent_data {
#define NV_ECCRedir 54 /* Dram ECC Redirection enable*/
#define NV_DramBKScrub 55 /* Dram ECC Background Scrubber CTL*/
#define NV_L2BKScrub 56 /* L2 ECC Background Scrubber CTL*/
#define NV_DCBKScrub 57 /* DCache ECC Background Scrubber CTL*/
#define NV_CS_SpareCTL 58 /* Chip Select Spare Control bit 0:
#define NV_L3BKScrub 57 /* L3 ECC Background Scrubber CTL*/
#define NV_DCBKScrub 58 /* DCache ECC Background Scrubber CTL*/
#define NV_CS_SpareCTL 59 /* Chip Select Spare Control bit 0:
0=disable Spare
1=enable Spare */
/* Chip Select Spare Control bit 1-4:
@@ -896,10 +970,12 @@ void mct_SetRcvrEnDly_D(struct DCTStatStruc *pDCTstat, u16 RcvrEnDly, u8 FinalVa
void SetEccDQSRcvrEn_D(struct DCTStatStruc *pDCTstat, u8 Channel);
void mctGet_PS_Cfg_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u32 dct);
void InterleaveBanks_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 dct);
void mct_SetDramConfigHi_D(struct DCTStatStruc *pDCTstat, u32 dct, u32 DramConfigHi);
void mct_SetDramConfigHi_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u32 dct, u32 DramConfigHi);
void mct_DramInit_Hw_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 dct);
void mct_SetClToNB_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat);
void mct_SetWbEnhWsbDis_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat);
void mct_ForceNBPState0_En_Fam15(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat);
void mct_ForceNBPState0_Dis_Fam15(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat);
void mct_TrainRcvrEn_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 Pass);
void mct_EnableDimmEccEn_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat, u8 _DisableDramECC);
u32 procOdtWorkaround(struct DCTStatStruc *pDCTstat, u32 dct, u32 val);

View File

@@ -94,6 +94,15 @@ static u32 bsf(u32 x)
u32 SetUpperFSbase(u32 addr_hi);
static void proc_MFENCE(void)
{
__asm__ volatile (
"outb %%al, $0xed\n\t" /* _EXECFENCE */
"mfence\n\t"
:::"memory"
);
}
static void proc_CLFLUSH(u32 addr_hi)
{
SetUpperFSbase(addr_hi);

View File

@@ -2,6 +2,7 @@
* This file is part of the coreboot project.
*
* Copyright (C) 2010 Advanced Micro Devices, Inc.
* Copyright (C) 2015 Timothy Pearson <tpearson@raptorengineeringinc.com>, Raptor Engineering
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -13,6 +14,8 @@
* GNU General Public License for more details.
*/
/* AM3/ASB2/C32/G34 DDR3 */
static void Get_ChannelPS_Cfg0_D(u8 MAAdimms, u8 Speed, u8 MAAload,
u32 *AddrTmgCTL, u32 *ODC_CTL,
u8 *CMDmode);
@@ -20,17 +23,23 @@ static void Get_ChannelPS_Cfg0_D(u8 MAAdimms, u8 Speed, u8 MAAload,
void mctGet_PS_Cfg_D(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat, u32 dct)
{
if (is_fam15h()) {
pDCTstat->CH_ADDR_TMG[dct] = fam15h_address_timing_compensation_code(pDCTstat, dct);
pDCTstat->CH_ODC_CTL[dct] = fam15h_output_driver_compensation_code(pDCTstat, dct);
pDCTstat->_2Tmode = fam15h_slow_access_mode(pDCTstat, dct);
} else {
Get_ChannelPS_Cfg0_D(pDCTstat->MAdimms[dct], pDCTstat->Speed,
pDCTstat->MAload[dct],
&(pDCTstat->CH_ADDR_TMG[dct]), &(pDCTstat->CH_ODC_CTL[dct]),
&pDCTstat->_2Tmode);
pDCTstat->CH_ODC_CTL[dct] |= 0x20000000; /* 60ohms */
}
pDCTstat->CH_EccDQSLike[0] = 0x0403;
pDCTstat->CH_EccDQSScale[0] = 0x70;
pDCTstat->CH_EccDQSLike[1] = 0x0403;
pDCTstat->CH_EccDQSScale[1] = 0x70;
pDCTstat->CH_ODC_CTL[dct] |= 0x20000000; /* 60ohms */
}
/*

View File

@@ -2,6 +2,7 @@
* This file is part of the coreboot project.
*
* Copyright (C) 2010 Advanced Micro Devices, Inc.
* Copyright (C) 2015 Timothy Pearson <tpearson@raptorengineeringinc.com>, Raptor Engineering
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -31,7 +32,6 @@ void InterleaveBanks_D(struct MCTStatStruc *pMCTstat,
u32 dev;
u32 reg;
u32 reg_off;
u32 val;
u32 val_lo, val_hi;
@@ -40,16 +40,15 @@ void InterleaveBanks_D(struct MCTStatStruc *pMCTstat,
EnChipSels = 0;
dev = pDCTstat->dev_dct;
reg_off = 0x100 * dct;
ChipSel = 0; /* Find out if current configuration is capable */
while (DoIntlv && (ChipSel < MAX_CS_SUPPORTED)) {
reg = 0x40+(ChipSel<<2) + reg_off; /* Dram CS Base 0 */
val = Get_NB32(dev, reg);
reg = 0x40+(ChipSel<<2); /* Dram CS Base 0 */
val = Get_NB32_DCT(dev, dct, reg);
if ( val & (1<<CSEnable)) {
EnChipSels++;
reg = 0x60+((ChipSel>>1)<<2)+reg_off; /*Dram CS Mask 0 */
val = Get_NB32(dev, reg);
reg = 0x60+((ChipSel>>1)<<2); /*Dram CS Mask 0 */
val = Get_NB32_DCT(dev, dct, reg);
val >>= 19;
val &= 0x3ff;
val++;
@@ -59,8 +58,8 @@ void InterleaveBanks_D(struct MCTStatStruc *pMCTstat,
/*If mask sizes not same then skip */
if (val != MemSize)
break;
reg = 0x80 + reg_off; /*Dram Bank Addressing */
val = Get_NB32(dev, reg);
reg = 0x80; /*Dram Bank Addressing */
val = Get_NB32_DCT(dev, dct, reg);
val >>= (ChipSel>>1)<<2;
val &= 0x0f;
if(EnChipSels == 1)
@@ -99,8 +98,8 @@ void InterleaveBanks_D(struct MCTStatStruc *pMCTstat,
BitDelta = bsf(AddrHiMask) - bsf(AddrLoMask);
for (ChipSel = 0; ChipSel < MAX_CS_SUPPORTED; ChipSel++) {
reg = 0x40+(ChipSel<<2) + reg_off; /*Dram CS Base 0 */
val = Get_NB32(dev, reg);
reg = 0x40+(ChipSel<<2); /*Dram CS Base 0 */
val = Get_NB32_DCT(dev, dct, reg);
if (val & 3) {
val_lo = val & AddrLoMask;
val_hi = val & AddrHiMask;
@@ -110,13 +109,13 @@ void InterleaveBanks_D(struct MCTStatStruc *pMCTstat,
val_hi >>= BitDelta;
val |= val_lo;
val |= val_hi;
Set_NB32(dev, reg, val);
Set_NB32_DCT(dev, dct, reg, val);
if(ChipSel & 1)
continue;
reg = 0x60 + ((ChipSel>>1)<<2) + reg_off; /*Dram CS Mask 0 */
val = Get_NB32(dev, reg);
reg = 0x60 + ((ChipSel>>1)<<2); /*Dram CS Mask 0 */
val = Get_NB32_DCT(dev, dct, reg);
val_lo = val & AddrLoMask;
val_hi = val & AddrHiMask;
val &= AddrLoMaskN;
@@ -125,7 +124,7 @@ void InterleaveBanks_D(struct MCTStatStruc *pMCTstat,
val_hi >>= BitDelta;
val |= val_lo;
val |= val_hi;
Set_NB32(dev, reg, val);
Set_NB32_DCT(dev, dct, reg, val);
}
}
} /* DoIntlv */

File diff suppressed because it is too large

View File

@@ -2,6 +2,7 @@
* This file is part of the coreboot project.
*
* Copyright (C) 2010 Advanced Micro Devices, Inc.
* Copyright (C) 2015 Timothy Pearson <tpearson@raptorengineeringinc.com>, Raptor Engineering
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -92,14 +93,16 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA)
OB_ECCRedir = mctGet_NVbits(NV_ECCRedir); /* ECC Redirection */
OB_ChipKill = mctGet_NVbits(NV_ChipKill); /* ECC Chip-kill mode */
OF_ScrubCTL = 0; /* Scrub CTL for Dcache, L2, and dram */
if (!is_fam15h()) {
nvbits = mctGet_NVbits(NV_DCBKScrub);
/* mct_AdjustScrub_D(pDCTstatA, &nvbits); */ /* Need not adjust */
OF_ScrubCTL |= (u32) nvbits << 16;
nvbits = mctGet_NVbits(NV_L2BKScrub);
OF_ScrubCTL |= (u32) nvbits << 8;
}
nvbits = mctGet_NVbits(NV_DramBKScrub);
OF_ScrubCTL |= nvbits;
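The shifts above assemble the scrub control dword that is later written to F3x58 with Set_NB32(dev, 0x58, OF_ScrubCTL); on Fam15h only the DRAM scrub rate is programmed here, and the Dcache/L2 fields are left at zero. A standalone sketch of the packing implied by the shifts in this function (field positions are taken from the code, not restated from the BKDG):
#include <stdint.h>

/* Pack NVRAM scrub rates the way ECCInit_D() does:
 * DRAM rate in bits [4:0], L2 in [12:8], Dcache in [20:16]. */
static uint32_t pack_scrub_ctl(uint8_t dram, uint8_t l2, uint8_t dcache,
		int fam15h)
{
	uint32_t ctl = 0;

	if (!fam15h) {		/* Fam15h skips the Dcache/L2 scrubbers here */
		ctl |= (uint32_t)dcache << 16;
		ctl |= (uint32_t)l2 << 8;
	}
	ctl |= dram;
	return ctl;
}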
@@ -173,6 +176,10 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA)
/*WE/RE is checked because memory config may have been */
if((val & 3)==3) { /* Node has dram populated */
if (isDramECCEn_D(pDCTstat)) { /* if ECC is enabled on this dram */
if (is_fam15h()) {
/* Erratum 505 */
fam15h_switch_dct(pDCTstat->dev_map, 0);
}
dev = pDCTstat->dev_nbmisc;
val = curBase << 8;
if(OB_ECCRedir) {
@@ -183,6 +190,7 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA)
Set_NB32(dev, 0x60, val); /* Dram Scrub Addr High */
Set_NB32(dev, 0x58, OF_ScrubCTL); /*Scrub Control */
if (!is_fam15h()) {
/* Divisor should not be set deeper than
* divide by 16 when Dcache scrubber or
* L2 scrubber is enabled.
@@ -195,6 +203,7 @@ u8 ECCInit_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstatA)
Set_NB32(dev, 0x84, val);
}
}
}
} /* this node has ECC enabled dram */
} /*Node has Dram */
} /*if Node present */
@@ -263,8 +272,8 @@ static u8 isDramECCEn_D(struct DCTStatStruc *pDCTstat)
}
for(i=0; i<ch_end; i++) {
if(pDCTstat->DIMMValidDCT[i] > 0){
reg = 0x90 + i * 0x100; /* Dram Config Low */
val = Get_NB32(dev, reg);
reg = 0x90; /* Dram Config Low */
val = Get_NB32_DCT(dev, i, reg);
if(val & (1<<DimmEcEn)) {
/* set local flag 'dram ecc capable' */
isDimmECCEn = 1;

View File

@@ -2,6 +2,7 @@
* This file is part of the coreboot project.
*
* Copyright (C) 2010 Advanced Micro Devices, Inc.
* Copyright (C) 2015 Timothy Pearson <tpearson@raptorengineeringinc.com>, Raptor Engineering
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -21,8 +22,8 @@ void mct_DramInit_Hw_D(struct MCTStatStruc *pMCTstat,
u32 dev = pDCTstat->dev_dct;
/*flag for selecting HW/SW DRAM Init HW DRAM Init */
reg = 0x90 + 0x100 * dct; /*DRAM Configuration Low */
val = Get_NB32(dev, reg);
reg = 0x90; /*DRAM Configuration Low */
val = Get_NB32_DCT(dev, dct, reg);
val |= (1<<InitDram);
Set_NB32(dev, reg, val);
Set_NB32_DCT(dev, dct, reg, val);
}

View File

@@ -14,10 +14,12 @@
* GNU General Public License for more details.
*/
static void SetTargetFreq(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat);
static void AgesaHwWlPhase1(sMCTStruct *pMCTData,
sDCTStruct *pDCTData, u8 dimm, u8 pass);
static void AgesaHwWlPhase1(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat, u8 dct, u8 dimm, u8 pass);
static void AgesaHwWlPhase2(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat, u8 dct, u8 dimm, u8 pass);
static void AgesaHwWlPhase3(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat, u8 dct, u8 dimm, u8 pass);
static void EnableZQcalibration(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat);
static void DisableZQcalibration(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat);
static void PrepareC_MCT(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDCTstat);
@@ -52,7 +54,7 @@ static void SetEccWrDQS_D(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pD
Addl_Index = 0x32;
Addl_Index += DimmNum * 3;
val = Get_NB32_index_wait(pDCTstat->dev_dct, Channel * 0x100 + 0x98, Addl_Index);
val = Get_NB32_index_wait_DCT(pDCTstat->dev_dct, Channel, 0x98, Addl_Index);
if (OddByte)
val >>= 16;
/* Save WrDqs to stack for later usage */
@@ -70,13 +72,13 @@ static void EnableAutoRefresh_D(struct MCTStatStruc *pMCTstat, struct DCTStatStr
{
u32 val;
val = Get_NB32(pDCTstat->dev_dct, 0x8C);
val = Get_NB32_DCT(pDCTstat->dev_dct, 0, 0x8C);
val &= ~(1 << DisAutoRefresh);
Set_NB32(pDCTstat->dev_dct, 0x8C, val);
Set_NB32_DCT(pDCTstat->dev_dct, 0, 0x8C, val);
val = Get_NB32(pDCTstat->dev_dct, 0x8C + 0x100);
val = Get_NB32_DCT(pDCTstat->dev_dct, 1, 0x8C);
val &= ~(1 << DisAutoRefresh);
Set_NB32(pDCTstat->dev_dct, 0x8C + 0x100, val);
Set_NB32_DCT(pDCTstat->dev_dct, 1, 0x8C, val);
}
static void DisableAutoRefresh_D(struct MCTStatStruc *pMCTstat,
@@ -84,13 +86,13 @@ static void DisableAutoRefresh_D(struct MCTStatStruc *pMCTstat,
{
u32 val;
val = Get_NB32(pDCTstat->dev_dct, 0x8C);
val = Get_NB32_DCT(pDCTstat->dev_dct, 0, 0x8C);
val |= 1 << DisAutoRefresh;
Set_NB32(pDCTstat->dev_dct, 0x8C, val);
Set_NB32_DCT(pDCTstat->dev_dct, 0, 0x8C, val);
val = Get_NB32(pDCTstat->dev_dct, 0x8C + 0x100);
val = Get_NB32_DCT(pDCTstat->dev_dct, 1, 0x8C);
val |= 1 << DisAutoRefresh;
Set_NB32(pDCTstat->dev_dct, 0x8C + 0x100, val);
Set_NB32_DCT(pDCTstat->dev_dct, 1, 0x8C, val);
}
@@ -114,8 +116,11 @@ static void PhyWLPass1(struct MCTStatStruc *pMCTstat,
DIMMValid = pDCTstat->DIMMValid;
PrepareC_DCT(pMCTstat, pDCTstat, dct);
for (dimm = 0; dimm < MAX_DIMMS_SUPPORTED; dimm ++) {
if (DIMMValid & (1 << (dimm << 1)))
AgesaHwWlPhase1(pDCTstat->C_MCTPtr, DCTPtr, dimm, FirstPass);
if (DIMMValid & (1 << (dimm << 1))) {
AgesaHwWlPhase1(pMCTstat, pDCTstat, dct, dimm, FirstPass);
AgesaHwWlPhase2(pMCTstat, pDCTstat, dct, dimm, FirstPass);
AgesaHwWlPhase3(pMCTstat, pDCTstat, dct, dimm, FirstPass);
}
}
}
}
@@ -142,27 +147,40 @@ static void PhyWLPass2(struct MCTStatStruc *pMCTstat,
pDCTstat->Speed = pDCTstat->DIMMAutoSpeed = pDCTstat->TargetFreq;
pDCTstat->CASL = pDCTstat->DIMMCASL = pDCTstat->TargetCASL;
SPD2ndTiming(pMCTstat, pDCTstat, dct);
if (!is_fam15h()) {
ProgDramMRSReg_D(pMCTstat, pDCTstat, dct);
PlatformSpec_D(pMCTstat, pDCTstat, dct);
fenceDynTraining_D(pMCTstat, pDCTstat, dct);
}
Restore_OnDimmMirror(pMCTstat, pDCTstat);
StartupDCT_D(pMCTstat, pDCTstat, dct);
Clear_OnDimmMirror(pMCTstat, pDCTstat);
SetDllSpeedUp_D(pMCTstat, pDCTstat, dct);
DisableAutoRefresh_D(pMCTstat, pDCTstat);
for (dimm = 0; dimm < MAX_DIMMS_SUPPORTED; dimm ++) {
if (DIMMValid & (1 << (dimm << 1)))
AgesaHwWlPhase1(pDCTstat->C_MCTPtr, pDCTstat->C_DCTPtr[dct], dimm, SecondPass);
if (DIMMValid & (1 << (dimm << 1))) {
AgesaHwWlPhase1(pMCTstat, pDCTstat, dct, dimm, SecondPass);
AgesaHwWlPhase2(pMCTstat, pDCTstat, dct, dimm, SecondPass);
AgesaHwWlPhase3(pMCTstat, pDCTstat, dct, dimm, SecondPass);
}
}
}
}
static uint16_t fam15h_next_highest_memclk_freq(uint16_t memclk_freq)
{
uint16_t fam15h_next_highest_freq_tab[] = {0, 0, 0, 0, 0x6, 0, 0xa, 0, 0, 0, 0xe, 0, 0, 0, 0x12, 0, 0, 0, 0x16, 0, 0, 0, 0x16};
return fam15h_next_highest_freq_tab[memclk_freq];
}
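Indexed by the current MemClkFreq code, the table above returns the next supported code and saturates at 0x16. Assuming the Fam15h MemClkFreq encoding implied by the 5-bit field used elsewhere in this patch (0x4 for 333 MHz up through 0x16 for 933 MHz), stepping from DDR3-800 to DDR3-1600 visits 0x6, 0xa, 0xe and finally 0x12, one SetTargetFreq/write-levelling cycle per hop, which is how the while loop in WriteLevelization_HW() below consumes it. A standalone illustration:
#include <stdint.h>
#include <stdio.h>

static uint16_t fam15h_next_highest_memclk_freq(uint16_t memclk_freq)
{
	uint16_t tab[] = {0, 0, 0, 0, 0x6, 0, 0xa, 0, 0, 0, 0xe, 0, 0, 0,
			  0x12, 0, 0, 0, 0x16, 0, 0, 0, 0x16};
	return tab[memclk_freq];
}

int main(void)
{
	uint16_t speed = 0x6;	/* assumed 400 MHz (DDR3-800) code */
	uint16_t target = 0x12;	/* assumed 800 MHz (DDR3-1600) code */

	while (speed != target) {
		speed = fam15h_next_highest_memclk_freq(speed);
		printf("step to MemClkFreq code 0x%x\n", speed);
	}
	return 0;
}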
/* Write Levelization Training
* Algorithm detailed in the Fam10h BKDG Rev. 3.62 section 2.8.9.9.1
*/
static void WriteLevelization_HW(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat)
struct DCTStatStruc *pDCTstat, uint8_t Pass)
{
uint16_t final_target_freq;
pDCTstat->C_MCTPtr = &(pDCTstat->s_C_MCTPtr);
pDCTstat->C_DCTPtr[0] = &(pDCTstat->s_C_DCTPtr[0]);
pDCTstat->C_DCTPtr[1] = &(pDCTstat->s_C_DCTPtr[1]);
@@ -178,25 +196,48 @@ static void WriteLevelization_HW(struct MCTStatStruc *pMCTstat,
pDCTstat->DIMMValidDCT[1] = pDCTstat->DIMMValidDCT[0];
}
if (Pass == FirstPass) {
PhyWLPass1(pMCTstat, pDCTstat, 0);
PhyWLPass1(pMCTstat, pDCTstat, 1);
}
if (pDCTstat->TargetFreq > 4) {
if (Pass == SecondPass) {
if (pDCTstat->TargetFreq > mhz_to_memclk_config(mctGet_NVbits(NV_MIN_MEMCLK))) {
/* 8.Prepare the memory subsystem for the target MEMCLK frequency.
* Note: BIOS must program both DCTs to the same frequency.
* NOTE: BIOS must program both DCTs to the same frequency.
* NOTE: Fam15h steps the frequency, Fam10h slams the frequency.
*/
final_target_freq = pDCTstat->TargetFreq;
while (pDCTstat->Speed != final_target_freq) {
if (is_fam15h())
pDCTstat->TargetFreq = fam15h_next_highest_memclk_freq(pDCTstat->Speed);
else
pDCTstat->TargetFreq = final_target_freq;
SetTargetFreq(pMCTstat, pDCTstat);
PhyWLPass2(pMCTstat, pDCTstat, 0);
PhyWLPass2(pMCTstat, pDCTstat, 1);
}
pDCTstat->TargetFreq = final_target_freq;
uint8_t dct;
for (dct = 0; dct < 2; dct++) {
sDCTStruct *pDCTData = pDCTstat->C_DCTPtr[dct];
memcpy(pDCTData->WLGrossDelayFinalPass, pDCTData->WLGrossDelayPrevPass, sizeof(pDCTData->WLGrossDelayPrevPass));
memcpy(pDCTData->WLFineDelayFinalPass, pDCTData->WLFineDelayPrevPass, sizeof(pDCTData->WLFineDelayPrevPass));
pDCTData->WLCriticalGrossDelayFinalPass = pDCTData->WLCriticalGrossDelayPrevPass;
}
}
}
SetEccWrDQS_D(pMCTstat, pDCTstat);
EnableAutoRefresh_D(pMCTstat, pDCTstat);
EnableZQcalibration(pMCTstat, pDCTstat);
}
void mct_WriteLevelization_HW(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstatA)
struct DCTStatStruc *pDCTstatA, uint8_t Pass)
{
u8 Node;
@@ -207,7 +248,7 @@ void mct_WriteLevelization_HW(struct MCTStatStruc *pMCTstat,
if (pDCTstat->NodePresent) {
mctSMBhub_Init(Node);
Clear_OnDimmMirror(pMCTstat, pDCTstat);
WriteLevelization_HW(pMCTstat, pDCTstat);
WriteLevelization_HW(pMCTstat, pDCTstat, Pass);
Restore_OnDimmMirror(pMCTstat, pDCTstat);
}
}

View File

@@ -30,7 +30,7 @@ u32 mct_SetDramConfigMisc2(struct DCTStatStruc *pDCTstat, u8 dct, u32 misc2)
if (pDCTstat->LogicalCPUID & AMD_DR_Cx)
misc2 |= 1 << OdtSwizzle;
val = Get_NB32(pDCTstat->dev_dct, dct * 0x100 + 0x78);
val = Get_NB32_DCT(pDCTstat->dev_dct, dct, 0x78);
val &= 7;
val = ((~val) & 0xff) + 1;

View File

@@ -2,6 +2,7 @@
* This file is part of the coreboot project.
*
* Copyright (C) 2010 Advanced Micro Devices, Inc.
* Copyright (C) 2015 Timothy Pearson <tpearson@raptorengineeringinc.com>, Raptor Engineering
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -19,7 +20,6 @@ static u32 mct_ControlRC(struct MCTStatStruc *pMCTstat,
u8 Dimms, DimmNum, MaxDimm, Speed;
u32 val;
u32 dct = 0;
u32 reg_off = 0;
DimmNum = (MrsChipSel >> 20) & 0xFE;
@@ -37,7 +37,6 @@ static u32 mct_ControlRC(struct MCTStatStruc *pMCTstat,
dct = 1;
DimmNum ++;
}
reg_off = 0x100 * dct;
Dimms = pDCTstat->MAdimms[dct];
val = 0;
@@ -91,21 +90,21 @@ static u32 mct_ControlRC(struct MCTStatStruc *pMCTstat,
static void mct_SendCtrlWrd(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat, u32 val)
{
u32 reg_off = 0;
uint8_t dct = 0;
u32 dev = pDCTstat->dev_dct;
if (pDCTstat->CSPresent_DCT[0] > 0) {
reg_off = 0;
dct = 0;
} else if (pDCTstat->CSPresent_DCT[1] > 0 ){
reg_off = 0x100;
dct = 1;
}
val |= Get_NB32(dev, reg_off + 0x7C) & ~0xFFFFFF;
val |= Get_NB32_DCT(dev, dct, 0x7C) & ~0xFFFFFF;
val |= 1 << SendControlWord;
Set_NB32(dev, reg_off + 0x7C, val);
Set_NB32_DCT(dev, dct, 0x7C, val);
do {
val = Get_NB32(dev, reg_off + 0x7C);
val = Get_NB32_DCT(dev, dct, 0x7C);
} while (val & (1 << SendControlWord));
}
@@ -115,7 +114,6 @@ void mct_DramControlReg_Init_D(struct MCTStatStruc *pMCTstat,
u8 MrsChipSel;
u32 dev = pDCTstat->dev_dct;
u32 val, cw;
u32 reg_off = 0x100 * dct;
mct_Wait(1600);
@@ -123,7 +121,7 @@ void mct_DramControlReg_Init_D(struct MCTStatStruc *pMCTstat,
for (MrsChipSel = 0; MrsChipSel < 8; MrsChipSel ++, MrsChipSel ++) {
if (pDCTstat->CSPresent & (1 << MrsChipSel)) {
val = Get_NB32(dev, reg_off + 0xA8);
val = Get_NB32_DCT(dev, dct, 0xa8);
val &= ~(0xF << 8);
switch (MrsChipSel) {
@@ -140,7 +138,7 @@ void mct_DramControlReg_Init_D(struct MCTStatStruc *pMCTstat,
case 7:
val |= (3 << 6) << 8;
}
Set_NB32(dev, reg_off + 0xA8 , val);
Set_NB32_DCT(dev, dct, 0xa8, val);
for (cw=0; cw <=15; cw ++) {
mct_Wait(1600);
@@ -167,10 +165,10 @@ void FreqChgCtrlWrd(struct MCTStatStruc *pMCTstat,
for (MrsChipSel=0; MrsChipSel < 8; MrsChipSel++, MrsChipSel++) {
if (pDCTstat->CSPresent & (1 << MrsChipSel)) {
/* 2. Program F2x[1, 0]A8[CtrlWordCS]=bit mask for target chip selects. */
val = Get_NB32(dev, 0xA8); /* TODO: dct * 0x100 + 0xA8 */
val = Get_NB32_DCT(dev, 0, 0xA8); /* TODO: dct 0 / 1 select */
val &= ~(0xFF << 8);
val |= (0x3 << (MrsChipSel & 0xFE)) << 8;
Set_NB32(dev, 0xA8, val); /* TODO: dct * 0x100 + 0xA8 */
Set_NB32_DCT(dev, 0, 0xA8, val); /* TODO: dct 0 / 1 select */
/* Resend control word 10 */
mct_Wait(1600);

View File

@@ -14,17 +14,182 @@
* GNU General Public License for more details.
*/
static uint8_t fam15_dimm_dic(struct DCTStatStruc *pDCTstat, uint8_t dct, uint8_t dimm, uint8_t rank, uint8_t package_type)
{
uint8_t dic;
/* Calculate DIC based on recommendations in MR1_dct[1:0] */
if (pDCTstat->Status & (1 << SB_LoadReduced)) {
/* TODO
* LRDIMM unimplemented
*/
dic = 0x0;
} else {
dic = 0x1;
}
return dic;
}
static uint8_t fam15_rttwr(struct DCTStatStruc *pDCTstat, uint8_t dct, uint8_t dimm, uint8_t rank, uint8_t package_type)
{
uint8_t term = 0;
sDCTStruct *pDCTData = pDCTstat->C_DCTPtr[dct];
uint8_t number_of_dimms = pDCTData->MaxDimmsInstalled;
uint8_t frequency_index;
uint8_t rank_count = pDCTData->DimmRanks[dimm];
if (is_fam15h())
frequency_index = Get_NB32_DCT(pDCTstat->dev_dct, dct, 0x94) & 0x1f;
else
frequency_index = Get_NB32_DCT(pDCTstat->dev_dct, dct, 0x94) & 0x7;
/* FIXME
* Mainboards need to be able to specify the maximum number of DIMMs installable per channel
* For now assume a maximum of 2 DIMMs per channel can be installed
*/
uint8_t MaxDimmsInstallable = 2;
if (is_fam15h()) {
if (pDCTstat->Status & (1 << SB_Registered)) {
/* TODO
* RDIMM unimplemented
*/
} else {
if (package_type == PT_GR) {
/* Socket G34: Fam15h BKDG v3.14 Table 56 */
if (MaxDimmsInstallable == 1) {
term = 0x0;
} else if (MaxDimmsInstallable == 2) {
if ((number_of_dimms == 2) && (frequency_index == 0x12)) {
term = 0x1;
} else if (number_of_dimms == 1) {
term = 0x0;
} else {
term = 0x2;
}
} else if (MaxDimmsInstallable == 3) {
if (number_of_dimms == 1) {
if (frequency_index <= 0xa) {
term = 0x2;
} else {
if (rank_count < 3) {
term = 0x1;
} else {
term = 0x2;
}
}
} else if (number_of_dimms == 2) {
term = 0x2;
}
}
} else {
/* TODO
* Other sockets unimplemented
*/
}
}
}
return term;
}
static uint8_t fam15_rttnom(struct DCTStatStruc *pDCTstat, uint8_t dct, uint8_t dimm, uint8_t rank, uint8_t package_type)
{
uint8_t term = 0;
sDCTStruct *pDCTData = pDCTstat->C_DCTPtr[dct];
uint8_t number_of_dimms = pDCTData->MaxDimmsInstalled;
uint8_t frequency_index;
if (is_fam15h())
frequency_index = Get_NB32_DCT(pDCTstat->dev_dct, dct, 0x94) & 0x1f;
else
frequency_index = Get_NB32_DCT(pDCTstat->dev_dct, dct, 0x94) & 0x7;
/* FIXME
* Mainboards need to be able to specify the maximum number of DIMMs installable per channel
* For now assume a maximum of 2 DIMMs per channel can be installed
*/
uint8_t MaxDimmsInstallable = 2;
if (is_fam15h()) {
if (pDCTstat->Status & (1 << SB_LoadReduced)) {
/* TODO
* LRDIMM unimplemented
*/
} else if (pDCTstat->Status & (1 << SB_Registered)) {
/* TODO
* RDIMM unimplemented
*/
} else {
if (package_type == PT_GR) {
/* Socket G34: Fam15h BKDG v3.14 Table 56 */
if (MaxDimmsInstallable == 1) {
if ((frequency_index == 0x4) || (frequency_index == 0x6))
term = 0x2;
else if ((frequency_index == 0xa) || (frequency_index == 0xe))
term = 0x1;
else
term = 0x3;
}
if (MaxDimmsInstallable == 2) {
if (number_of_dimms == 1) {
if (frequency_index <= 0x6) {
term = 0x2;
} else if (frequency_index <= 0xe) {
term = 0x1;
} else {
term = 0x3;
}
} else {
if (frequency_index <= 0xa) {
term = 0x3;
} else if (frequency_index <= 0xe) {
term = 0x5;
} else {
term = 0x4;
}
}
} else if (MaxDimmsInstallable == 3) {
if (number_of_dimms == 1) {
term = 0x0;
} else if (number_of_dimms == 2) {
if (frequency_index <= 0xa) {
if (rank == 1) {
term = 0x0;
} else {
term = 0x3;
}
} else if (frequency_index <= 0xe) {
if (rank == 1) {
term = 0x0;
} else {
term = 0x5;
}
}
}
}
} else {
/* TODO
* Other sockets unimplemented
*/
}
}
}
return term;
}
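The codes returned by fam15_dimm_dic(), fam15_rttwr() and fam15_rttnom() are scattered into the DDR3 mode registers by mct_MR1()/mct_MR2() below: Rtt_Nom lands in MRS address bits A9/A6/A2, the driver impedance code in A5/A1, and Rtt_Wr in MR2 bits A10:A9. Assuming the standard DDR3 MR1 Rtt_Nom encoding (001b = RZQ/4, 010b = RZQ/2, 011b = RZQ/6, 100b = RZQ/12, 101b = RZQ/8), a standalone sketch of the bit placement used for Rtt_Nom:
#include <stdint.h>
#include <stdio.h>

/* Scatter a 3-bit Rtt_Nom code into DDR3 MR1 address bits A9, A6, A2,
 * mirroring the shifts in mct_MR1() below. */
static uint32_t place_rttnom(uint8_t rttnom)
{
	uint32_t mr1 = 0;

	mr1 |= ((rttnom & 0x4) >> 2) << 9;
	mr1 |= ((rttnom & 0x2) >> 1) << 6;
	mr1 |= ((rttnom & 0x1) >> 0) << 2;
	return mr1;
}

int main(void)
{
	/* Code 0x2 (RZQ/2 under the assumed encoding) sets only A6 */
	printf("MR1 Rtt_Nom bits: 0x%03x\n", place_rttnom(0x2));
	return 0;
}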
static void mct_DramControlReg_Init_D(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat, u8 dct);
static void mct_DCTAccessDone(struct DCTStatStruc *pDCTstat, u8 dct)
{
u32 reg_off = 0x100 * dct;
u32 dev = pDCTstat->dev_dct;
u32 val;
do {
val = Get_NB32(dev, reg_off + 0x98);
val = Get_NB32_DCT(dev, dct, 0x98);
} while (!(val & (1 << DctAccessDone)));
}
@@ -50,9 +215,15 @@ static u32 swapAddrBits(struct DCTStatStruc *pDCTstat, u32 MR_register_setting,
if (MR_register_setting & (1 << 6)) ret |= 1 << 5;
if (MR_register_setting & (1 << 7)) ret |= 1 << 8;
if (MR_register_setting & (1 << 8)) ret |= 1 << 7;
if (is_fam15h()) {
if (MR_register_setting & (1 << 18)) ret |= 1 << 19;
if (MR_register_setting & (1 << 19)) ret |= 1 << 18;
MR_register_setting &= ~0x000c01f8;
} else {
if (MR_register_setting & (1 << 16)) ret |= 1 << 17;
if (MR_register_setting & (1 << 17)) ret |= 1 << 16;
MR_register_setting &= ~0x301f8;
MR_register_setting &= ~0x000301f8;
}
MR_register_setting |= ret;
}
}
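swapAddrBits() handles on-DIMM address mirroring: on a mirrored rank the DRAM sees A3/A4, A5/A6, A7/A8 and the low bank-address pair exchanged, so the MRS payload has to be pre-swapped before it is sent. The family split above exists because the bank bits sit at different positions in the F2x[1,0]7C MRS buffer (bits [18:16] on Fam10h versus [20:18] on Fam15h, per the MrsBank*Fam1x defines added later in this patch), so Fam15h swaps bits 18/19 where Fam10h swaps 16/17. A standalone sketch of one such pair swap:
#include <stdint.h>
#include <stdio.h>

/* Swap one pair of MRS payload bits (e.g. BA0/BA1); 'lo' is 16 for the
 * Fam10h layout and 18 for the Fam15h layout. */
static uint32_t swap_bit_pair(uint32_t mrs, uint8_t lo)
{
	uint32_t out = mrs & ~((1u << lo) | (1u << (lo + 1)));

	if (mrs & (1u << lo))
		out |= 1u << (lo + 1);
	if (mrs & (1u << (lo + 1)))
		out |= 1u << lo;
	return out;
}

int main(void)
{
	uint32_t mrs = 1u << 18;	/* BA0 set, Fam15h layout */
	printf("0x%08x -> 0x%08x\n", mrs, swap_bit_pair(mrs, 18));
	return 0;
}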
@@ -61,47 +232,76 @@ static u32 swapAddrBits(struct DCTStatStruc *pDCTstat, u32 MR_register_setting,
static void mct_SendMrsCmd(struct DCTStatStruc *pDCTstat, u8 dct, u32 EMRS)
{
u32 reg_off = 0x100 * dct;
u32 dev = pDCTstat->dev_dct;
u32 val;
val = Get_NB32(dev, reg_off + 0x7C);
val &= ~0xFFFFFF;
val = Get_NB32_DCT(dev, dct, 0x7c);
val &= ~0x00ffffff;
val |= EMRS;
val |= 1 << SendMrsCmd;
Set_NB32(dev, reg_off + 0x7C, val);
Set_NB32_DCT(dev, dct, 0x7c, val);
do {
val = Get_NB32(dev, reg_off + 0x7C);
val = Get_NB32_DCT(dev, dct, 0x7c);
} while (val & (1 << SendMrsCmd));
}
static u32 mct_MR2(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat, u8 dct, u32 MrsChipSel)
{
u32 reg_off = 0x100 * dct;
u32 dev = pDCTstat->dev_dct;
u32 dword, ret;
if (is_fam15h()) {
uint8_t package_type = mctGet_NVbits(NV_PACK_TYPE);
/* The formula for chip select number is: CS = dimm*2+rank */
uint8_t dimm = MrsChipSel / 2;
uint8_t rank = MrsChipSel % 2;
/* FIXME: These parameters should be configurable
* For now, err on the side of caution and enable automatic 2x refresh
* when the DDR temperature rises above the internal limits
*/
uint8_t force_2x_self_refresh = 0; /* ASR */
uint8_t auto_2x_self_refresh = 1; /* SRT */
ret = 0x80000;
ret |= (MrsChipSel << 21);
/* Set self refresh parameters */
ret |= (force_2x_self_refresh << 6);
ret |= (auto_2x_self_refresh << 7);
/* Obtain Tcwl, adjust, and set CWL with the adjusted value */
dword = Get_NB32_DCT(dev, dct, 0x20c) & 0x1f;
ret |= ((dword - 5) << 3);
/* Obtain and set RttWr */
ret |= (fam15_rttwr(pDCTstat, dct, dimm, rank, package_type) << 9);
} else {
ret = 0x20000;
ret |= MrsChipSel;
ret |= (MrsChipSel << 20);
/* program MrsAddress[5:3]=CAS write latency (CWL):
* based on F2x[1,0]84[Tcwl] */
dword = Get_NB32(dev, reg_off + 0x84);
dword = Get_NB32_DCT(dev, dct, 0x84);
dword = mct_AdjustSPDTimings(pMCTstat, pDCTstat, dword);
ret |= ((dword >> 20) & 7) << 3;
/* program MrsAddress[6]=auto self refresh method (ASR):
based on F2x[1,0]84[ASR]
program MrsAddress[7]=self refresh temperature range (SRT):
based on F2x[1,0]84[ASR and SRT] */
* based on F2x[1,0]84[ASR]
* program MrsAddress[7]=self refresh temperature range (SRT):
* based on F2x[1,0]84[ASR and SRT]
*/
ret |= ((dword >> 18) & 3) << 6;
/* program MrsAddress[10:9]=dynamic termination during writes (RTT_WR)
based on F2x[1,0]84[DramTermDyn] */
* based on F2x[1,0]84[DramTermDyn]
*/
ret |= ((dword >> 10) & 3) << 9;
}
return ret;
}
@@ -109,20 +309,28 @@ static u32 mct_MR2(struct MCTStatStruc *pMCTstat,
static u32 mct_MR3(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat, u8 dct, u32 MrsChipSel)
{
u32 reg_off = 0x100 * dct;
u32 dev = pDCTstat->dev_dct;
u32 dword, ret;
if (is_fam15h()) {
ret = 0xc0000;
ret |= (MrsChipSel << 21);
/* Program MPR and MPRLoc to 0 */
// ret |= 0x0; /* MPR */
// ret |= (0x0 << 2); /* MPRLoc */
} else {
ret = 0x30000;
ret |= MrsChipSel;
ret |= (MrsChipSel << 20);
/* program MrsAddress[1:0]=multi purpose register address location
(MPR Location):based on F2x[1,0]84[MprLoc]
program MrsAddress[2]=multi purpose register
(MPR):based on F2x[1,0]84[MprEn]
* (MPR Location):based on F2x[1,0]84[MprLoc]
* program MrsAddress[2]=multi purpose register
* (MPR):based on F2x[1,0]84[MprEn]
*/
dword = Get_NB32(dev, reg_off + 0x84);
dword = Get_NB32_DCT(dev, dct, 0x84);
ret |= (dword >> 24) & 7;
}
return ret;
}
@@ -130,23 +338,67 @@ static u32 mct_MR3(struct MCTStatStruc *pMCTstat,
static u32 mct_MR1(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat, u8 dct, u32 MrsChipSel)
{
u32 reg_off = 0x100 * dct;
u32 dev = pDCTstat->dev_dct;
u32 dword, ret;
if (is_fam15h()) {
uint8_t package_type = mctGet_NVbits(NV_PACK_TYPE);
/* Set defaults */
uint8_t qoff = 0; /* Enable output buffers */
uint8_t wrlvl = 0; /* Disable write levelling */
uint8_t tqds = 0;
uint8_t rttnom = 0;
uint8_t dic = 0;
uint8_t additive_latency = 0;
uint8_t dll_enable = 0;
ret = 0x40000;
ret |= (MrsChipSel << 21);
/* The formula for chip select number is: CS = dimm*2+rank */
uint8_t dimm = MrsChipSel / 2;
uint8_t rank = MrsChipSel % 2;
/* Determine if TQDS should be set */
if ((pDCTstat->Dimmx8Present & (1 << dimm))
&& (((dimm & 0x1)?(pDCTstat->Dimmx4Present&0x55):(pDCTstat->Dimmx4Present&0xaa)) != 0x0)
&& (pDCTstat->Status & (1 << SB_LoadReduced)))
tqds = 1;
/* Obtain RttNom */
rttnom = fam15_rttnom(pDCTstat, dct, dimm, rank, package_type);
/* Obtain DIC */
dic = fam15_dimm_dic(pDCTstat, dct, dimm, rank, package_type);
/* Load data into MRS word */
ret |= (qoff & 0x1) << 12;
ret |= (tqds & 0x1) << 11;
ret |= ((rttnom & 0x4) >> 2) << 9;
ret |= ((rttnom & 0x2) >> 1) << 6;
ret |= ((rttnom & 0x1) >> 0) << 2;
ret |= (wrlvl & 0x1) << 7;
ret |= ((dic & 0x2) >> 1) << 5;
ret |= ((dic & 0x1) >> 0) << 1;
ret |= (additive_latency & 0x3) << 3;
ret |= (dll_enable & 0x1);
} else {
ret = 0x10000;
ret |= MrsChipSel;
ret |= (MrsChipSel << 20);
/* program MrsAddress[5,1]=output driver impedance control (DIC):
* based on F2x[1,0]84[DrvImpCtrl] */
dword = Get_NB32(dev, reg_off + 0x84);
* based on F2x[1,0]84[DrvImpCtrl]
*/
dword = Get_NB32_DCT(dev, dct, 0x84);
if (dword & (1 << 3))
ret |= 1 << 5;
if (dword & (1 << 2))
ret |= 1 << 1;
/* program MrsAddress[9,6,2]=nominal termination resistance of ODT (RTT):
based on F2x[1,0]84[DramTerm] */
* based on F2x[1,0]84[DramTerm]
*/
if (!(pDCTstat->Status & (1 << SB_Registered))) {
if (dword & (1 << 9))
ret |= 1 << 9;
@@ -159,7 +411,7 @@ static u32 mct_MR1(struct MCTStatStruc *pMCTstat,
}
/* program MrsAddress[11]=TDQS: based on F2x[1,0]94[RDqsEn] */
if (Get_NB32(dev, reg_off + 0x94) & (1 << RDqsEn)) {
if (Get_NB32_DCT(dev, dct, 0x94) & (1 << RDqsEn)) {
u8 bit;
/* Set TDQS=1b for x8 DIMM, TDQS=0b for x4 DIMM, when mixed x8 & x4 */
bit = (ret >> 21) << 1;
@@ -172,6 +424,7 @@ static u32 mct_MR1(struct MCTStatStruc *pMCTstat,
/* program MrsAddress[12]=QOFF: based on F2x[1,0]84[Qoff] */
if (dword & (1 << 13))
ret |= 1 << 12;
}
return ret;
}
@@ -179,16 +432,95 @@ static u32 mct_MR1(struct MCTStatStruc *pMCTstat,
static u32 mct_MR0(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat, u8 dct, u32 MrsChipSel)
{
u32 reg_off = 0x100 * dct;
u32 dev = pDCTstat->dev_dct;
u32 dword, ret, dword2;
if (is_fam15h()) {
ret = 0x00000;
ret |= MrsChipSel;
ret |= (MrsChipSel << 21);
/* Set defaults */
uint8_t ppd = 0;
uint8_t wr_ap = 0;
uint8_t dll_reset = 1;
uint8_t test_mode = 0;
uint8_t cas_latency = 0;
uint8_t read_burst_type = 1;
uint8_t burst_length = 0;
/* Obtain PchgPDModeSel */
dword = Get_NB32_DCT(dev, dct, 0x84);
ppd = (dword >> 23) & 0x1;
/* Obtain Twr */
dword = Get_NB32_DCT(dev, dct, 0x22c) & 0x1f;
/* Calculate wr_ap (Fam15h BKDG v3.14 Table 82) */
if (dword == 0x10)
wr_ap = 0x0;
else if (dword == 0x5)
wr_ap = 0x1;
else if (dword == 0x6)
wr_ap = 0x2;
else if (dword == 0x7)
wr_ap = 0x3;
else if (dword == 0x8)
wr_ap = 0x4;
else if (dword == 0xa)
wr_ap = 0x5;
else if (dword == 0xc)
wr_ap = 0x6;
else if (dword == 0xe)
wr_ap = 0x7;
/* Obtain Tcl */
dword = Get_NB32_DCT(dev, dct, 0x200) & 0x1f;
/* Calculate cas_latency (Fam15h BKDG v3.14 Table 83) */
if (dword == 0x5)
cas_latency = 0x2;
else if (dword == 0x6)
cas_latency = 0x4;
else if (dword == 0x7)
cas_latency = 0x6;
else if (dword == 0x8)
cas_latency = 0x8;
else if (dword == 0x9)
cas_latency = 0xa;
else if (dword == 0xa)
cas_latency = 0xc;
else if (dword == 0xb)
cas_latency = 0xe;
else if (dword == 0xc)
cas_latency = 0x1;
else if (dword == 0xd)
cas_latency = 0x3;
else if (dword == 0xe)
cas_latency = 0x5;
else if (dword == 0xf)
cas_latency = 0x7;
else if (dword == 0x10)
cas_latency = 0x9;
/* Obtain BurstCtrl */
burst_length = Get_NB32_DCT(dev, dct, 0x84) & 0x3;
/* Load data into MRS word */
ret |= (ppd & 0x1) << 12;
ret |= (wr_ap & 0x3) << 9;
ret |= (dll_reset & 0x1) << 8;
ret |= (test_mode & 0x1) << 7;
ret |= ((cas_latency & 0xe) >> 1) << 4;
ret |= ((cas_latency & 0x1) >> 0) << 2;
ret |= (read_burst_type & 0x1) << 3;
ret |= (burst_length & 0x3);
} else {
ret = 0x00000;
ret |= (MrsChipSel << 20);
/* program MrsAddress[1:0]=burst length and control method
(BL):based on F2x[1,0]84[BurstCtrl] */
dword = Get_NB32(dev, reg_off + 0x84);
dword = Get_NB32_DCT(dev, dct, 0x84);
ret |= dword & 3;
/* program MrsAddress[3]=1 (BT):interleaved */
@@ -196,7 +528,7 @@ static u32 mct_MR0(struct MCTStatStruc *pMCTstat,
/* program MrsAddress[6:4,2]=read CAS latency
(CL):based on F2x[1,0]88[Tcl] */
dword2 = Get_NB32(dev, reg_off + 0x88);
dword2 = Get_NB32_DCT(dev, dct, 0x88);
ret |= (dword2 & 0x7) << 4; /* F2x88[2:0] to MrsAddress[6:4] */
ret |= ((dword2 & 0x8) >> 3) << 2; /* F2x88[3] to MrsAddress[2] */
@@ -211,28 +543,28 @@ static u32 mct_MR0(struct MCTStatStruc *pMCTstat,
/* program MrsAddress[8]=1 (DLL):DLL reset
just issue DLL reset at first time */
ret |= 1 << 8;
}
return ret;
}
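The two if/else ladders in the Fam15h branch of mct_MR0() re-encode the controller's Tcl and Twr values into the MR0 CAS-latency and write-recovery fields; the 4-bit cas_latency value is laid out as {A6:A4, A2} and scattered by the shifts just above. A short worked example, assuming Tcl holds the actual CAS latency as the ladder implies: Tcl = 0x8 (CL8) gives cas_latency = 0x8, so A[6:4] = 100b and A2 = 0, while Tcl = 0xc (CL12) gives cas_latency = 0x1, so A[6:4] = 000b and A2 = 1. A standalone check of the placement:
#include <stdint.h>
#include <stdio.h>

/* Place the 4-bit {A6:A4, A2} CAS-latency code into a DDR3 MR0 word,
 * mirroring the shifts near the end of the Fam15h branch above. */
static uint32_t place_cas_latency(uint8_t cas_latency)
{
	uint32_t mr0 = 0;

	mr0 |= ((cas_latency & 0xe) >> 1) << 4;	/* A6:A4 */
	mr0 |= (cas_latency & 0x1) << 2;	/* A2    */
	return mr0;
}

int main(void)
{
	printf("CL8  -> MR0 bits 0x%02x\n", place_cas_latency(0x8)); /* 0x40 */
	printf("CL12 -> MR0 bits 0x%02x\n", place_cas_latency(0x1)); /* 0x04 */
	return 0;
}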
static void mct_SendZQCmd(struct DCTStatStruc *pDCTstat, u8 dct)
{
u32 reg_off = 0x100 * dct;
u32 dev = pDCTstat->dev_dct;
u32 dword;
/*1.Program MrsAddress[10]=1
2.Set SendZQCmd=1
*/
dword = Get_NB32(dev, reg_off + 0x7C);
dword = Get_NB32_DCT(dev, dct, 0x7C);
dword &= ~0xFFFFFF;
dword |= 1 << 10;
dword |= 1 << SendZQCmd;
Set_NB32(dev, reg_off + 0x7C, dword);
Set_NB32_DCT(dev, dct, 0x7C, dword);
/* Wait for SendZQCmd=0 */
do {
dword = Get_NB32(dev, reg_off + 0x7C);
dword = Get_NB32_DCT(dev, dct, 0x7C);
} while (dword & (1 << SendZQCmd));
/* 4.Wait 512 MEMCLKs */
@@ -244,31 +576,30 @@ void mct_DramInit_Sw_D(struct MCTStatStruc *pMCTstat,
{
u8 MrsChipSel;
u32 dword;
u32 reg_off = 0x100 * dct;
u32 dev = pDCTstat->dev_dct;
if (pDCTstat->DIMMAutoSpeed == 4) {
if (pDCTstat->DIMMAutoSpeed == mhz_to_memclk_config(mctGet_NVbits(NV_MIN_MEMCLK))) {
/* 3.Program F2x[1,0]7C[EnDramInit]=1 */
dword = Get_NB32(dev, reg_off + 0x7C);
dword = Get_NB32_DCT(dev, dct, 0x7c);
dword |= 1 << EnDramInit;
Set_NB32(dev, reg_off + 0x7C, dword);
Set_NB32_DCT(dev, dct, 0x7c, dword);
mct_DCTAccessDone(pDCTstat, dct);
/* 4.wait 200us */
mct_Wait(40000);
/* 5.On revision C processors, program F2x[1, 0]7C[DeassertMemRstX] = 1. */
dword = Get_NB32(dev, reg_off + 0x7C);
/* 5.Program F2x[1, 0]7C[DeassertMemRstX] = 1. */
dword = Get_NB32_DCT(dev, dct, 0x7c);
dword |= 1 << DeassertMemRstX;
Set_NB32(dev, reg_off + 0x7C, dword);
Set_NB32_DCT(dev, dct, 0x7c, dword);
/* 6.wait 500us */
mct_Wait(200000);
/* 7.Program F2x[1,0]7C[AssertCke]=1 */
dword = Get_NB32(dev, reg_off + 0x7C);
dword = Get_NB32_DCT(dev, dct, 0x7c);
dword |= 1 << AssertCke;
Set_NB32(dev, reg_off + 0x7C, dword);
Set_NB32_DCT(dev, dct, 0x7c, dword);
/* 8.wait 360ns */
mct_Wait(80);
@@ -277,6 +608,13 @@ void mct_DramInit_Sw_D(struct MCTStatStruc *pMCTstat,
* must be done for each chip select pair */
if (pDCTstat->Status & (1 << SB_Registered))
mct_DramControlReg_Init_D(pMCTstat, pDCTstat, dct);
/* The following steps are performed with load reduced DIMMs only and
* must be done for each DIMM */
// if (pDCTstat->Status & (1 << SB_LoadReduced))
/* TODO
* Implement LRDIMM configuration
*/
}
/* The following steps are performed once for unbuffered DIMMs and once for each
@@ -285,23 +623,23 @@ void mct_DramInit_Sw_D(struct MCTStatStruc *pMCTstat,
if (pDCTstat->CSPresent & (1 << MrsChipSel)) {
u32 EMRS;
/* 13.Send EMRS(2) */
EMRS = mct_MR2(pMCTstat, pDCTstat, dct, MrsChipSel << 20);
EMRS = mct_MR2(pMCTstat, pDCTstat, dct, MrsChipSel);
EMRS = swapAddrBits(pDCTstat, EMRS, MrsChipSel, dct);
mct_SendMrsCmd(pDCTstat, dct, EMRS);
/* 14.Send EMRS(3). Ordinarily at this time, MrsAddress[2:0]=000b */
EMRS= mct_MR3(pMCTstat, pDCTstat, dct, MrsChipSel << 20);
EMRS= mct_MR3(pMCTstat, pDCTstat, dct, MrsChipSel);
EMRS = swapAddrBits(pDCTstat, EMRS, MrsChipSel, dct);
mct_SendMrsCmd(pDCTstat, dct, EMRS);
/* 15.Send EMRS(1) */
EMRS= mct_MR1(pMCTstat, pDCTstat, dct, MrsChipSel << 20);
EMRS= mct_MR1(pMCTstat, pDCTstat, dct, MrsChipSel);
EMRS = swapAddrBits(pDCTstat, EMRS, MrsChipSel, dct);
mct_SendMrsCmd(pDCTstat, dct, EMRS);
/* 16.Send MRS with MrsAddress[8]=1(reset the DLL) */
EMRS= mct_MR0(pMCTstat, pDCTstat, dct, MrsChipSel << 20);
EMRS= mct_MR0(pMCTstat, pDCTstat, dct, MrsChipSel);
EMRS = swapAddrBits(pDCTstat, EMRS, MrsChipSel, dct);
mct_SendMrsCmd(pDCTstat, dct, EMRS);
if (pDCTstat->DIMMAutoSpeed == 4)
if (pDCTstat->DIMMAutoSpeed == mhz_to_memclk_config(mctGet_NVbits(NV_MIN_MEMCLK)))
if (!(pDCTstat->Status & (1 << SB_Registered)))
break; /* For UDIMM, only send MR commands once per channel */
}
@@ -310,16 +648,15 @@ void mct_DramInit_Sw_D(struct MCTStatStruc *pMCTstat,
MrsChipSel ++;
}
mct_Wait(100000);
if (pDCTstat->DIMMAutoSpeed == 4) {
if (pDCTstat->DIMMAutoSpeed == mhz_to_memclk_config(mctGet_NVbits(NV_MIN_MEMCLK))) {
/* 17.Send two ZQCL commands */
mct_SendZQCmd(pDCTstat, dct);
mct_SendZQCmd(pDCTstat, dct);
/* 18.Program F2x[1,0]7C[EnDramInit]=0 */
dword = Get_NB32(dev, reg_off + 0x7C);
dword = Get_NB32_DCT(dev, dct, 0x7C);
dword &= ~(1 << EnDramInit);
Set_NB32(dev, reg_off + 0x7C, dword);
Set_NB32_DCT(dev, dct, 0x7C, dword);
mct_DCTAccessDone(pDCTstat, dct);
}
}

File diff suppressed because it is too large

View File

@@ -17,8 +17,14 @@
u8 mct_checkNumberOfDqsRcvEn_1Pass(u8 pass)
{
u8 ret = 1;
if (is_fam15h()) {
/* Fam15h needs two passes */
ret = 1;
} else {
if (pass == SecondPass)
ret = 0;
}
return ret;
}

View File

@@ -214,12 +214,12 @@ static void mct_setMaxRdLatTrnVal_D(struct DCTStatStruc *pDCTstat,
}
dev = pDCTstat->dev_dct;
reg = 0x78 + Channel * 0x100;
val = Get_NB32(dev, reg);
reg = 0x78;
val = Get_NB32_DCT(dev, Channel, reg);
val &= ~(0x3ff<<22);
val |= MaxRdLatVal<<22;
/* program MaxRdLatency to correspond with current delay */
Set_NB32(dev, reg, val);
Set_NB32_DCT(dev, Channel, reg, val);
}
static u8 CompareMaxRdLatTestPattern_D(u32 pattern_buf, u32 addr)
@@ -316,30 +316,28 @@ u8 mct_GetStartMaxRdLat_D(struct MCTStatStruc *pMCTstat,
u32 valx;
u32 valxx;
u32 index_reg;
u32 reg_off;
u32 dev;
if(pDCTstat->GangedMode)
Channel = 0;
index_reg = 0x98 + 0x100 * Channel;
index_reg = 0x98;
reg_off = 0x100 * Channel;
dev = pDCTstat->dev_dct;
/* Multiply the CAS Latency by two to get a number of 1/2 MEMCLKs units.*/
val = Get_NB32(dev, 0x88 + reg_off);
val = Get_NB32_DCT(dev, Channel, 0x88);
SubTotal = ((val & 0x0f) + 1) << 1; /* SubTotal is 1/2 Memclk unit */
/* If registered DIMMs are being used then add 1 MEMCLK to the sub-total*/
val = Get_NB32(dev, 0x90 + reg_off);
val = Get_NB32_DCT(dev, Channel, 0x90);
if(!(val & (1 << UnBuffDimm)))
SubTotal += 2;
/*If the address prelaunch is setup for 1/2 MEMCLKs then add 1,
* else add 2 to the sub-total. if (AddrCmdSetup || CsOdtSetup
* || CkeSetup) then K := K + 2; */
val = Get_NB32_index_wait(dev, index_reg, 0x04);
val = Get_NB32_index_wait_DCT(dev, Channel, index_reg, 0x04);
if(!(val & 0x00202020))
SubTotal += 1;
else
@@ -347,7 +345,7 @@ u8 mct_GetStartMaxRdLat_D(struct MCTStatStruc *pMCTstat,
/* If the F2x[1, 0]78[RdPtrInit] field is 4, 5, 6 or 7 MEMCLKs,
* then add 4, 3, 2, or 1 MEMCLKs, respectively to the sub-total. */
val = Get_NB32(dev, 0x78 + reg_off);
val = Get_NB32_DCT(dev, Channel, 0x78);
SubTotal += 8 - (val & 0x0f);
/* Convert bits 7-5 (also referred to as the coarse delay) of the current
@@ -363,7 +361,7 @@ u8 mct_GetStartMaxRdLat_D(struct MCTStatStruc *pMCTstat,
/*New formula:
SubTotal *= 3*(Fn2xD4[NBFid]+4)/(3+Fn2x94[MemClkFreq])/2 */
val = Get_NB32(dev, 0x94 + reg_off);
val = Get_NB32_DCT(dev, Channel, 0x94);
/* SubTotal div 4 to scale 1/4 MemClk back to MemClk */
val &= 7;
if (val >= 3) {

View File

@@ -79,6 +79,12 @@ void PrepareC_DCT(struct MCTStatStruc *pMCTstat,
pDCTstat->C_DCTPtr[dct]->Status[DCT_STATUS_REGISTERED] = 0;
}
if (pDCTstat->Status & (1 << SB_LoadReduced)) {
pDCTstat->C_DCTPtr[dct]->Status[DCT_STATUS_LOAD_REDUCED] = 1;
} else {
pDCTstat->C_DCTPtr[dct]->Status[DCT_STATUS_LOAD_REDUCED] = 0;
}
pDCTstat->C_DCTPtr[dct]->RegMan1Present = pDCTstat->RegMan1Present;
for (dimm = 0; dimm < MAX_TOTAL_DIMMS; dimm++) {
@@ -99,13 +105,13 @@ void EnableZQcalibration(struct MCTStatStruc *pMCTstat, struct DCTStatStruc *pDC
{
u32 val;
val = Get_NB32(pDCTstat->dev_dct, 0x94);
val = Get_NB32_DCT(pDCTstat->dev_dct, 0, 0x94);
val |= 1 << 11;
Set_NB32(pDCTstat->dev_dct, 0x94, val);
Set_NB32_DCT(pDCTstat->dev_dct, 0, 0x94, val);
val = Get_NB32(pDCTstat->dev_dct, 0x94 + 0x100);
val = Get_NB32_DCT(pDCTstat->dev_dct, 1, 0x94);
val |= 1 << 11;
Set_NB32(pDCTstat->dev_dct, 0x94 + 0x100, val);
Set_NB32_DCT(pDCTstat->dev_dct, 1, 0x94, val);
}
void DisableZQcalibration(struct MCTStatStruc *pMCTstat,
@@ -113,15 +119,15 @@ void DisableZQcalibration(struct MCTStatStruc *pMCTstat,
{
u32 val;
val = Get_NB32(pDCTstat->dev_dct, 0x94);
val = Get_NB32_DCT(pDCTstat->dev_dct, 0, 0x94);
val &= ~(1 << 11);
val &= ~(1 << 10);
Set_NB32(pDCTstat->dev_dct, 0x94, val);
Set_NB32_DCT(pDCTstat->dev_dct, 0, 0x94, val);
val = Get_NB32(pDCTstat->dev_dct, 0x94 + 0x100);
val = Get_NB32_DCT(pDCTstat->dev_dct, 1, 0x94);
val &= ~(1 << 11);
val &= ~(1 << 10);
Set_NB32(pDCTstat->dev_dct, 0x94 + 0x100, val);
Set_NB32_DCT(pDCTstat->dev_dct, 1, 0x94, val);
}
static void EnterSelfRefresh(struct MCTStatStruc *pMCTstat,
@@ -138,23 +144,23 @@ static void EnterSelfRefresh(struct MCTStatStruc *pMCTstat,
/* Program F2x[1, 0]90[EnterSelfRefresh]=1. */
if (DCT0Present) {
val = Get_NB32(pDCTstat->dev_dct, 0x90);
val = Get_NB32_DCT(pDCTstat->dev_dct, 0, 0x90);
val |= 1 << EnterSelfRef;
Set_NB32(pDCTstat->dev_dct, 0x90, val);
Set_NB32_DCT(pDCTstat->dev_dct, 0, 0x90, val);
}
if (DCT1Present) {
val = Get_NB32(pDCTstat->dev_dct, 0x90 + 0x100);
val = Get_NB32_DCT(pDCTstat->dev_dct, 1, 0x90);
val |= 1 << EnterSelfRef;
Set_NB32(pDCTstat->dev_dct, 0x90 + 0x100, val);
Set_NB32_DCT(pDCTstat->dev_dct, 1, 0x90, val);
}
/* Wait until the hardware resets F2x[1, 0]90[EnterSelfRefresh]=0. */
if (DCT0Present)
do {
val = Get_NB32(pDCTstat->dev_dct, 0x90);
val = Get_NB32_DCT(pDCTstat->dev_dct, 0, 0x90);
} while (val & (1 <<EnterSelfRef));
if (DCT1Present)
do {
val = Get_NB32(pDCTstat->dev_dct, 0x90 + 0x100);
val = Get_NB32_DCT(pDCTstat->dev_dct, 1, 0x90);
} while (val & (1 <<EnterSelfRef));
}
@@ -164,8 +170,11 @@ static void EnterSelfRefresh(struct MCTStatStruc *pMCTstat,
static void ChangeMemClk(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat)
{
u8 DCT0Present, DCT1Present;
u32 val;
uint8_t DCT0Present;
uint8_t DCT1Present;
uint32_t dword;
uint32_t mask;
uint32_t offset;
DCT0Present = pDCTstat->DIMMValidDCT[0];
if (pDCTstat->GangedMode)
@@ -173,76 +182,134 @@ static void ChangeMemClk(struct MCTStatStruc *pMCTstat,
else
DCT1Present = pDCTstat->DIMMValidDCT[1];
/* Program F2x[1, 0]90[EnterSelfRefresh]=1. */
if (is_fam15h()) {
/* Program D18F2x9C_x0D0F_E006_dct[1:0][PllLockTime] = 0x190 */
if (DCT0Present) {
val = Get_NB32_index_wait(pDCTstat->dev_dct, 0x98, 8);
val |= 1 << DisAutoComp;
Set_NB32_index_wait(pDCTstat->dev_dct, 0x98, 8, val);
dword = Get_NB32_index_wait_DCT(pDCTstat->dev_dct, 0, 0x98, 0x0d0fe006);
dword &= ~(0x0000ffff);
dword |= 0x00000190;
Set_NB32_index_wait_DCT(pDCTstat->dev_dct, 0, 0x98, 0x0d0fe006, dword);
}
if (DCT1Present) {
val = Get_NB32_index_wait(pDCTstat->dev_dct, 0x98 + 0x100, 8);
val |= 1 << DisAutoComp;
Set_NB32_index_wait(pDCTstat->dev_dct, 0x98 + 0x100, 8, val);
dword = Get_NB32_index_wait_DCT(pDCTstat->dev_dct, 1, 0x98, 0x0d0fe006);
dword &= ~(0x0000ffff);
dword |= 0x00000190;
Set_NB32_index_wait_DCT(pDCTstat->dev_dct, 1, 0x98, 0x0d0fe006, dword);
}
} else {
/* Program F2x[1, 0]9C[DisAutoComp]=1. */
if (DCT0Present) {
dword = Get_NB32_index_wait_DCT(pDCTstat->dev_dct, 0, 0x98, 8);
dword |= 1 << DisAutoComp;
Set_NB32_index_wait_DCT(pDCTstat->dev_dct, 0, 0x98, 8, dword);
mct_Wait(100); /* Wait for 5us */
}
if (DCT1Present) {
dword = Get_NB32_index_wait_DCT(pDCTstat->dev_dct, 1, 0x98, 8);
dword |= 1 << DisAutoComp;
Set_NB32_index_wait_DCT(pDCTstat->dev_dct, 1, 0x98, 8, dword);
mct_Wait(100); /* Wait for 5us */
}
}
/* Program F2x[1, 0]94[MemClkFreqVal] = 0. */
if (DCT0Present) {
val = Get_NB32(pDCTstat->dev_dct, 0x94);
val &= ~(1 << MemClkFreqVal);
Set_NB32(pDCTstat->dev_dct, 0x94, val);
dword = Get_NB32_DCT(pDCTstat->dev_dct, 0, 0x94);
dword &= ~(1 << MemClkFreqVal);
Set_NB32_DCT(pDCTstat->dev_dct, 0, 0x94, dword);
}
if (DCT1Present) {
val = Get_NB32(pDCTstat->dev_dct, 0x94 + 0x100);
val &= ~(1 << MemClkFreqVal);
Set_NB32(pDCTstat->dev_dct, 0x94 + 0x100, val);
dword = Get_NB32_DCT(pDCTstat->dev_dct, 1, 0x94);
dword &= ~(1 << MemClkFreqVal);
Set_NB32_DCT(pDCTstat->dev_dct, 1, 0x94, dword);
}
/* Program F2x[1, 0]94[MemClkFreq] to specify the target MEMCLK frequency. */
if (is_fam15h()) {
offset = 0x0;
mask = 0x1f;
} else {
offset = 0x1;
mask = 0x7;
}
if (DCT0Present) {
val = Get_NB32(pDCTstat->dev_dct, 0x94);
val &= 0xFFFFFFF8;
val |= pDCTstat->TargetFreq - 1;
Set_NB32(pDCTstat->dev_dct, 0x94, val);
dword = Get_NB32_DCT(pDCTstat->dev_dct, 0, 0x94);
dword &= ~mask;
dword |= (pDCTstat->TargetFreq - offset) & mask;
Set_NB32_DCT(pDCTstat->dev_dct, 0, 0x94, dword);
}
if (DCT1Present) {
val = Get_NB32(pDCTstat->dev_dct, 0x94 + 0x100);
val &= 0xFFFFFFF8;
val |= pDCTstat->TargetFreq - 1;
Set_NB32(pDCTstat->dev_dct, 0x94 + 0x100, val);
dword = Get_NB32_DCT(pDCTstat->dev_dct, 1, 0x94);
dword &= ~mask;
dword |= (pDCTstat->TargetFreq - offset) & mask;
Set_NB32_DCT(pDCTstat->dev_dct, 1, 0x94, dword);
}
if (is_fam15h()) {
if (DCT0Present) {
mctGet_PS_Cfg_D(pMCTstat, pDCTstat, 0);
set_2t_configuration(pMCTstat, pDCTstat, 0);
mct_BeforePlatformSpec(pMCTstat, pDCTstat, 0);
mct_PlatformSpec(pMCTstat, pDCTstat, 0);
}
if (DCT1Present) {
mctGet_PS_Cfg_D(pMCTstat, pDCTstat, 1);
set_2t_configuration(pMCTstat, pDCTstat, 1);
mct_BeforePlatformSpec(pMCTstat, pDCTstat, 1);
mct_PlatformSpec(pMCTstat, pDCTstat, 1);
}
}
/* Program F2x[1, 0]94[MemClkFreqVal] = 1. */
if (DCT0Present) {
val = Get_NB32(pDCTstat->dev_dct, 0x94);
val |= 1 << MemClkFreqVal;
Set_NB32(pDCTstat->dev_dct, 0x94, val);
dword = Get_NB32_DCT(pDCTstat->dev_dct, 0, 0x94);
dword |= 1 << MemClkFreqVal;
Set_NB32_DCT(pDCTstat->dev_dct, 0, 0x94, dword);
}
if (DCT1Present) {
val = Get_NB32(pDCTstat->dev_dct, 0x94 + 0x100);
val |= 1 << MemClkFreqVal;
Set_NB32(pDCTstat->dev_dct, 0x94 + 0x100, val);
dword = Get_NB32_DCT(pDCTstat->dev_dct, 1, 0x94);
dword |= 1 << MemClkFreqVal;
Set_NB32_DCT(pDCTstat->dev_dct, 1, 0x94, dword);
}
/* Wait until F2x[1, 0]94[FreqChgInProg]=0. */
if (DCT0Present)
do {
val = Get_NB32(pDCTstat->dev_dct, 0x94);
} while (val & (1 << FreqChgInProg));
dword = Get_NB32_DCT(pDCTstat->dev_dct, 0, 0x94);
} while (dword & (1 << FreqChgInProg));
if (DCT1Present)
do {
val = Get_NB32(pDCTstat->dev_dct, 0x94 + 0x100);
} while (val & (1 << FreqChgInProg));
dword = Get_NB32_DCT(pDCTstat->dev_dct, 1, 0x94);
} while (dword & (1 << FreqChgInProg));
/* Program F2x[1, 0]94[MemClkFreqVal] = 0. */
if (is_fam15h()) {
/* Program D18F2x9C_x0D0F_E006_dct[1:0][PllLockTime] = 0xf */
if (DCT0Present) {
val = Get_NB32_index_wait(pDCTstat->dev_dct, 0x98, 8);
val &= ~(1 << DisAutoComp);
Set_NB32_index_wait(pDCTstat->dev_dct, 0x98, 8, val);
dword = Get_NB32_index_wait_DCT(pDCTstat->dev_dct, 0, 0x98, 0x0d0fe006);
dword &= ~(0x0000ffff);
dword |= 0x0000000f;
Set_NB32_index_wait_DCT(pDCTstat->dev_dct, 0, 0x98, 0x0d0fe006, dword);
}
if (DCT1Present) {
val = Get_NB32_index_wait(pDCTstat->dev_dct, 0x98 + 0x100, 8);
val &= ~(1 << DisAutoComp);
Set_NB32_index_wait(pDCTstat->dev_dct, 0x98 + 0x100, 8, val);
dword = Get_NB32_index_wait_DCT(pDCTstat->dev_dct, 1, 0x98, 0x0d0fe006);
dword &= ~(0x0000ffff);
dword |= 0x0000000f;
Set_NB32_index_wait_DCT(pDCTstat->dev_dct, 1, 0x98, 0x0d0fe006, dword);
}
} else {
/* Program F2x[1, 0]9C[DisAutoComp] = 0. */
if (DCT0Present) {
dword = Get_NB32_index_wait_DCT(pDCTstat->dev_dct, 0, 0x98, 8);
dword &= ~(1 << DisAutoComp);
Set_NB32_index_wait_DCT(pDCTstat->dev_dct, 0, 0x98, 8, dword);
mct_Wait(15000); /* Wait for 750us */
}
if (DCT1Present) {
dword = Get_NB32_index_wait_DCT(pDCTstat->dev_dct, 1, 0x98, 8);
dword &= ~(1 << DisAutoComp);
Set_NB32_index_wait_DCT(pDCTstat->dev_dct, 1, 0x98, 8, dword);
mct_Wait(15000); /* Wait for 750us */
}
}
}
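Within ChangeMemClk() the frequency itself is changed by rewriting the MemClkFreq field of F2x94 while MemClkFreqVal is deasserted; the only family difference at that step is the field width and the bias of the stored code (the Fam10h path writes TargetFreq - 1 into a 3-bit field, the Fam15h path writes TargetFreq unchanged into a 5-bit field). A standalone sketch of that mask/offset update, assuming TargetFreq already holds the family's own frequency code:
#include <stdint.h>

/* Update the MemClkFreq field of an F2x94 value the way ChangeMemClk()
 * does, leaving the rest of the register untouched. */
static uint32_t set_memclk_freq(uint32_t f2x94, uint16_t target_freq,
		int fam15h)
{
	uint32_t mask   = fam15h ? 0x1f : 0x7;	/* field width differs */
	uint32_t offset = fam15h ? 0x0  : 0x1;	/* Fam10h stores code - 1 */

	f2x94 &= ~mask;
	f2x94 |= (target_freq - offset) & mask;
	return f2x94;
}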
@@ -263,29 +330,46 @@ static void ExitSelfRefresh(struct MCTStatStruc *pMCTstat,
/* Program F2x[1, 0]90[ExitSelfRef]=1 for both DCTs. */
if (DCT0Present) {
val = Get_NB32(pDCTstat->dev_dct, 0x90);
val = Get_NB32_DCT(pDCTstat->dev_dct, 0, 0x90);
val |= 1 << ExitSelfRef;
Set_NB32(pDCTstat->dev_dct, 0x90, val);
Set_NB32_DCT(pDCTstat->dev_dct, 0, 0x90, val);
}
if (DCT1Present) {
val = Get_NB32(pDCTstat->dev_dct, 0x90 + 0x100);
val = Get_NB32_DCT(pDCTstat->dev_dct, 1, 0x90);
val |= 1 << ExitSelfRef;
Set_NB32(pDCTstat->dev_dct, 0x90 + 0x100, val);
Set_NB32_DCT(pDCTstat->dev_dct, 1, 0x90, val);
}
/* Wait until the hardware resets F2x[1, 0]90[ExitSelfRef]=0. */
if (DCT0Present)
do {
val = Get_NB32(pDCTstat->dev_dct, 0x90);
val = Get_NB32_DCT(pDCTstat->dev_dct, 0, 0x90);
} while (val & (1 << ExitSelfRef));
if (DCT1Present)
do {
val = Get_NB32(pDCTstat->dev_dct, 0x90 + 0x100);
val = Get_NB32_DCT(pDCTstat->dev_dct, 1, 0x90);
} while (val & (1 << ExitSelfRef));
}
void SetTargetFreq(struct MCTStatStruc *pMCTstat,
struct DCTStatStruc *pDCTstat)
{
uint32_t dword;
uint8_t package_type = mctGet_NVbits(NV_PACK_TYPE);
if (is_fam15h()) {
/* Program F2x[1, 0]90[DisDllShutDownSR]=1. */
if (pDCTstat->DIMMValidDCT[0]) {
dword = Get_NB32_DCT(pDCTstat->dev_dct, 0, 0x90);
dword |= (0x1 << 27);
Set_NB32_DCT(pDCTstat->dev_dct, 0, 0x90, dword);
}
if (pDCTstat->DIMMValidDCT[1]) {
dword = Get_NB32_DCT(pDCTstat->dev_dct, 1, 0x90);
dword |= (0x1 << 27);
Set_NB32_DCT(pDCTstat->dev_dct, 1, 0x90, dword);
}
}
/* Program F2x[1,0]90[EnterSelfRefresh]=1.
* Wait until the hardware resets F2x[1,0]90[EnterSelfRefresh]=0.
*/
@@ -301,11 +385,38 @@ void SetTargetFreq(struct MCTStatStruc *pMCTstat,
*/
ChangeMemClk(pMCTstat, pDCTstat);
if (is_fam15h()) {
uint8_t dct;
for (dct = 0; dct < 2; dct++) {
if (pDCTstat->DIMMValidDCT[dct]) {
phyAssistedMemFnceTraining(pMCTstat, pDCTstat);
InitPhyCompensation(pMCTstat, pDCTstat, dct);
}
}
}
/* Program F2x[1,0]90[ExitSelfRef]=1 for both DCTs.
* Wait until the hardware resets F2x[1, 0]90[ExitSelfRef]=0.
*/
ExitSelfRefresh(pMCTstat, pDCTstat);
if (is_fam15h()) {
if ((package_type == PT_C3) || (package_type == PT_GR)) {
/* Socket C32 or G34 */
/* Program F2x[1, 0]90[DisDllShutDownSR]=0. */
if (pDCTstat->DIMMValidDCT[0]) {
dword = Get_NB32_DCT(pDCTstat->dev_dct, 0, 0x90);
dword &= ~(0x1 << 27);
Set_NB32_DCT(pDCTstat->dev_dct, 0, 0x90, dword);
}
if (pDCTstat->DIMMValidDCT[1]) {
dword = Get_NB32_DCT(pDCTstat->dev_dct, 1, 0x90);
dword &= ~(0x1 << 27);
Set_NB32_DCT(pDCTstat->dev_dct, 1, 0x90, dword);
}
}
}
/* wait for 500 MCLKs after ExitSelfRef, 500*2.5ns=1250ns */
mct_Wait(250);
@@ -332,13 +443,13 @@ void SetTargetFreq(struct MCTStatStruc *pMCTstat,
static void Modify_OnDimmMirror(struct DCTStatStruc *pDCTstat, u8 dct, u8 set)
{
u32 val;
u32 reg_off = dct * 0x100 + 0x44;
while (reg_off < (dct * 0x100 + 0x60)) {
val = Get_NB32(pDCTstat->dev_dct, reg_off);
u32 reg = 0x44;
while (reg < 0x60) {
val = Get_NB32_DCT(pDCTstat->dev_dct, dct, reg);
if (val & (1 << CSEnable))
set ? (val |= 1 << onDimmMirror) : (val &= ~(1<<onDimmMirror));
Set_NB32(pDCTstat->dev_dct, reg_off, val);
reg_off += 8;
Set_NB32_DCT(pDCTstat->dev_dct, dct, reg, val);
reg += 8;
}
}

File diff suppressed because it is too large

View File

@@ -2,6 +2,7 @@
* This file is part of the coreboot project.
*
* Copyright (C) 2010 Advanced Micro Devices, Inc.
* Copyright (C) 2015 Timothy Pearson <tpearson@raptorengineeringinc.com>, Raptor Engineering
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -130,26 +131,50 @@ static u32 get_Bits(sDCTStruct *pDCTData,
u16 offset, u8 low, u8 high)
{
u32 temp;
uint32_t dword;
/* ASSERT(node < MAX_NODES); */
if (dct == BOTH_DCTS)
{
/* Registers exist on DCT0 only */
if (is_fam15h())
{
/* Select DCT 0 */
AmdMemPCIRead(MAKE_SBDFO(0,0,24+node,1,0x10c), &dword);
dword &= ~0x1;
AmdMemPCIWrite(MAKE_SBDFO(0,0,24+node,1,0x10c), &dword);
}
AmdMemPCIReadBits(MAKE_SBDFO(0,0,24+node,func,offset), high, low, &temp);
}
else
{
if (is_fam15h())
{
/* Select DCT */
AmdMemPCIRead(MAKE_SBDFO(0,0,24+node,1,0x10c), &dword);
dword &= ~0x1;
dword |= (dct & 0x1);
AmdMemPCIWrite(MAKE_SBDFO(0,0,24+node,1,0x10c), &dword);
/* Read from the selected DCT */
AmdMemPCIReadBits(MAKE_SBDFO(0,0,24+node,func,offset), high, low, &temp);
}
else
{
if (dct == 1)
{
/* Write to dct 1 */
/* Read from dct 1 */
offset += 0x100;
AmdMemPCIReadBits(MAKE_SBDFO(0,0,24+node,func,offset), high, low, &temp);
}
else
{
/* Write to dct 0 */
/* Read from dct 0 */
AmdMemPCIReadBits(MAKE_SBDFO(0,0,24+node,func,offset), high, low, &temp);
}
}
}
return temp;
}
@@ -180,11 +205,34 @@ static void set_Bits(sDCTStruct *pDCTData,
u16 offset, u8 low, u8 high, u32 value)
{
u32 temp;
uint32_t dword;
temp = value;
if (dct == BOTH_DCTS)
{
/* Registers exist on DCT0 only */
if (is_fam15h())
{
/* Select DCT 0 */
AmdMemPCIRead(MAKE_SBDFO(0,0,24+node,1,0x10c), &dword);
dword &= ~0x1;
AmdMemPCIWrite(MAKE_SBDFO(0,0,24+node,1,0x10c), &dword);
}
AmdMemPCIWriteBits(MAKE_SBDFO(0,0,24+node,func,offset), high, low, &temp);
}
else
{
if (is_fam15h())
{
/* Select DCT */
AmdMemPCIRead(MAKE_SBDFO(0,0,24+node,1,0x10c), &dword);
dword &= ~0x1;
dword |= (dct & 0x1);
AmdMemPCIWrite(MAKE_SBDFO(0,0,24+node,1,0x10c), &dword);
/* Write to the selected DCT */
AmdMemPCIWriteBits(MAKE_SBDFO(0,0,24+node,func,offset), high, low, &temp);
}
else
@@ -202,6 +250,7 @@ static void set_Bits(sDCTStruct *pDCTData,
}
}
}
}
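
On Family 15h the second DRAM controller is no longer reached through a fixed +0x100 register offset; instead, F1x10C bit 0 (DctCfgSel) routes every function-2 access to DCT0 or DCT1, which is why get_Bits() and set_Bits() now program that select bit before touching the target register. A minimal sketch of the select step in isolation, assuming the AmdMemPCIRead()/AmdMemPCIWrite() wrappers used above:

/* Editor's sketch, not part of the patch: steer subsequent D18F2 accesses
 * to the requested DCT on Fam15h (Fam10h instead adds dct * 0x100 to the offset). */
static void select_dct_sketch(u8 node, u8 dct)
{
	uint32_t dword;

	AmdMemPCIRead(MAKE_SBDFO(0, 0, 24 + node, 1, 0x10c), &dword);
	dword &= ~0x1;			/* Clear DctCfgSel */
	dword |= (dct & 0x1);		/* 0 selects DCT0, 1 selects DCT1 */
	AmdMemPCIWrite(MAKE_SBDFO(0, 0, 24 + node, 1, 0x10c), &dword);
}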
/*-------------------------------------------------
* u32 get_ADD_DCT_Bits(DCTStruct *DCTData,u8 DCT,u8 Node,u8 func,

View File

@@ -2,6 +2,7 @@
* This file is part of the coreboot project.
*
* Copyright (C) 2010 Advanced Micro Devices, Inc.
* Copyright (C) 2015 Timothy Pearson <tpearson@raptorengineeringinc.com>, Raptor Engineering
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -30,6 +31,7 @@
/* STATUS Definition */
#define DCT_STATUS_REGISTERED 3 /* Registered DIMMs support */
#define DCT_STATUS_LOAD_REDUCED 4 /* Load-Reduced DIMMs support */
#define DCT_STATUS_OnDimmMirror 24 /* OnDimmMirror support */
/* PCI Definitions */
@@ -74,12 +76,18 @@
#define SendMrsCmd 26
#define Qoff 12
#define MRS_Level 7
#define MrsAddressStart 0
#define MrsAddressEnd 15
#define MrsBankStart 16
#define MrsBankEnd 18
#define MrsChipSelStart 20
#define MrsChipSelEnd 22
#define MrsAddressStartFam10 0
#define MrsAddressEndFam10 15
#define MrsAddressStartFam15 0
#define MrsAddressEndFam15 17
#define MrsBankStartFam10 16
#define MrsBankEndFam10 18
#define MrsBankStartFam15 18
#define MrsBankEndFam15 20
#define MrsChipSelStartFam10 20
#define MrsChipSelEndFam10 22
#define MrsChipSelStartFam15 21
#define MrsChipSelEndFam15 23
#define ASR 18
#define SRT 19
#define DramTermDynStart 10
@@ -111,10 +119,32 @@ typedef struct _sDCTStruct
u8 DctTrain; /* Current DCT being trained */
u8 CurrDct; /* Current DCT number (0 or 1) */
u8 DctCSPresent; /* Current DCT CS mapping */
int32_t WLSeedGrossDelay[MAX_BYTE_LANES*MAX_LDIMMS]; /* Write Levelization Seed Gross Delay */
/* per byte Lane Per Logical DIMM*/
int32_t WLSeedFineDelay[MAX_BYTE_LANES*MAX_LDIMMS]; /* Write Levelization Seed Fine Delay */
/* per byte Lane Per Logical DIMM*/
int32_t WLSeedPreGrossDelay[MAX_BYTE_LANES*MAX_LDIMMS]; /* Write Levelization Seed Pre-Gross Delay */
/* per byte Lane Per Logical DIMM*/
u8 WLGrossDelay[MAX_BYTE_LANES*MAX_LDIMMS]; /* Write Levelization Gross Delay */
/* per byte Lane Per Logical DIMM*/
u8 WLFineDelay[MAX_BYTE_LANES*MAX_LDIMMS]; /* Write Levelization Fine Delay */
/* per byte Lane Per Logical DIMM*/
u8 WLGrossDelayFirstPass[MAX_BYTE_LANES*MAX_LDIMMS]; /* First-Pass Write Levelization Gross Delay */
/* per byte Lane Per Logical DIMM*/
u8 WLFineDelayFirstPass[MAX_BYTE_LANES*MAX_LDIMMS]; /* First-Pass Write Levelization Fine Delay */
/* per byte Lane Per Logical DIMM*/
u8 WLGrossDelayPrevPass[MAX_BYTE_LANES*MAX_LDIMMS]; /* Previous Pass Write Levelization Gross Delay */
/* per byte Lane Per Logical DIMM*/
u8 WLFineDelayPrevPass[MAX_BYTE_LANES*MAX_LDIMMS]; /* Previous Pass Write Levelization Fine Delay */
/* per byte Lane Per Logical DIMM*/
u8 WLGrossDelayFinalPass[MAX_BYTE_LANES*MAX_LDIMMS]; /* Final-Pass Write Levelization Gross Delay */
/* per byte Lane Per Logical DIMM*/
u8 WLFineDelayFinalPass[MAX_BYTE_LANES*MAX_LDIMMS]; /* Final-Pass Write Levelization Fine Delay */
/* per byte Lane Per Logical DIMM*/
int32_t WLCriticalGrossDelayFirstPass;
int32_t WLCriticalGrossDelayPrevPass;
int32_t WLCriticalGrossDelayFinalPass;
uint16_t WLPrevMemclkFreq;
u16 RegMan1Present;
u8 DimmPresent[MAX_TOTAL_DIMMS];/* Indicates which DIMMs are present */
/* from Total Number of DIMMs(per Node)*/
@@ -128,7 +158,7 @@ typedef struct _sDCTStruct
/* per byte lane */
u8 MaxDimmsInstalled; /* Max Dimms Installed for current DCT */
u8 DimmRanks[MAX_TOTAL_DIMMS]; /* Total Number of Ranks(per Dimm) */
u32 LogicalCPUID;
uint64_t LogicalCPUID;
u8 WLPass;
} sDCTStruct;

View File

@@ -14,6 +14,7 @@
*/
#include <string.h>
#include <arch/cpu.h>
#include <arch/acpi.h>
#include <cpu/x86/msr.h>
#include <device/device.h>
@@ -28,6 +29,23 @@
#define S3NV_FILE_NAME "s3nv"
#ifdef __RAMSTAGE__
static inline uint8_t is_fam15h(void)
{
uint8_t fam15h = 0;
uint32_t family;
family = cpuid_eax(0x80000001);
family = ((family & 0xf00000) >> 16) | ((family & 0xf00) >> 8);
if (family >= 0x6f)
/* Family 15h or later */
fam15h = 1;
return fam15h;
}
#endif
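
The value compared against 0x6f concatenates the CPUID extended family field (bits 27:20) with the base family field (bits 11:8). A worked example, using an illustrative raw CPUID value for a Family 15h part:

/* Editor's worked example; the raw EAX value is illustrative. */
uint32_t eax = 0x00600F20;			/* CPUID Fn8000_0001 EAX (assumed Fam15h part) */
uint32_t fam = ((eax & 0xf00000) >> 16) |	/* extended family -> 0x60 */
	       ((eax & 0x000f00) >> 8);		/* base family     -> 0x0f */
/* fam == 0x6f, so is_fam15h() returns 1; a Family 10h part
 * (e.g. EAX = 0x00100F80) combines to 0x1f and returns 0. */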
static ssize_t get_s3nv_file_offset(void);
ssize_t get_s3nv_file_offset(void)
@@ -43,6 +61,28 @@ ssize_t get_s3nv_file_offset(void)
return s3nv_region.region.offset;
}
static uint32_t read_config32_dct(device_t dev, uint8_t node, uint8_t dct, uint32_t reg) {
if (is_fam15h()) {
uint32_t dword;
#ifdef __PRE_RAM__
device_t dev_fn1 = PCI_DEV(0, 0x18 + node, 1);
#else
device_t dev_fn1 = dev_find_slot(0, PCI_DEVFN(0x18 + node, 1));
#endif
/* Select DCT */
dword = pci_read_config32(dev_fn1, 0x10c);
dword &= ~0x1;
dword |= (dct & 0x1);
pci_write_config32(dev_fn1, 0x10c, dword);
} else {
/* Apply offset */
reg += dct * 0x100;
}
return pci_read_config32(dev, reg);
}
static uint32_t read_amd_dct_index_register(device_t dev, uint32_t index_ctl_reg, uint32_t index)
{
uint32_t dword;
@@ -57,12 +97,54 @@ static uint32_t read_amd_dct_index_register(device_t dev, uint32_t index_ctl_reg
return dword;
}
static uint32_t read_amd_dct_index_register_dct(device_t dev, uint8_t node, uint8_t dct, uint32_t index_ctl_reg, uint32_t index)
{
if (is_fam15h()) {
uint32_t dword;
#ifdef __PRE_RAM__
device_t dev_fn1 = PCI_DEV(0, 0x18 + node, 1);
#else
device_t dev_fn1 = dev_find_slot(0, PCI_DEVFN(0x18 + node, 1));
#endif
/* Select DCT */
dword = pci_read_config32(dev_fn1, 0x10c);
dword &= ~0x1;
dword |= (dct & 0x1);
pci_write_config32(dev_fn1, 0x10c, dword);
} else {
/* Apply offset */
index_ctl_reg += dct * 0x100;
}
return read_amd_dct_index_register(dev, index_ctl_reg, index);
}
#ifdef __RAMSTAGE__
static uint64_t rdmsr_uint64_t(unsigned long index) {
msr_t msr = rdmsr(index);
return (((uint64_t)msr.hi) << 32) | ((uint64_t)msr.lo);
}
static uint32_t read_config32_dct_nbpstate(device_t dev, uint8_t node, uint8_t dct, uint8_t nb_pstate, uint32_t reg) {
uint32_t dword;
device_t dev_fn1 = dev_find_slot(0, PCI_DEVFN(0x18 + node, 1));
/* Select DCT */
dword = pci_read_config32(dev_fn1, 0x10c);
dword &= ~0x1;
dword |= (dct & 0x1);
pci_write_config32(dev_fn1, 0x10c, dword);
/* Select NB Pstate index */
dword = pci_read_config32(dev_fn1, 0x10c);
dword &= ~(0x3 << 4);
dword |= (nb_pstate & 0x3) << 4;
pci_write_config32(dev_fn1, 0x10c, dword);
return pci_read_config32(dev, reg);
}
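
F1x10C also carries the NB P-state index in bits 5:4, so registers that exist once per NB P-state (such as F2x210) must be read once per index with the selector reprogrammed in between; read_config32_dct_nbpstate() wraps exactly that, and the save path below loops it over indices 0 through 3. A short usage sketch, assuming ramstage and node 0 / DCT0:

/* Editor's sketch, not part of the patch: capture F2x210 for all four
 * NB P-states of node 0, DCT0, the way the Fam15h save path does. */
device_t dev_fn2 = dev_find_slot(0, PCI_DEVFN(0x18, 2));
uint32_t f2x210[4];
uint8_t nb_pstate;

for (nb_pstate = 0; nb_pstate < 4; nb_pstate++)
	f2x210[nb_pstate] = read_config32_dct_nbpstate(dev_fn2, 0, 0, nb_pstate, 0x210);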
void copy_mct_data_to_save_variable(struct amd_s3_persistent_data* persistent_data)
{
uint8_t i;
@@ -78,7 +160,8 @@ void copy_mct_data_to_save_variable(struct amd_s3_persistent_data* persistent_da
device_t dev_fn1 = dev_find_slot(0, PCI_DEVFN(0x18 + node, 1));
device_t dev_fn2 = dev_find_slot(0, PCI_DEVFN(0x18 + node, 2));
device_t dev_fn3 = dev_find_slot(0, PCI_DEVFN(0x18 + node, 3));
if ((!dev_fn1) || (!dev_fn2) || (!dev_fn3)) {
/* Test for node presence */
if ((!dev_fn1) || (pci_read_config32(dev_fn1, PCI_VENDOR_ID) == 0xffffffff)) {
persistent_data->node[node].node_present = 0;
continue;
}
@@ -91,22 +174,22 @@ void copy_mct_data_to_save_variable(struct amd_s3_persistent_data* persistent_da
data->f2x110 = pci_read_config32(dev_fn2, 0x110);
/* Stage 2 */
data->f1x40 = pci_read_config32(dev_fn1, 0x40 + (0x100 * channel));
data->f1x44 = pci_read_config32(dev_fn1, 0x44 + (0x100 * channel));
data->f1x48 = pci_read_config32(dev_fn1, 0x48 + (0x100 * channel));
data->f1x4c = pci_read_config32(dev_fn1, 0x4c + (0x100 * channel));
data->f1x50 = pci_read_config32(dev_fn1, 0x50 + (0x100 * channel));
data->f1x54 = pci_read_config32(dev_fn1, 0x54 + (0x100 * channel));
data->f1x58 = pci_read_config32(dev_fn1, 0x58 + (0x100 * channel));
data->f1x5c = pci_read_config32(dev_fn1, 0x5c + (0x100 * channel));
data->f1x60 = pci_read_config32(dev_fn1, 0x60 + (0x100 * channel));
data->f1x64 = pci_read_config32(dev_fn1, 0x64 + (0x100 * channel));
data->f1x68 = pci_read_config32(dev_fn1, 0x68 + (0x100 * channel));
data->f1x6c = pci_read_config32(dev_fn1, 0x6c + (0x100 * channel));
data->f1x70 = pci_read_config32(dev_fn1, 0x70 + (0x100 * channel));
data->f1x74 = pci_read_config32(dev_fn1, 0x74 + (0x100 * channel));
data->f1x78 = pci_read_config32(dev_fn1, 0x78 + (0x100 * channel));
data->f1x7c = pci_read_config32(dev_fn1, 0x7c + (0x100 * channel));
data->f1x40 = read_config32_dct(dev_fn1, node, channel, 0x40);
data->f1x44 = read_config32_dct(dev_fn1, node, channel, 0x44);
data->f1x48 = read_config32_dct(dev_fn1, node, channel, 0x48);
data->f1x4c = read_config32_dct(dev_fn1, node, channel, 0x4c);
data->f1x50 = read_config32_dct(dev_fn1, node, channel, 0x50);
data->f1x54 = read_config32_dct(dev_fn1, node, channel, 0x54);
data->f1x58 = read_config32_dct(dev_fn1, node, channel, 0x58);
data->f1x5c = read_config32_dct(dev_fn1, node, channel, 0x5c);
data->f1x60 = read_config32_dct(dev_fn1, node, channel, 0x60);
data->f1x64 = read_config32_dct(dev_fn1, node, channel, 0x64);
data->f1x68 = read_config32_dct(dev_fn1, node, channel, 0x68);
data->f1x6c = read_config32_dct(dev_fn1, node, channel, 0x6c);
data->f1x70 = read_config32_dct(dev_fn1, node, channel, 0x70);
data->f1x74 = read_config32_dct(dev_fn1, node, channel, 0x74);
data->f1x78 = read_config32_dct(dev_fn1, node, channel, 0x78);
data->f1x7c = read_config32_dct(dev_fn1, node, channel, 0x7c);
data->f1xf0 = pci_read_config32(dev_fn1, 0xf0);
data->f1x120 = pci_read_config32(dev_fn1, 0x120);
data->f1x124 = pci_read_config32(dev_fn1, 0x124);
@@ -130,75 +213,144 @@ void copy_mct_data_to_save_variable(struct amd_s3_persistent_data* persistent_da
data->msrc001001f = rdmsr_uint64_t(0xc001001f);
/* Stage 3 */
data->f2x40 = pci_read_config32(dev_fn2, 0x40 + (0x100 * channel));
data->f2x44 = pci_read_config32(dev_fn2, 0x44 + (0x100 * channel));
data->f2x48 = pci_read_config32(dev_fn2, 0x48 + (0x100 * channel));
data->f2x4c = pci_read_config32(dev_fn2, 0x4c + (0x100 * channel));
data->f2x50 = pci_read_config32(dev_fn2, 0x50 + (0x100 * channel));
data->f2x54 = pci_read_config32(dev_fn2, 0x54 + (0x100 * channel));
data->f2x58 = pci_read_config32(dev_fn2, 0x58 + (0x100 * channel));
data->f2x5c = pci_read_config32(dev_fn2, 0x5c + (0x100 * channel));
data->f2x60 = pci_read_config32(dev_fn2, 0x60 + (0x100 * channel));
data->f2x64 = pci_read_config32(dev_fn2, 0x64 + (0x100 * channel));
data->f2x68 = pci_read_config32(dev_fn2, 0x68 + (0x100 * channel));
data->f2x6c = pci_read_config32(dev_fn2, 0x6c + (0x100 * channel));
data->f2x78 = pci_read_config32(dev_fn2, 0x78 + (0x100 * channel));
data->f2x7c = pci_read_config32(dev_fn2, 0x7c + (0x100 * channel));
data->f2x80 = pci_read_config32(dev_fn2, 0x80 + (0x100 * channel));
data->f2x84 = pci_read_config32(dev_fn2, 0x84 + (0x100 * channel));
data->f2x88 = pci_read_config32(dev_fn2, 0x88 + (0x100 * channel));
data->f2x8c = pci_read_config32(dev_fn2, 0x8c + (0x100 * channel));
data->f2x90 = pci_read_config32(dev_fn2, 0x90 + (0x100 * channel));
data->f2xa4 = pci_read_config32(dev_fn2, 0xa4 + (0x100 * channel));
data->f2xa8 = pci_read_config32(dev_fn2, 0xa8 + (0x100 * channel));
data->f2x40 = read_config32_dct(dev_fn2, node, channel, 0x40);
data->f2x44 = read_config32_dct(dev_fn2, node, channel, 0x44);
data->f2x48 = read_config32_dct(dev_fn2, node, channel, 0x48);
data->f2x4c = read_config32_dct(dev_fn2, node, channel, 0x4c);
data->f2x50 = read_config32_dct(dev_fn2, node, channel, 0x50);
data->f2x54 = read_config32_dct(dev_fn2, node, channel, 0x54);
data->f2x58 = read_config32_dct(dev_fn2, node, channel, 0x58);
data->f2x5c = read_config32_dct(dev_fn2, node, channel, 0x5c);
data->f2x60 = read_config32_dct(dev_fn2, node, channel, 0x60);
data->f2x64 = read_config32_dct(dev_fn2, node, channel, 0x64);
data->f2x68 = read_config32_dct(dev_fn2, node, channel, 0x68);
data->f2x6c = read_config32_dct(dev_fn2, node, channel, 0x6c);
data->f2x78 = read_config32_dct(dev_fn2, node, channel, 0x78);
data->f2x7c = read_config32_dct(dev_fn2, node, channel, 0x7c);
data->f2x80 = read_config32_dct(dev_fn2, node, channel, 0x80);
data->f2x84 = read_config32_dct(dev_fn2, node, channel, 0x84);
data->f2x88 = read_config32_dct(dev_fn2, node, channel, 0x88);
data->f2x8c = read_config32_dct(dev_fn2, node, channel, 0x8c);
data->f2x90 = read_config32_dct(dev_fn2, node, channel, 0x90);
data->f2xa4 = read_config32_dct(dev_fn2, node, channel, 0xa4);
data->f2xa8 = read_config32_dct(dev_fn2, node, channel, 0xa8);
/* Family 15h-specific configuration */
if (is_fam15h()) {
data->f2x200 = read_config32_dct(dev_fn2, node, channel, 0x200);
data->f2x204 = read_config32_dct(dev_fn2, node, channel, 0x204);
data->f2x208 = read_config32_dct(dev_fn2, node, channel, 0x208);
data->f2x20c = read_config32_dct(dev_fn2, node, channel, 0x20c);
for (i=0; i<4; i++)
data->f2x210[i] = read_config32_dct_nbpstate(dev_fn2, node, channel, i, 0x210);
data->f2x214 = read_config32_dct(dev_fn2, node, channel, 0x214);
data->f2x218 = read_config32_dct(dev_fn2, node, channel, 0x218);
data->f2x21c = read_config32_dct(dev_fn2, node, channel, 0x21c);
data->f2x22c = read_config32_dct(dev_fn2, node, channel, 0x22c);
data->f2x230 = read_config32_dct(dev_fn2, node, channel, 0x230);
data->f2x234 = read_config32_dct(dev_fn2, node, channel, 0x234);
data->f2x238 = read_config32_dct(dev_fn2, node, channel, 0x238);
data->f2x23c = read_config32_dct(dev_fn2, node, channel, 0x23c);
data->f2x240 = read_config32_dct(dev_fn2, node, channel, 0x240);
data->f2x9cx0d0fe003 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fe003);
data->f2x9cx0d0fe013 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fe013);
for (i=0; i<9; i++)
data->f2x9cx0d0f0_8_0_1f[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f001f | (i << 8));
data->f2x9cx0d0f201f = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f201f);
data->f2x9cx0d0f211f = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f211f);
data->f2x9cx0d0f221f = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f221f);
data->f2x9cx0d0f801f = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f801f);
data->f2x9cx0d0f811f = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f811f);
data->f2x9cx0d0f821f = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f821f);
data->f2x9cx0d0fc01f = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fc01f);
data->f2x9cx0d0fc11f = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fc11f);
data->f2x9cx0d0fc21f = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fc21f);
data->f2x9cx0d0f4009 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f4009);
for (i=0; i<9; i++)
data->f2x9cx0d0f0_8_0_02[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f0002 | (i << 8));
for (i=0; i<9; i++)
data->f2x9cx0d0f0_8_0_06[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f0006 | (i << 8));
for (i=0; i<9; i++)
data->f2x9cx0d0f0_8_0_0a[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f000a | (i << 8));
data->f2x9cx0d0f2002 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f2002);
data->f2x9cx0d0f2102 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f2102);
data->f2x9cx0d0f2202 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f2202);
data->f2x9cx0d0f8002 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f8002);
data->f2x9cx0d0f8006 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f8006);
data->f2x9cx0d0f800a = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f800a);
data->f2x9cx0d0f8102 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f8102);
data->f2x9cx0d0f8106 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f8106);
data->f2x9cx0d0f810a = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f810a);
data->f2x9cx0d0fc002 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fc002);
data->f2x9cx0d0fc006 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fc006);
data->f2x9cx0d0fc00a = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fc00a);
data->f2x9cx0d0fc00e = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fc00e);
data->f2x9cx0d0fc012 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fc012);
data->f2x9cx0d0f2031 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f2031);
data->f2x9cx0d0f2131 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f2131);
data->f2x9cx0d0f2231 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f2231);
data->f2x9cx0d0f8031 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f8031);
data->f2x9cx0d0f8131 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f8131);
data->f2x9cx0d0f8231 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f8231);
data->f2x9cx0d0fc031 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fc031);
data->f2x9cx0d0fc131 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fc131);
data->f2x9cx0d0fc231 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fc231);
for (i=0; i<9; i++)
data->f2x9cx0d0f0_0_f_31[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f0031 | (i << 8));
data->f2x9cx0d0f8021 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f8021);
}
/* Stage 4 */
data->f2x94 = pci_read_config32(dev_fn2, 0x94 + (0x100 * channel));
data->f2x94 = read_config32_dct(dev_fn2, node, channel, 0x94);
/* Stage 6 */
for (i=0; i<9; i++)
for (j=0; j<3; j++)
data->f2x9cx0d0f0_f_8_0_0_8_4_0[i][j] = read_amd_dct_index_register(dev_fn2, 0x98 + (0x100 * channel), 0x0d0f0000 | (i << 8) | (j * 4));
data->f2x9cx00 = read_amd_dct_index_register(dev_fn2, 0x98 + (0x100 * channel), 0x00);
data->f2x9cx0a = read_amd_dct_index_register(dev_fn2, 0x98 + (0x100 * channel), 0x0a);
data->f2x9cx0c = read_amd_dct_index_register(dev_fn2, 0x98 + (0x100 * channel), 0x0c);
data->f2x9cx0d0f0_f_8_0_0_8_4_0[i][j] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f0000 | (i << 8) | (j * 4));
data->f2x9cx00 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x00);
data->f2x9cx0a = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0a);
data->f2x9cx0c = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0c);
/* Stage 7 */
data->f2x9cx04 = read_amd_dct_index_register(dev_fn2, 0x98 + (0x100 * channel), 0x04);
data->f2x9cx04 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x04);
/* Stage 9 */
data->f2x9cx0d0fe006 = read_amd_dct_index_register(dev_fn2, 0x98 + (0x100 * channel), 0x0d0fe006);
data->f2x9cx0d0fe007 = read_amd_dct_index_register(dev_fn2, 0x98 + (0x100 * channel), 0x0d0fe007);
data->f2x9cx0d0fe006 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fe006);
data->f2x9cx0d0fe007 = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0fe007);
/* Stage 10 */
for (i=0; i<12; i++)
data->f2x9cx10[i] = read_amd_dct_index_register(dev_fn2, 0x98 + (0x100 * channel), 0x10 + i);
data->f2x9cx10[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x10 + i);
for (i=0; i<12; i++)
data->f2x9cx20[i] = read_amd_dct_index_register(dev_fn2, 0x98 + (0x100 * channel), 0x20 + i);
data->f2x9cx20[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x20 + i);
for (i=0; i<4; i++)
for (j=0; j<3; j++)
data->f2x9cx3_0_0_3_1[i][j] = read_amd_dct_index_register(dev_fn2, 0x98 + (0x100 * channel), (0x01 + i) + (0x100 * j));
data->f2x9cx3_0_0_3_1[i][j] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, (0x01 + i) + (0x100 * j));
for (i=0; i<4; i++)
for (j=0; j<3; j++)
data->f2x9cx3_0_0_7_5[i][j] = read_amd_dct_index_register(dev_fn2, 0x98 + (0x100 * channel), (0x05 + i) + (0x100 * j));
data->f2x9cx0d = read_amd_dct_index_register(dev_fn2, 0x98 + (0x100 * channel), 0x0d);
data->f2x9cx3_0_0_7_5[i][j] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, (0x05 + i) + (0x100 * j));
data->f2x9cx0d = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d);
for (i=0; i<9; i++)
data->f2x9cx0d0f0_f_0_13[i] = read_amd_dct_index_register(dev_fn2, 0x98 + (0x100 * channel), 0x0d0f0013 | (i << 8));
data->f2x9cx0d0f0_f_0_13[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f0013 | (i << 8));
for (i=0; i<9; i++)
data->f2x9cx0d0f0_f_0_30[i] = read_amd_dct_index_register(dev_fn2, 0x98 + (0x100 * channel), 0x0d0f0030 | (i << 8));
data->f2x9cx0d0f0_f_0_30[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f0030 | (i << 8));
for (i=0; i<4; i++)
data->f2x9cx0d0f2_f_0_30[i] = read_amd_dct_index_register(dev_fn2, 0x98 + (0x100 * channel), 0x0d0f2030 | (i << 8));
data->f2x9cx0d0f2_f_0_30[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f2030 | (i << 8));
for (i=0; i<2; i++)
for (j=0; j<3; j++)
data->f2x9cx0d0f8_8_4_0[i][j] = read_amd_dct_index_register(dev_fn2, 0x98 + (0x100 * channel), 0x0d0f0000 | (i << 8) | (j * 4));
data->f2x9cx0d0f812f = read_amd_dct_index_register(dev_fn2, 0x98 + (0x100 * channel), 0x0d0f812f);
data->f2x9cx0d0f8_8_4_0[i][j] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f0000 | (i << 8) | (j * 4));
data->f2x9cx0d0f812f = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x0d0f812f);
/* Stage 11 */
if (IS_ENABLED(CONFIG_DIMM_DDR3)) {
for (i=0; i<12; i++)
data->f2x9cx30[i] = read_amd_dct_index_register(dev_fn2, 0x98 + (0x100 * channel), 0x30 + i);
data->f2x9cx30[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x30 + i);
for (i=0; i<12; i++)
data->f2x9cx40[i] = read_amd_dct_index_register(dev_fn2, 0x98 + (0x100 * channel), 0x40 + i);
data->f2x9cx40[i] = read_amd_dct_index_register_dct(dev_fn2, node, channel, 0x98, 0x40 + i);
}
/* Other */
@@ -208,6 +360,43 @@ void copy_mct_data_to_save_variable(struct amd_s3_persistent_data* persistent_da
}
}
#else
static void write_config32_dct(device_t dev, uint8_t node, uint8_t dct, uint32_t reg, uint32_t value) {
if (is_fam15h()) {
uint32_t dword;
device_t dev_fn1 = PCI_DEV(0, 0x18 + node, 1);
/* Select DCT */
dword = pci_read_config32(dev_fn1, 0x10c);
dword &= ~0x1;
dword |= (dct & 0x1);
pci_write_config32(dev_fn1, 0x10c, dword);
} else {
/* Apply offset */
reg += dct * 0x100;
}
pci_write_config32(dev, reg, value);
}
static void write_config32_dct_nbpstate(device_t dev, uint8_t node, uint8_t dct, uint8_t nb_pstate, uint32_t reg, uint32_t value) {
uint32_t dword;
device_t dev_fn1 = PCI_DEV(0, 0x18 + node, 1);
/* Select DCT */
dword = pci_read_config32(dev_fn1, 0x10c);
dword &= ~0x1;
dword |= (dct & 0x1);
pci_write_config32(dev_fn1, 0x10c, dword);
/* Select NB Pstate index */
dword = pci_read_config32(dev_fn1, 0x10c);
dword &= ~(0x3 << 4);
dword |= (nb_pstate & 0x3) << 4;
pci_write_config32(dev_fn1, 0x10c, dword);
pci_write_config32(dev, reg, value);
}
static void write_amd_dct_index_register(device_t dev, uint32_t index_ctl_reg, uint32_t index, uint32_t value)
{
uint32_t dword;
@@ -219,6 +408,25 @@ static void write_amd_dct_index_register(device_t dev, uint32_t index_ctl_reg, u
dword = pci_read_config32(dev, index_ctl_reg);
} while (!(dword & (1 << 31)));
}
static void write_amd_dct_index_register_dct(device_t dev, uint8_t node, uint8_t dct, uint32_t index_ctl_reg, uint32_t index, uint32_t value)
{
if (is_fam15h()) {
uint32_t dword;
device_t dev_fn1 = PCI_DEV(0, 0x18 + node, 1);
/* Select DCT */
dword = pci_read_config32(dev_fn1, 0x10c);
dword &= ~0x1;
dword |= (dct & 0x1);
pci_write_config32(dev_fn1, 0x10c, dword);
} else {
/* Apply offset */
index_ctl_reg += dct * 0x100;
}
write_amd_dct_index_register(dev, index_ctl_reg, index, value);
}
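
These _dct wrappers only handle controller selection; the indirect access itself lives in read_amd_dct_index_register()/write_amd_dct_index_register(): the extra-data index is posted to the control register (F2x98), the code polls until the hardware signals completion in bit 31, and the payload moves through the data port at the next dword (F2x9C). A minimal sketch of the read half of that handshake, given as an outline rather than the exact helper body:

/* Editor's sketch, not part of the patch: indirect DCT PHY register read
 * through the F2x98 control register and F2x9C data port. */
static uint32_t dct_index_read_sketch(device_t dev, uint32_t index_ctl_reg, uint32_t index)
{
	uint32_t dword;

	pci_write_config32(dev, index_ctl_reg, index);	/* Post the extra-data index */
	do {
		dword = pci_read_config32(dev, index_ctl_reg);
	} while (!(dword & (1 << 31)));			/* Wait for the access-done flag */

	return pci_read_config32(dev, index_ctl_reg + 0x04);	/* Data port */
}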
#endif
#ifdef __PRE_RAM__
@@ -258,31 +466,31 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
if (!persistent_data->node[node].node_present)
continue;
pci_write_config32(PCI_DEV(0, 0x18 + node, 1), 0x40 + (0x100 * channel), data->f1x40);
pci_write_config32(PCI_DEV(0, 0x18 + node, 1), 0x44 + (0x100 * channel), data->f1x44);
pci_write_config32(PCI_DEV(0, 0x18 + node, 1), 0x48 + (0x100 * channel), data->f1x48);
pci_write_config32(PCI_DEV(0, 0x18 + node, 1), 0x4c + (0x100 * channel), data->f1x4c);
pci_write_config32(PCI_DEV(0, 0x18 + node, 1), 0x50 + (0x100 * channel), data->f1x50);
pci_write_config32(PCI_DEV(0, 0x18 + node, 1), 0x54 + (0x100 * channel), data->f1x54);
pci_write_config32(PCI_DEV(0, 0x18 + node, 1), 0x58 + (0x100 * channel), data->f1x58);
pci_write_config32(PCI_DEV(0, 0x18 + node, 1), 0x5c + (0x100 * channel), data->f1x5c);
pci_write_config32(PCI_DEV(0, 0x18 + node, 1), 0x60 + (0x100 * channel), data->f1x60);
pci_write_config32(PCI_DEV(0, 0x18 + node, 1), 0x64 + (0x100 * channel), data->f1x64);
pci_write_config32(PCI_DEV(0, 0x18 + node, 1), 0x68 + (0x100 * channel), data->f1x68);
pci_write_config32(PCI_DEV(0, 0x18 + node, 1), 0x6c + (0x100 * channel), data->f1x6c);
pci_write_config32(PCI_DEV(0, 0x18 + node, 1), 0x70 + (0x100 * channel), data->f1x70);
pci_write_config32(PCI_DEV(0, 0x18 + node, 1), 0x74 + (0x100 * channel), data->f1x74);
pci_write_config32(PCI_DEV(0, 0x18 + node, 1), 0x78 + (0x100 * channel), data->f1x78);
pci_write_config32(PCI_DEV(0, 0x18 + node, 1), 0x7c + (0x100 * channel), data->f1x7c);
pci_write_config32(PCI_DEV(0, 0x18 + node, 1), 0xf0 + (0x100 * channel), data->f1xf0);
pci_write_config32(PCI_DEV(0, 0x18 + node, 1), 0x120 + (0x100 * channel), data->f1x120);
pci_write_config32(PCI_DEV(0, 0x18 + node, 1), 0x124 + (0x100 * channel), data->f1x124);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x10c + (0x100 * channel), data->f2x10c);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x114 + (0x100 * channel), data->f2x114);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x118 + (0x100 * channel), data->f2x118);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x11c + (0x100 * channel), data->f2x11c);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x1b0 + (0x100 * channel), data->f2x1b0);
pci_write_config32(PCI_DEV(0, 0x18 + node, 3), 0x44 + (0x100 * channel), data->f3x44);
write_config32_dct(PCI_DEV(0, 0x18 + node, 1), node, channel, 0x40, data->f1x40);
write_config32_dct(PCI_DEV(0, 0x18 + node, 1), node, channel, 0x44, data->f1x44);
write_config32_dct(PCI_DEV(0, 0x18 + node, 1), node, channel, 0x48, data->f1x48);
write_config32_dct(PCI_DEV(0, 0x18 + node, 1), node, channel, 0x4c, data->f1x4c);
write_config32_dct(PCI_DEV(0, 0x18 + node, 1), node, channel, 0x50, data->f1x50);
write_config32_dct(PCI_DEV(0, 0x18 + node, 1), node, channel, 0x54, data->f1x54);
write_config32_dct(PCI_DEV(0, 0x18 + node, 1), node, channel, 0x58, data->f1x58);
write_config32_dct(PCI_DEV(0, 0x18 + node, 1), node, channel, 0x5c, data->f1x5c);
write_config32_dct(PCI_DEV(0, 0x18 + node, 1), node, channel, 0x60, data->f1x60);
write_config32_dct(PCI_DEV(0, 0x18 + node, 1), node, channel, 0x64, data->f1x64);
write_config32_dct(PCI_DEV(0, 0x18 + node, 1), node, channel, 0x68, data->f1x68);
write_config32_dct(PCI_DEV(0, 0x18 + node, 1), node, channel, 0x6c, data->f1x6c);
write_config32_dct(PCI_DEV(0, 0x18 + node, 1), node, channel, 0x70, data->f1x70);
write_config32_dct(PCI_DEV(0, 0x18 + node, 1), node, channel, 0x74, data->f1x74);
write_config32_dct(PCI_DEV(0, 0x18 + node, 1), node, channel, 0x78, data->f1x78);
write_config32_dct(PCI_DEV(0, 0x18 + node, 1), node, channel, 0x7c, data->f1x7c);
write_config32_dct(PCI_DEV(0, 0x18 + node, 1), node, channel, 0xf0, data->f1xf0);
write_config32_dct(PCI_DEV(0, 0x18 + node, 1), node, channel, 0x120, data->f1x120);
write_config32_dct(PCI_DEV(0, 0x18 + node, 1), node, channel, 0x124, data->f1x124);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x10c, data->f2x10c);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x114, data->f2x114);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x118, data->f2x118);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x11c, data->f2x11c);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x1b0, data->f2x1b0);
write_config32_dct(PCI_DEV(0, 0x18 + node, 3), node, channel, 0x44, data->f3x44);
for (i=0; i<16; i++) {
wrmsr_uint64_t(0x00000200 | i, data->msr0000020[i]);
}
@@ -309,31 +517,97 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
if (!persistent_data->node[node].node_present)
continue;
if (is_fam15h())
ganged = 0;
else
ganged = !!(data->f2x110 & 0x10);
if ((ganged == 1) && (channel > 0))
continue;
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x40 + (0x100 * channel), data->f2x40);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x44 + (0x100 * channel), data->f2x44);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x48 + (0x100 * channel), data->f2x48);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x4c + (0x100 * channel), data->f2x4c);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x50 + (0x100 * channel), data->f2x50);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x54 + (0x100 * channel), data->f2x54);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x58 + (0x100 * channel), data->f2x58);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x5c + (0x100 * channel), data->f2x5c);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x60 + (0x100 * channel), data->f2x60);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x64 + (0x100 * channel), data->f2x64);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x68 + (0x100 * channel), data->f2x68);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x6c + (0x100 * channel), data->f2x6c);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x78 + (0x100 * channel), data->f2x78);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x7c + (0x100 * channel), data->f2x7c);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x80 + (0x100 * channel), data->f2x80);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x84 + (0x100 * channel), data->f2x84);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x88 + (0x100 * channel), data->f2x88);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x8c + (0x100 * channel), data->f2x8c);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x90 + (0x100 * channel), data->f2x90);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0xa4 + (0x100 * channel), data->f2xa4);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0xa8 + (0x100 * channel), data->f2xa8);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x40, data->f2x40);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x44, data->f2x44);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x48, data->f2x48);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x4c, data->f2x4c);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x50, data->f2x50);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x54, data->f2x54);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x58, data->f2x58);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x5c, data->f2x5c);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x60, data->f2x60);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x64, data->f2x64);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x68, data->f2x68);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x6c, data->f2x6c);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x78, data->f2x78);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x7c, data->f2x7c);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x80, data->f2x80);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x84, data->f2x84);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x88, data->f2x88);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x8c, data->f2x8c);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x90, data->f2x90);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0xa4, data->f2xa4);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0xa8, data->f2xa8);
}
}
/* Family 15h-specific configuration */
if (is_fam15h()) {
for (node = 0; node < MAX_NODES_SUPPORTED; node++) {
for (channel = 0; channel < 2; channel++) {
struct amd_s3_persistent_mct_channel_data* data = &persistent_data->node[node].channel[channel];
if (!persistent_data->node[node].node_present)
continue;
/* Initialize DCT */
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0000000b, 0x80000000);
dword = read_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fe013);
dword &= ~0xffff;
dword |= 0x118;
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fe013, dword);
/* Restore values */
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x200, data->f2x200);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x204, data->f2x204);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x208, data->f2x208);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x20c, data->f2x20c);
for (i=0; i<4; i++)
write_config32_dct_nbpstate(PCI_DEV(0, 0x18 + node, 2), node, channel, i, 0x210, data->f2x210[i]);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x214, data->f2x214);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x218, data->f2x218);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x21c, data->f2x21c);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x22c, data->f2x22c);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x230, data->f2x230);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x234, data->f2x234);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x238, data->f2x238);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x23c, data->f2x23c);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x240, data->f2x240);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fe013, data->f2x9cx0d0fe013);
for (i=0; i<9; i++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f001f | (i << 8), data->f2x9cx0d0f0_8_0_1f[i]);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f201f, data->f2x9cx0d0f201f);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f211f, data->f2x9cx0d0f211f);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f221f, data->f2x9cx0d0f221f);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f801f, data->f2x9cx0d0f801f);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f811f, data->f2x9cx0d0f811f);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f821f, data->f2x9cx0d0f821f);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fc01f, data->f2x9cx0d0fc01f);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fc11f, data->f2x9cx0d0fc11f);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fc21f, data->f2x9cx0d0fc21f);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f4009, data->f2x9cx0d0f4009);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f2031, data->f2x9cx0d0f2031);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f2131, data->f2x9cx0d0f2131);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f2231, data->f2x9cx0d0f2231);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f8031, data->f2x9cx0d0f8031);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f8131, data->f2x9cx0d0f8131);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f8231, data->f2x9cx0d0f8231);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fc031, data->f2x9cx0d0fc031);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fc131, data->f2x9cx0d0fc131);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fc231, data->f2x9cx0d0fc231);
for (i=0; i<9; i++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f0031 | (i << 8), data->f2x9cx0d0f0_0_f_31[i]);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f8021, data->f2x9cx0d0f8021);
}
}
}
@@ -344,33 +618,44 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
if (!persistent_data->node[node].node_present)
continue;
if (is_fam15h())
ganged = 0;
else
ganged = !!(data->f2x110 & 0x10);
if ((ganged == 1) && (channel > 0))
continue;
if (is_fam15h()) {
/* Program PllLockTime = 0x190 */
dword = read_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fe006);
dword &= ~0xffff;
dword |= 0x190;
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fe006, dword);
/* Program MemClkFreqVal = 0 */
dword = read_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x94);
dword &= ~(0x1 << 7);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x94, dword);
/* Restore DRAM Address/Timing Control Register */
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x04, data->f2x9cx04);
} else {
/* Disable PHY auto-compensation engine */
dword = read_amd_dct_index_register(PCI_DEV(0, 0x18 + node, 2), 0x98 + (0x100 * channel), 0x08);
dword = read_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x08);
if (!(dword & (1 << 30))) {
dword |= (1 << 30);
write_amd_dct_index_register(PCI_DEV(0, 0x18 + node, 2), 0x98 + (0x100 * channel), 0x08, dword);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x08, dword);
/* Wait for 5us */
mct_Wait(100);
}
}
/* Restore DRAM Configuration High Register */
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x94 + (0x100 * channel), data->f2x94);
/* Enable PHY auto-compensation engine */
dword = read_amd_dct_index_register(PCI_DEV(0, 0x18 + node, 2), 0x98 + (0x100 * channel), 0x08);
dword &= ~(1 << 30);
write_amd_dct_index_register(PCI_DEV(0, 0x18 + node, 2), 0x98 + (0x100 * channel), 0x08, dword);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x94, data->f2x94);
}
}
/* Wait for 750us */
mct_Wait(15000);
/* Stage 5 */
for (node = 0; node < MAX_NODES_SUPPORTED; node++) {
for (channel = 0; channel < 2; channel++) {
@@ -378,16 +663,39 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
if (!persistent_data->node[node].node_present)
continue;
if (is_fam15h())
ganged = 0;
else
ganged = !!(data->f2x110 & 0x10);
if ((ganged == 1) && (channel > 0))
continue;
dct_enabled = !(data->f2x94 & (1 << 14));
if (!dct_enabled)
continue;
/* Wait for any pending PHY frequency changes to complete */
do {
dword = read_amd_dct_index_register(PCI_DEV(0, 0x18 + node, 2), 0x98 + (0x100 * channel), 0x08);
dword = read_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x94);
} while (dword & (1 << 21));
if (is_fam15h()) {
/* Program PllLockTime = 0xf */
dword = read_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fe006);
dword &= ~0xffff;
dword |= 0xf;
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fe006, dword);
} else {
/* Enable PHY auto-compensation engine */
dword = read_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x08);
dword &= ~(1 << 30);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x08, dword);
}
}
}
/* Wait for 750us */
mct_Wait(15000);
/* Stage 6 */
for (node = 0; node < MAX_NODES_SUPPORTED; node++) {
@@ -398,10 +706,49 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
for (i=0; i<9; i++)
for (j=0; j<3; j++)
write_amd_dct_index_register(PCI_DEV(0, 0x18 + node, 2), 0x98 + (0x100 * channel), 0x0d0f0000 | (i << 8) | (j * 4), data->f2x9cx0d0f0_f_8_0_0_8_4_0[i][j]);
write_amd_dct_index_register(PCI_DEV(0, 0x18 + node, 2), 0x98 + (0x100 * channel), 0x00, data->f2x9cx00);
write_amd_dct_index_register(PCI_DEV(0, 0x18 + node, 2), 0x98 + (0x100 * channel), 0x0a, data->f2x9cx0a);
write_amd_dct_index_register(PCI_DEV(0, 0x18 + node, 2), 0x98 + (0x100 * channel), 0x0c, data->f2x9cx0c);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f0000 | (i << 8) | (j * 4), data->f2x9cx0d0f0_f_8_0_0_8_4_0[i][j]);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x00, data->f2x9cx00);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0a, data->f2x9cx0a);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0c, data->f2x9cx0c);
}
}
/* Family 15h-specific configuration */
if (is_fam15h()) {
for (node = 0; node < MAX_NODES_SUPPORTED; node++) {
for (channel = 0; channel < 2; channel++) {
struct amd_s3_persistent_mct_channel_data* data = &persistent_data->node[node].channel[channel];
if (!persistent_data->node[node].node_present)
continue;
dword = read_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fe003);
dword |= (0x3 << 13); /* DisAutoComp, DisablePredriverCal = 1 */
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fe003, dword);
for (i=0; i<9; i++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f0006 | (i << 8), data->f2x9cx0d0f0_8_0_06[i]);
for (i=0; i<9; i++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f000a | (i << 8), data->f2x9cx0d0f0_8_0_0a[i]);
for (i=0; i<9; i++)
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f0002 | (i << 8), (0x8000 | data->f2x9cx0d0f0_8_0_02[i]));
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f8006, data->f2x9cx0d0f8006);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f800a, data->f2x9cx0d0f800a);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f8106, data->f2x9cx0d0f8106);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f810a, data->f2x9cx0d0f810a);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fc006, data->f2x9cx0d0fc006);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fc00a, data->f2x9cx0d0fc00a);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fc00e, data->f2x9cx0d0fc00e);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fc012, data->f2x9cx0d0fc012);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f8002, (0x8000 | data->f2x9cx0d0f8002));
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f8102, (0x8000 | data->f2x9cx0d0f8102));
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fc002, (0x8000 | data->f2x9cx0d0fc002));
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f2002, (0x8000 | data->f2x9cx0d0f2002));
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f2102, (0x8000 | data->f2x9cx0d0f2102));
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f2202, (0x8000 | data->f2x9cx0d0f2202));
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fe003, data->f2x9cx0d0fe003);
}
}
}
@@ -412,11 +759,15 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
if (!persistent_data->node[node].node_present)
continue;
if (is_fam15h())
ganged = 0;
else
ganged = !!(data->f2x110 & 0x10);
if ((ganged == 1) && (channel > 0))
continue;
write_amd_dct_index_register(PCI_DEV(0, 0x18 + node, 2), 0x98 + (0x100 * channel), 0x04, data->f2x9cx04);
if (!is_fam15h())
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x04, data->f2x9cx04);
}
}
@@ -431,6 +782,9 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
if (!dct_enabled)
continue;
if (is_fam15h())
ganged = 0;
else
ganged = !!(data->f2x110 & 0x10);
if ((ganged == 1) && (channel > 0))
continue;
@@ -438,9 +792,9 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
printk(BIOS_SPEW, "Taking DIMMs out of self refresh node: %d channel: %d\n", node, channel);
/* Exit self refresh mode */
dword = pci_read_config32(PCI_DEV(0, 0x18 + node, 2), 0x90 + (0x100 * channel));
dword = read_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x90);
dword |= (1 << 1);
pci_write_config32(PCI_DEV(0, 0x18 + node, 2), 0x90 + (0x100 * channel), dword);
write_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x90, dword);
}
}
@@ -459,12 +813,12 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
/* Wait for transition from self refresh mode to complete */
do {
dword = pci_read_config32(PCI_DEV(0, 0x18 + node, 2), 0x90 + (0x100 * channel));
dword = read_config32_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x90);
} while (dword & (1 << 1));
/* Restore registers */
write_amd_dct_index_register(PCI_DEV(0, 0x18 + node, 2), 0x98 + (0x100 * channel), 0x0d0fe006, data->f2x9cx0d0fe006);
write_amd_dct_index_register(PCI_DEV(0, 0x18 + node, 2), 0x98 + (0x100 * channel), 0x0d0fe007, data->f2x9cx0d0fe007);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fe006, data->f2x9cx0d0fe006);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0fe007, data->f2x9cx0d0fe007);
}
}
@@ -476,26 +830,26 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
continue;
for (i=0; i<12; i++)
write_amd_dct_index_register(PCI_DEV(0, 0x18 + node, 2), 0x98 + (0x100 * channel), 0x10 + i, data->f2x9cx10[i]);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x10 + i, data->f2x9cx10[i]);
for (i=0; i<12; i++)
write_amd_dct_index_register(PCI_DEV(0, 0x18 + node, 2), 0x98 + (0x100 * channel), 0x20 + i, data->f2x9cx20[i]);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x20 + i, data->f2x9cx20[i]);
for (i=0; i<4; i++)
for (j=0; j<3; j++)
write_amd_dct_index_register(PCI_DEV(0, 0x18 + node, 2), 0x98 + (0x100 * channel), (0x01 + i) + (0x100 * j), data->f2x9cx3_0_0_3_1[i][j]);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, (0x01 + i) + (0x100 * j), data->f2x9cx3_0_0_3_1[i][j]);
for (i=0; i<4; i++)
for (j=0; j<3; j++)
write_amd_dct_index_register(PCI_DEV(0, 0x18 + node, 2), 0x98 + (0x100 * channel), (0x05 + i) + (0x100 * j), data->f2x9cx3_0_0_7_5[i][j]);
write_amd_dct_index_register(PCI_DEV(0, 0x18 + node, 2), 0x98 + (0x100 * channel), 0x0d, data->f2x9cx0d);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, (0x05 + i) + (0x100 * j), data->f2x9cx3_0_0_7_5[i][j]);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d, data->f2x9cx0d);
for (i=0; i<9; i++)
write_amd_dct_index_register(PCI_DEV(0, 0x18 + node, 2), 0x98 + (0x100 * channel), 0x0d0f0013 | (i << 8), data->f2x9cx0d0f0_f_0_13[i]);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f0013 | (i << 8), data->f2x9cx0d0f0_f_0_13[i]);
for (i=0; i<9; i++)
write_amd_dct_index_register(PCI_DEV(0, 0x18 + node, 2), 0x98 + (0x100 * channel), 0x0d0f0030 | (i << 8), data->f2x9cx0d0f0_f_0_30[i]);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f0030 | (i << 8), data->f2x9cx0d0f0_f_0_30[i]);
for (i=0; i<4; i++)
write_amd_dct_index_register(PCI_DEV(0, 0x18 + node, 2), 0x98 + (0x100 * channel), 0x0d0f2030 | (i << 8), data->f2x9cx0d0f2_f_0_30[i]);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f2030 | (i << 8), data->f2x9cx0d0f2_f_0_30[i]);
for (i=0; i<2; i++)
for (j=0; j<3; j++)
write_amd_dct_index_register(PCI_DEV(0, 0x18 + node, 2), 0x98 + (0x100 * channel), 0x0d0f0000 | (i << 8) | (j * 4), data->f2x9cx0d0f8_8_4_0[i][j]);
write_amd_dct_index_register(PCI_DEV(0, 0x18 + node, 2), 0x98 + (0x100 * channel), 0x0d0f812f, data->f2x9cx0d0f812f);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f0000 | (i << 8) | (j * 4), data->f2x9cx0d0f8_8_4_0[i][j]);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x0d0f812f, data->f2x9cx0d0f812f);
}
}
@@ -508,9 +862,9 @@ void restore_mct_data_from_save_variable(struct amd_s3_persistent_data* persiste
continue;
for (i=0; i<12; i++)
write_amd_dct_index_register(PCI_DEV(0, 0x18 + node, 2), 0x98 + (0x100 * channel), 0x30 + i, data->f2x9cx30[i]);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x30 + i, data->f2x9cx30[i]);
for (i=0; i<12; i++)
write_amd_dct_index_register(PCI_DEV(0, 0x18 + node, 2), 0x98 + (0x100 * channel), 0x40 + i, data->f2x9cx40[i]);
write_amd_dct_index_register_dct(PCI_DEV(0, 0x18 + node, 2), node, channel, 0x98, 0x40 + i, data->f2x9cx40[i]);
}
}
}

View File

@@ -2,6 +2,7 @@
* This file is part of the coreboot project.
*
* Copyright (C) 2007 Advanced Micro Devices, Inc.
* Copyright (C) 2015 Timothy Pearson <tpearson@raptorengineeringinc.com>, Raptor Engineering
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -59,9 +60,14 @@ UPDATE AS NEEDED
#ifndef MEM_MAX_LOAD_FREQ
#if (CONFIG_DIMM_SUPPORT & 0x000F)==0x0005 /* AMD_FAM10_DDR3 */
#define MEM_MAX_LOAD_FREQ 800
#else
#define MEM_MAX_LOAD_FREQ 933
#define MEM_MIN_PLATFORM_FREQ_FAM10 400
#define MEM_MIN_PLATFORM_FREQ_FAM15 333
#else /* AMD_FAM10_DDR2 */
#define MEM_MAX_LOAD_FREQ 400
#define MEM_MIN_PLATFORM_FREQ_FAM10 200
/* DDR2 not available on Family 15h */
#define MEM_MIN_PLATFORM_FREQ_FAM15 0
#endif
#endif

View File

@@ -40,7 +40,7 @@
#define MINIMUM_DRAM_BELOW_4G 0x1000000
static const uint16_t ddr2_limits[4] = {400, 333, 266, 200};
static const uint16_t ddr3_limits[4] = {800, 666, 533, 400};
static const uint16_t ddr3_limits[16] = {933, 800, 666, 533, 400, 333, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
static u16 mctGet_NVbits(u8 index)
{
@@ -77,12 +77,19 @@ static u16 mctGet_NVbits(u8 index)
if (get_option(&nvram, "max_mem_clock") == CB_SUCCESS) {
int limit = val;
if (IS_ENABLED(CONFIG_DIMM_DDR3))
limit = ddr3_limits[nvram & 3];
limit = ddr3_limits[nvram & 0xf];
else if (IS_ENABLED(CONFIG_DIMM_DDR2))
limit = ddr2_limits[nvram & 3];
limit = ddr2_limits[nvram & 0x3];
val = min(limit, val);
}
break;
case NV_MIN_MEMCLK:
/* Minimum platform supported memclk */
if (is_fam15h())
val = MEM_MIN_PLATFORM_FREQ_FAM15;
else
val = MEM_MIN_PLATFORM_FREQ_FAM10;
break;
case NV_ECC_CAP:
#if SYSTEM_TYPE == SERVER
val = 1; /* memory bus ECC capable */
@@ -250,6 +257,9 @@ static u16 mctGet_NVbits(u8 index)
case NV_L2BKScrub:
val = 0; /* Disabled - See L2Scrub in BKDG */
break;
case NV_L3BKScrub:
val = 0; /* Disabled - See L3Scrub in BKDG */
break;
case NV_DCBKScrub:
val = 0; /* Disabled - See DcacheScrub in BKDG */
break;
@@ -299,6 +309,9 @@ static void mctGet_MaxLoadFreq(struct DCTStatStruc *pDCTstat)
int ch2_count = 0;
uint8_t ch1_registered = 0;
uint8_t ch2_registered = 0;
uint8_t ch1_voltage = 0;
uint8_t ch2_voltage = 0;
uint8_t highest_rank_count[2];
int i;
for (i = 0; i < 15; i = i + 2) {
if (pDCTstat->DIMMValid & (1 << i))
@@ -317,8 +330,28 @@ static void mctGet_MaxLoadFreq(struct DCTStatStruc *pDCTstat)
printk(BIOS_DEBUG, "mctGet_MaxLoadFreq: Channel 2: %d DIMM(s) detected\n", ch2_count);
}
#if (CONFIG_DIMM_SUPPORT & 0x000F)==0x0005 /* AMD_FAM10_DDR3 */
uint8_t dimm;
for (i = 0; i < 15; i = i + 2) {
if (pDCTstat->DIMMValid & (1 << i))
ch1_voltage |= pDCTstat->DimmConfiguredVoltage[i];
if (pDCTstat->DIMMValid & (1 << (i + 1)))
ch2_voltage |= pDCTstat->DimmConfiguredVoltage[i + 1];
}
for (i = 0; i < 2; i++) {
sDCTStruct *pDCTData = pDCTstat->C_DCTPtr[i];
highest_rank_count[i] = 0x0;
for (dimm = 0; dimm < 8; dimm++) {
if (pDCTData->DimmRanks[dimm] > highest_rank_count[i])
highest_rank_count[i] = pDCTData->DimmRanks[dimm];
}
}
#endif
/* Set limits if needed */
pDCTstat->PresetmaxFreq = mct_MaxLoadFreq(max(ch1_count, ch2_count), (ch1_registered || ch2_registered), pDCTstat->PresetmaxFreq);
pDCTstat->PresetmaxFreq = mct_MaxLoadFreq(max(ch1_count, ch2_count), max(highest_rank_count[0], highest_rank_count[1]), (ch1_registered || ch2_registered), (ch1_voltage | ch2_voltage), pDCTstat->PresetmaxFreq);
}
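
With registered, load-reduced and low-voltage DDR3 in the mix, the preset cap now depends on more than DIMM count: the highest rank count and the OR of the per-DIMM voltage bitmaps are passed to mct_MaxLoadFreq() as well, since heavily loaded, quad-rank or low-voltage channels cannot run at the top speed bins. A hypothetical illustration of that kind of derating decision (the caps and the voltage-bit encoding below are assumptions for illustration, not the actual mct_MaxLoadFreq() tables):

/* Editor's hypothetical sketch: derive a memclk cap from channel loading.
 * Assumes coreboot's min() and an illustrative voltage bitmap (bit 2 = 1.25V). */
static uint16_t max_load_freq_sketch(uint8_t count, uint8_t highest_rank_count,
				uint8_t registered, uint8_t voltage_bitmap,
				uint16_t preset)
{
	uint16_t limit = preset;

	if (count > 2)
		limit = min(limit, 666);	/* 3+ DIMMs per channel (assumed cap) */
	if ((highest_rank_count >= 4) && !registered)
		limit = min(limit, 533);	/* unbuffered quad-rank (assumed cap) */
	if (voltage_bitmap & 0x4)
		limit = min(limit, 666);	/* 1.25V DIMM present (assumed cap) */

	return limit;
}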
#ifdef UNUSED_CODE
@@ -482,7 +515,7 @@ static void mctHookAfterAnyTraining(void)
{
}
static u32 mctGetLogicalCPUID_D(u8 node)
static uint64_t mctGetLogicalCPUID_D(u8 node)
{
return mctGetLogicalCPUID(node);
}