nb/intel/sandybridge: Tidy up code and comments

- Reformat some lines of code
- Move MCHBAR registers and documentation into a separate file
- Add a few missing macros
- Rename some registers
- Rewrite several comments
- Use C-style comments for consistency
- Rewrite some hex constants
- Use HOST_BRIDGE instead of PCI_DEV(0, 0, 0)

With BUILD_TIMELESS=1, this commit does not change the result of:
- Asus P8Z77-V LX2 with native raminit.
- Asus P8Z77-M PRO with MRC raminit.

Change-Id: I6e113e48afd685ca63cfcb11ff9fcf9df6e41e46
Signed-off-by: Angel Pons <th3fanbus@gmail.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/39599
Reviewed-by: Felix Held <felix-coreboot@felixheld.de>
Reviewed-by: Patrick Rudolph <patrick.rudolph@9elements.com>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
This commit is contained in:
Angel Pons
2020-03-16 23:17:32 +01:00
committed by Matt DeVillier
parent 1cd7d3e664
commit 7c49cb8f9c
22 changed files with 2029 additions and 1998 deletions

View File

@ -35,36 +35,37 @@ unsigned long acpi_fill_mcfg(unsigned long current)
pciexbar_reg = pci_read_config32(dev, PCIEXBAR);
// MMCFG not supported or not enabled.
/* MMCFG not supported or not enabled */
if (!(pciexbar_reg & (1 << 0)))
return current;
switch ((pciexbar_reg >> 1) & 3) {
case 0: // 256MB
pciexbar = pciexbar_reg & ((1 << 31)|(1 << 30)|(1 << 29)|(1 << 28));
case 0: /* 256MB */
pciexbar = pciexbar_reg & (0xffffffffULL << 28);
max_buses = 256;
break;
case 1: // 128M
pciexbar = pciexbar_reg & ((1 << 31)|(1 << 30)|(1 << 29)|(1 << 28)|(1 << 27));
case 1: /* 128M */
pciexbar = pciexbar_reg & (0xffffffffULL << 27);
max_buses = 128;
break;
case 2: // 64M
pciexbar = pciexbar_reg & ((1 << 31)|(1 << 30)|(1 << 29)|(1 << 28)|(1 << 27)|(1 << 26));
case 2: /* 64M */
pciexbar = pciexbar_reg & (0xffffffffULL << 26);
max_buses = 64;
break;
default: // RSVD
default: /* RSVD */
return current;
}
if (!pciexbar)
return current;
current += acpi_create_mcfg_mmconfig((acpi_mcfg_mmconfig_t *) current,
pciexbar, 0x0, 0x0, max_buses - 1);
current += acpi_create_mcfg_mmconfig((acpi_mcfg_mmconfig_t *) current, pciexbar, 0, 0,
max_buses - 1);
return current;
}
static unsigned long acpi_create_igfx_rmrr(const unsigned long current)
{
const u32 base_mask = ~(u32)(MiB - 1);
@ -89,7 +90,7 @@ static unsigned long acpi_fill_dmar(unsigned long current)
unsigned long tmp;
tmp = current;
current += acpi_create_dmar_drhd(current, 0, 0, IOMMU_BASE1);
current += acpi_create_dmar_drhd(current, 0, 0, GFXVT_BASE);
current += acpi_create_dmar_ds_pci(current, 0, 2, 0);
current += acpi_create_dmar_ds_pci(current, 0, 2, 1);
acpi_dmar_drhd_fixup(tmp, current);
@ -104,34 +105,37 @@ static unsigned long acpi_fill_dmar(unsigned long current)
}
const unsigned long tmp = current;
current += acpi_create_dmar_drhd(current,
DRHD_INCLUDE_PCI_ALL, 0, IOMMU_BASE2);
current += acpi_create_dmar_ds_ioapic(current,
2, PCH_IOAPIC_PCI_BUS, PCH_IOAPIC_PCI_SLOT, 0);
current += acpi_create_dmar_drhd(current, DRHD_INCLUDE_PCI_ALL, 0, VTVC0_BASE);
current += acpi_create_dmar_ds_ioapic(current, 2, PCH_IOAPIC_PCI_BUS,
PCH_IOAPIC_PCI_SLOT, 0);
size_t i;
for (i = 0; i < 8; ++i)
current += acpi_create_dmar_ds_msi_hpet(current,
0, PCH_HPET_PCI_BUS, PCH_HPET_PCI_SLOT, i);
current += acpi_create_dmar_ds_msi_hpet(current, 0, PCH_HPET_PCI_BUS,
PCH_HPET_PCI_SLOT, i);
acpi_dmar_drhd_fixup(tmp, current);
return current;
}
unsigned long northbridge_write_acpi_tables(struct device *const dev,
unsigned long current,
unsigned long northbridge_write_acpi_tables(struct device *const dev, unsigned long current,
struct acpi_rsdp *const rsdp)
{
const u32 capid0_a = pci_read_config32(dev, 0xe4);
const u32 capid0_a = pci_read_config32(dev, CAPID0_A);
if (capid0_a & (1 << 23))
return current;
printk(BIOS_DEBUG, "ACPI: * DMAR\n");
acpi_dmar_t *const dmar = (acpi_dmar_t *)current;
acpi_create_dmar(dmar, DMAR_INTR_REMAP, acpi_fill_dmar);
current += dmar->header.length;
current = acpi_align_current(current);
acpi_add_table(rsdp, dmar);
acpi_add_table(rsdp, dmar);
current = acpi_align_current(current);
printk(BIOS_DEBUG, "current = %lx\n", current);

View File

@ -20,19 +20,17 @@ void bootblock_early_northbridge_init(void)
uint32_t reg;
/*
* The "io" variant of the config access is explicitly used to
* setup the PCIEXBAR because CONFIG_MMCONF_SUPPORT is set to
* to true. That way all subsequent non-explicit config accesses use
* MCFG. This code also assumes that bootblock_northbridge_init() is
* the first thing called in the non-asm boot block code. The final
* assumption is that no assembly code is using the
* The "io" variant of the config access is explicitly used to setup the
* PCIEXBAR because CONFIG_MMCONF_SUPPORT is set to true. That way, all
* subsequent non-explicit config accesses use MCFG. This code also assumes
* that bootblock_northbridge_init() is the first thing called in the non-asm
* boot block code. The final assumption is that no assembly code is using the
* CONFIG_MMCONF_SUPPORT option to do PCI config accesses.
*
* The PCIEXBAR is assumed to live in the memory mapped IO space under
* 4GiB.
* The PCIEXBAR is assumed to live in the memory mapped IO space under 4GiB.
*/
reg = 0;
pci_io_write_config32(PCI_DEV(0,0,0), PCIEXBAR + 4, reg);
pci_io_write_config32(HOST_BRIDGE, PCIEXBAR + 4, reg);
reg = CONFIG_MMCONF_BASE_ADDRESS | 4 | 1; /* 64MiB - 0-63 buses. */
pci_io_write_config32(PCI_DEV(0,0,0), PCIEXBAR, reg);
pci_io_write_config32(HOST_BRIDGE, PCIEXBAR, reg);
}

View File

@ -48,7 +48,7 @@ struct northbridge_intel_sandybridge_config {
struct i915_gpu_controller_info gfx;
/*
* Maximum PCI mmio size in MiB.
* Maximum PCI MMIO size in MiB.
*/
u16 pci_mmio_size;
@ -63,7 +63,8 @@ struct northbridge_intel_sandybridge_config {
bool ec_present;
bool ddr3lv_support;
/* N mode functionality. Leave this setting at 0.
/*
* N mode functionality. Leave this setting at 0.
* 0 Auto
* 1 1N
* 2 2N
@ -74,9 +75,10 @@ struct northbridge_intel_sandybridge_config {
DDR_NMODE_2N,
} nmode;
/* DDR refresh rate config. JEDEC Standard No.21-C Annex K allows
* for DIMM SPD data to specify whether double-rate is required for
* extended operating temperature range.
/*
* DDR refresh rate config. JEDEC Standard No.21-C Annex K allows for DIMM SPD data to
* specify whether double-rate is required for extended operating temperature range.
*
* 0 Enable double rate based upon temperature thresholds
* 1 Normal rate
* 2 Always enable double rate

View File

@ -25,49 +25,49 @@
static void systemagent_vtd_init(void)
{
const u32 capid0_a = pci_read_config32(PCI_DEV(0, 0, 0), CAPID0_A);
const u32 capid0_a = pci_read_config32(HOST_BRIDGE, CAPID0_A);
if (capid0_a & (1 << 23))
return;
/* setup BARs */
MCHBAR32(VTD1_BASE + 4) = IOMMU_BASE1 >> 32;
MCHBAR32(VTD1_BASE) = IOMMU_BASE1 | 1;
MCHBAR32(VTD2_BASE + 4) = IOMMU_BASE2 >> 32;
MCHBAR32(VTD2_BASE) = IOMMU_BASE2 | 1;
/* Setup BARs */
MCHBAR32(GFXVTBAR + 4) = GFXVT_BASE >> 32;
MCHBAR32(GFXVTBAR) = GFXVT_BASE | 1;
MCHBAR32(VTVC0BAR + 4) = VTVC0_BASE >> 32;
MCHBAR32(VTVC0BAR) = VTVC0_BASE | 1;
/* lock policies */
write32((void *)(IOMMU_BASE1 + 0xff0), 0x80000000);
/* Lock policies */
write32((void *)(GFXVT_BASE + 0xff0), 0x80000000);
const struct device *const azalia = pcidev_on_root(0x1b, 0);
if (azalia && azalia->enabled) {
write32((void *)(IOMMU_BASE2 + 0xff0), 0x20000000);
write32((void *)(IOMMU_BASE2 + 0xff0), 0xa0000000);
write32((void *)(VTVC0_BASE + 0xff0), 0x20000000);
write32((void *)(VTVC0_BASE + 0xff0), 0xa0000000);
} else {
write32((void *)(IOMMU_BASE2 + 0xff0), 0x80000000);
write32((void *)(VTVC0_BASE + 0xff0), 0x80000000);
}
}
static void enable_pam_region(void)
{
pci_write_config8(PCI_DEV(0, 0x00, 0), PAM0, 0x30);
pci_write_config8(PCI_DEV(0, 0x00, 0), PAM1, 0x33);
pci_write_config8(PCI_DEV(0, 0x00, 0), PAM2, 0x33);
pci_write_config8(PCI_DEV(0, 0x00, 0), PAM3, 0x33);
pci_write_config8(PCI_DEV(0, 0x00, 0), PAM4, 0x33);
pci_write_config8(PCI_DEV(0, 0x00, 0), PAM5, 0x33);
pci_write_config8(PCI_DEV(0, 0x00, 0), PAM6, 0x33);
pci_write_config8(HOST_BRIDGE, PAM0, 0x30);
pci_write_config8(HOST_BRIDGE, PAM1, 0x33);
pci_write_config8(HOST_BRIDGE, PAM2, 0x33);
pci_write_config8(HOST_BRIDGE, PAM3, 0x33);
pci_write_config8(HOST_BRIDGE, PAM4, 0x33);
pci_write_config8(HOST_BRIDGE, PAM5, 0x33);
pci_write_config8(HOST_BRIDGE, PAM6, 0x33);
}
static void sandybridge_setup_bars(void)
{
printk(BIOS_DEBUG, "Setting up static northbridge registers...");
/* Set up all hardcoded northbridge BARs */
pci_write_config32(PCI_DEV(0, 0x00, 0), EPBAR, DEFAULT_EPBAR | 1);
pci_write_config32(PCI_DEV(0, 0x00, 0), EPBAR + 4, (0LL+DEFAULT_EPBAR) >> 32);
pci_write_config32(PCI_DEV(0, 0x00, 0), MCHBAR, (uintptr_t)DEFAULT_MCHBAR | 1);
pci_write_config32(PCI_DEV(0, 0x00, 0), MCHBAR + 4, (0LL+(uintptr_t)DEFAULT_MCHBAR) >> 32);
pci_write_config32(PCI_DEV(0, 0x00, 0), DMIBAR, (uintptr_t)DEFAULT_DMIBAR | 1);
pci_write_config32(PCI_DEV(0, 0x00, 0), DMIBAR + 4, (0LL+(uintptr_t)DEFAULT_DMIBAR) >> 32);
pci_write_config32(HOST_BRIDGE, EPBAR, DEFAULT_EPBAR | 1);
pci_write_config32(HOST_BRIDGE, EPBAR + 4, (0LL + DEFAULT_EPBAR) >> 32);
pci_write_config32(HOST_BRIDGE, MCHBAR, (uintptr_t)DEFAULT_MCHBAR | 1);
pci_write_config32(HOST_BRIDGE, MCHBAR + 4, (0LL + (uintptr_t)DEFAULT_MCHBAR) >> 32);
pci_write_config32(HOST_BRIDGE, DMIBAR, (uintptr_t)DEFAULT_DMIBAR | 1);
pci_write_config32(HOST_BRIDGE, DMIBAR + 4, (0LL + (uintptr_t)DEFAULT_DMIBAR) >> 32);
printk(BIOS_DEBUG, " done\n");
}
@ -76,8 +76,7 @@ static void sandybridge_setup_graphics(void)
{
u32 reg32;
u16 reg16;
u8 reg8;
u8 gfxsize;
u8 reg8, gfxsize;
reg16 = pci_read_config16(PCI_DEV(0, 2, 0), PCI_DEVICE_ID);
switch (reg16) {
@ -105,7 +104,7 @@ static void sandybridge_setup_graphics(void)
/* Setup IGD memory by setting GGC[7:3] = 1 for 32MB */
gfxsize = 0;
}
reg16 = pci_read_config16(PCI_DEV(0,0,0), GGC);
reg16 = pci_read_config16(HOST_BRIDGE, GGC);
reg16 &= ~0x00f8;
reg16 |= (gfxsize + 1) << 3;
/* Program GTT memory by setting GGC[9:8] = 2MB */
@ -113,7 +112,7 @@ static void sandybridge_setup_graphics(void)
reg16 |= 2 << 8;
/* Enable VGA decode */
reg16 &= ~0x0002;
pci_write_config16(PCI_DEV(0,0,0), GGC, reg16);
pci_write_config16(HOST_BRIDGE, GGC, reg16);
/* Enable 256MB aperture */
reg8 = pci_read_config8(PCI_DEV(0, 2, 0), MSAC);
@ -131,52 +130,56 @@ static void sandybridge_setup_graphics(void)
MCHBAR32(SAPMCTL) = reg32 | 1;
/* GPU RC6 workaround for sighting 366252 */
reg32 = MCHBAR32(0x5d14);
reg32 = MCHBAR32(SSKPD_HI);
reg32 |= (1 << 31);
MCHBAR32(0x5d14) = reg32;
MCHBAR32(SSKPD_HI) = reg32;
/* VLW */
/* VLW (Virtual Legacy Wire?) */
reg32 = MCHBAR32(0x6120);
reg32 &= ~(1 << 0);
MCHBAR32(0x6120) = reg32;
reg32 = MCHBAR32(PAIR_CTL);
reg32 = MCHBAR32(INTRDIRCTL);
reg32 |= (1 << 4) | (1 << 5);
MCHBAR32(PAIR_CTL) = reg32;
MCHBAR32(INTRDIRCTL) = reg32;
}
static void start_peg_link_training(void)
{
u32 tmp;
u32 deven;
u32 tmp, deven;
/* PEG on IvyBridge+ needs a special startup sequence.
* As the MRC has its own initialization code skip it. */
if (((pci_read_config16(PCI_DEV(0, 0, 0), PCI_DEVICE_ID) &
BASE_REV_MASK) != BASE_REV_IVB) ||
CONFIG(HAVE_MRC))
const u16 base_rev = pci_read_config16(HOST_BRIDGE, PCI_DEVICE_ID) & BASE_REV_MASK;
/*
* PEG on IvyBridge+ needs a special startup sequence.
* As the MRC has its own initialization code, skip it.
*/
if ((base_rev != BASE_REV_IVB) || CONFIG(HAVE_MRC))
return;
deven = pci_read_config32(PCI_DEV(0, 0, 0), DEVEN);
deven = pci_read_config32(HOST_BRIDGE, DEVEN);
/*
* For each PEG device, set bit 5 to use three retries for OC (Offset Calibration).
* We also clear DEFER_OC (bit 16) in order to start PEG training.
*/
if (deven & DEVEN_PEG10) {
tmp = pci_read_config32(PCI_DEV(0, 1, 0), 0xC24) & ~(1 << 16);
pci_write_config32(PCI_DEV(0, 1, 0), 0xC24, tmp | (1 << 5));
tmp = pci_read_config32(PCI_DEV(0, 1, 0), AFE_PWRON) & ~(1 << 16);
pci_write_config32(PCI_DEV(0, 1, 0), AFE_PWRON, tmp | (1 << 5));
}
if (deven & DEVEN_PEG11) {
tmp = pci_read_config32(PCI_DEV(0, 1, 1), 0xC24) & ~(1 << 16);
pci_write_config32(PCI_DEV(0, 1, 1), 0xC24, tmp | (1 << 5));
tmp = pci_read_config32(PCI_DEV(0, 1, 1), AFE_PWRON) & ~(1 << 16);
pci_write_config32(PCI_DEV(0, 1, 1), AFE_PWRON, tmp | (1 << 5));
}
if (deven & DEVEN_PEG12) {
tmp = pci_read_config32(PCI_DEV(0, 1, 2), 0xC24) & ~(1 << 16);
pci_write_config32(PCI_DEV(0, 1, 2), 0xC24, tmp | (1 << 5));
tmp = pci_read_config32(PCI_DEV(0, 1, 2), AFE_PWRON) & ~(1 << 16);
pci_write_config32(PCI_DEV(0, 1, 2), AFE_PWRON, tmp | (1 << 5));
}
if (deven & DEVEN_PEG60) {
tmp = pci_read_config32(PCI_DEV(0, 6, 0), 0xC24) & ~(1 << 16);
pci_write_config32(PCI_DEV(0, 6, 0), 0xC24, tmp | (1 << 5));
tmp = pci_read_config32(PCI_DEV(0, 6, 0), AFE_PWRON) & ~(1 << 16);
pci_write_config32(PCI_DEV(0, 6, 0), AFE_PWRON, tmp | (1 << 5));
}
}
@ -187,17 +190,17 @@ void systemagent_early_init(void)
u8 reg8;
/* Device ID Override Enable should be done very early */
capid0_a = pci_read_config32(PCI_DEV(0, 0, 0), 0xe4);
capid0_a = pci_read_config32(HOST_BRIDGE, CAPID0_A);
if (capid0_a & (1 << 10)) {
const size_t is_mobile = get_platform_type() == PLATFORM_MOBILE;
reg8 = pci_read_config8(PCI_DEV(0, 0, 0), 0xf3);
reg8 = pci_read_config8(HOST_BRIDGE, DIDOR);
reg8 &= ~7; /* Clear 2:0 */
if (is_mobile)
reg8 |= 1; /* Set bit 0 */
pci_write_config8(PCI_DEV(0, 0, 0), 0xf3, reg8);
pci_write_config8(HOST_BRIDGE, DIDOR, reg8);
}
/* Setup all BARs required for early PCIe and raminit */
@ -210,24 +213,25 @@ void systemagent_early_init(void)
systemagent_vtd_init();
/* Device Enable, don't touch PEG bits */
deven = pci_read_config32(PCI_DEV(0, 0, 0), DEVEN) | DEVEN_IGD;
pci_write_config32(PCI_DEV(0, 0, 0), DEVEN, deven);
deven = pci_read_config32(HOST_BRIDGE, DEVEN) | DEVEN_IGD;
pci_write_config32(HOST_BRIDGE, DEVEN, deven);
sandybridge_setup_graphics();
/* Write magic value to start PEG link training.
* This should be done in PCI device enumeration, but
* the PCIe specification requires to wait at least 100msec
* after reset for devices to come up.
* As we don't want to increase boot time, enable it early and
* assume the PEG is up as soon as PCI enumeration starts.
* TODO: use time stamps to ensure the timings are met */
/*
* Write magic values to start PEG link training. This should be done in PCI device
* enumeration, but the PCIe specification requires to wait at least 100msec after
* reset for devices to come up. As we don't want to increase boot time, enable it
* early and assume that PEG is up as soon as PCI enumeration starts.
*
* TODO: use timestamps to ensure the timings are met.
*/
start_peg_link_training();
}
void northbridge_romstage_finalize(int s3resume)
{
MCHBAR16(SSKPD) = 0xCAFE;
MCHBAR16(SSKPD_HI) = 0xCAFE;
romstage_handoff_init(s3resume);
}

View File

@ -16,36 +16,34 @@
#include <device/pci_ops.h>
#include "sandybridge.h"
#define PCI_DEV_SNB PCI_DEV(0, 0, 0)
void intel_sandybridge_finalize_smm(void)
{
pci_or_config16(PCI_DEV_SNB, GGC, 1 << 0);
pci_or_config16(PCI_DEV_SNB, PAVPC, 1 << 2);
pci_or_config32(PCI_DEV_SNB, DPR, 1 << 0);
pci_or_config32(PCI_DEV_SNB, MESEG_MASK, MELCK);
pci_or_config32(PCI_DEV_SNB, REMAPBASE, 1 << 0);
pci_or_config32(PCI_DEV_SNB, REMAPLIMIT, 1 << 0);
pci_or_config32(PCI_DEV_SNB, TOM, 1 << 0);
pci_or_config32(PCI_DEV_SNB, TOUUD, 1 << 0);
pci_or_config32(PCI_DEV_SNB, BDSM, 1 << 0);
pci_or_config32(PCI_DEV_SNB, BGSM, 1 << 0);
pci_or_config32(PCI_DEV_SNB, TSEGMB, 1 << 0);
pci_or_config32(PCI_DEV_SNB, TOLUD, 1 << 0);
pci_or_config16(HOST_BRIDGE, GGC, 1 << 0);
pci_or_config16(HOST_BRIDGE, PAVPC, 1 << 2);
pci_or_config32(HOST_BRIDGE, DPR, 1 << 0);
pci_or_config32(HOST_BRIDGE, MESEG_MASK, MELCK);
pci_or_config32(HOST_BRIDGE, REMAPBASE, 1 << 0);
pci_or_config32(HOST_BRIDGE, REMAPLIMIT, 1 << 0);
pci_or_config32(HOST_BRIDGE, TOM, 1 << 0);
pci_or_config32(HOST_BRIDGE, TOUUD, 1 << 0);
pci_or_config32(HOST_BRIDGE, BDSM, 1 << 0);
pci_or_config32(HOST_BRIDGE, BGSM, 1 << 0);
pci_or_config32(HOST_BRIDGE, TSEGMB, 1 << 0);
pci_or_config32(HOST_BRIDGE, TOLUD, 1 << 0);
MCHBAR32_OR(MMIO_PAVP_CTL, 1 << 0); /* PAVP */
MCHBAR32_OR(PAVP_MSG, 1 << 0); /* PAVP */
MCHBAR32_OR(SAPMCTL, 1 << 31); /* SA PM */
MCHBAR32_OR(0x6020, 1 << 0); /* UMA GFX */
MCHBAR32_OR(0x63fc, 1 << 0); /* VTDTRK */
MCHBAR32_OR(0x6800, 1 << 31);
MCHBAR32_OR(0x7000, 1 << 31);
MCHBAR32_OR(0x77fc, 1 << 0);
MCHBAR32_OR(UMAGFXCTL, 1 << 0); /* UMA GFX */
MCHBAR32_OR(VTDTRKLCK, 1 << 0); /* VTDTRK */
MCHBAR32_OR(REQLIM, 1 << 31);
MCHBAR32_OR(DMIVCLIM, 1 << 31);
MCHBAR32_OR(CRDTLCK, 1 << 0);
/* Memory Controller Lockdown */
MCHBAR8(MC_LOCK) = 0x8f;
/* Read+write the following */
MCHBAR32(0x6030) = MCHBAR32(0x6030);
MCHBAR32(0x6034) = MCHBAR32(0x6034);
MCHBAR32(0x6008) = MCHBAR32(0x6008);
MCHBAR32(VDMBDFBARKVM) = MCHBAR32(VDMBDFBARKVM);
MCHBAR32(VDMBDFBARPAVP) = MCHBAR32(VDMBDFBARPAVP);
MCHBAR32(HDAUDRID) = MCHBAR32(HDAUDRID);
}

View File

@ -57,7 +57,7 @@ static const struct gt_powermeter snb_pm_gt1[] = {
{ 0xa240, 0x00000000 },
{ 0xa244, 0x00000000 },
{ 0xa248, 0x8000421e },
{ 0 }
{ 0 },
};
static const struct gt_powermeter snb_pm_gt2[] = {
@ -80,7 +80,7 @@ static const struct gt_powermeter snb_pm_gt2[] = {
{ 0xa240, 0x00000000 },
{ 0xa244, 0x00000000 },
{ 0xa248, 0x8000421e },
{ 0 }
{ 0 },
};
static const struct gt_powermeter ivb_pm_gt1[] = {
@ -136,7 +136,7 @@ static const struct gt_powermeter ivb_pm_gt1[] = {
{ 0xaa3c, 0x00001c00 },
{ 0xaa54, 0x00000004 },
{ 0xaa60, 0x00060000 },
{ 0 }
{ 0 },
};
static const struct gt_powermeter ivb_pm_gt2_17w[] = {
@ -192,7 +192,7 @@ static const struct gt_powermeter ivb_pm_gt2_17w[] = {
{ 0xaa3c, 0x00003900 },
{ 0xaa54, 0x00000008 },
{ 0xaa60, 0x00110000 },
{ 0 }
{ 0 },
};
static const struct gt_powermeter ivb_pm_gt2_35w[] = {
@ -248,12 +248,12 @@ static const struct gt_powermeter ivb_pm_gt2_35w[] = {
{ 0xaa3c, 0x00003900 },
{ 0xaa54, 0x00000008 },
{ 0xaa60, 0x00110000 },
{ 0 }
{ 0 },
};
/* some vga option roms are used for several chipsets but they only have one
* PCI ID in their header. If we encounter such an option rom, we need to do
* the mapping ourselves
/*
* Some VGA option roms are used for several chipsets but they only have one PCI ID in their
* header. If we encounter such an option rom, we need to do the mapping ourselves.
*/
u32 map_oprom_vendev(u32 vendev)
@ -385,18 +385,15 @@ static void gma_pm_init_pre_vbios(struct device *dev)
if (tdp <= 17) {
/* <=17W ULV */
printk(BIOS_DEBUG, "IVB GT2 17W "
"Power Meter Weights\n");
printk(BIOS_DEBUG, "IVB GT2 17W Power Meter Weights\n");
gtt_write_powermeter(ivb_pm_gt2_17w);
} else if ((tdp >= 25) && (tdp <= 35)) {
/* 25W-35W */
printk(BIOS_DEBUG, "IVB GT2 25W-35W "
"Power Meter Weights\n");
printk(BIOS_DEBUG, "IVB GT2 25W-35W Power Meter Weights\n");
gtt_write_powermeter(ivb_pm_gt2_35w);
} else {
/* All others */
printk(BIOS_DEBUG, "IVB GT2 35W "
"Power Meter Weights\n");
printk(BIOS_DEBUG, "IVB GT2 35W Power Meter Weights\n");
gtt_write_powermeter(ivb_pm_gt2_35w);
}
}
@ -599,15 +596,15 @@ static void gma_enable_swsci(void)
{
u16 reg16;
/* clear DMISCI status */
/* Clear DMISCI status */
reg16 = inw(DEFAULT_PMBASE + TCO1_STS);
reg16 &= DMISCI_STS;
outw(DEFAULT_PMBASE + TCO1_STS, reg16);
/* clear acpi tco status */
/* Clear ACPI TCO status */
outl(DEFAULT_PMBASE + GPE0_STS, TCOSCI_STS);
/* enable acpi tco scis */
/* Enable ACPI TCO SCIs */
reg16 = inw(DEFAULT_PMBASE + GPE0_EN);
reg16 |= TCOSCI_EN;
outw(DEFAULT_PMBASE + GPE0_EN, reg16);
@ -654,10 +651,9 @@ static void gma_func0_init(struct device *dev)
intel_gma_restore_opregion();
}
const struct i915_gpu_controller_info *
intel_gma_get_controller_info(void)
const struct i915_gpu_controller_info *intel_gma_get_controller_info(void)
{
struct device *dev = pcidev_on_root(0x2, 0);
struct device *dev = pcidev_on_root(2, 0);
if (!dev) {
return NULL;
}
@ -675,9 +671,7 @@ static void gma_ssdt(struct device *device)
drivers_intel_gma_displays_ssdt_generate(gfx);
}
static unsigned long
gma_write_acpi_tables(struct device *const dev,
unsigned long current,
static unsigned long gma_write_acpi_tables(struct device *const dev, unsigned long current,
struct acpi_rsdp *const rsdp)
{
igd_opregion_t *opregion = (igd_opregion_t *)current;
@ -706,14 +700,14 @@ static const char *gma_acpi_name(const struct device *dev)
return "GFX0";
}
/* called by pci set_vga_bridge function */
/* Called by PCI set_vga_bridge function */
static void gma_func0_disable(struct device *dev)
{
u16 reg16;
struct device *dev_host = pcidev_on_root(0, 0);
reg16 = pci_read_config16(dev_host, GGC);
reg16 |= (1 << 1); /* disable VGA decode */
reg16 |= (1 << 1); /* Disable VGA decode */
pci_write_config16(dev_host, GGC, reg16);
dev->enabled = 0;
@ -729,18 +723,20 @@ static struct device_operations gma_func0_ops = {
.enable_resources = pci_dev_enable_resources,
.acpi_fill_ssdt_generator = gma_ssdt,
.init = gma_func0_init,
.scan_bus = 0,
.enable = 0,
.scan_bus = NULL,
.enable = NULL,
.disable = gma_func0_disable,
.ops_pci = &gma_pci_ops,
.acpi_name = gma_acpi_name,
.write_acpi_tables = gma_write_acpi_tables,
};
static const unsigned short pci_device_ids[] = { 0x0102, 0x0106, 0x010a, 0x0112,
static const unsigned short pci_device_ids[] = {
0x0102, 0x0106, 0x010a, 0x0112,
0x0116, 0x0122, 0x0126, 0x0156,
0x0166, 0x0162, 0x016a, 0x0152,
0 };
0
};
static const struct pci_driver gma __pci_driver = {
.ops = &gma_func0_ops,

View File

@ -17,9 +17,10 @@
struct i915_gpu_controller_info;
int i915lightup_sandy(const struct i915_gpu_controller_info *info,
u32 physbase, u16 pio, u8 *mmio, u32 lfb);
int i915lightup_ivy(const struct i915_gpu_controller_info *info,
u32 physbase, u16 pio, u8 *mmio, u32 lfb);
int i915lightup_sandy(const struct i915_gpu_controller_info *info, u32 physbase, u16 pio,
u8 *mmio, u32 lfb);
int i915lightup_ivy(const struct i915_gpu_controller_info *info, u32 physbase, u16 pio,
u8 *mmio, u32 lfb);
#endif /* NORTHBRIDGE_INTEL_SANDYBRIDGE_GMA_H */

View File

@ -0,0 +1,430 @@
/*
* This file is part of the coreboot project.
*
* Copyright (C) 2007-2008 coresystems GmbH
* Copyright (C) 2011 Google Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __SANDYBRIDGE_MCHBAR_REGS_H__
#define __SANDYBRIDGE_MCHBAR_REGS_H__
/*
* ### IOSAV command queue notes ###
*
* Intel provides a command queue of depth four.
* Every command is configured by using multiple MCHBAR registers.
* On executing the command queue, you have to specify its depth (number of commands).
*
* The macros for these registers can take some integer parameters, within these bounds:
* channel: [0..1]
* index: [0..3]
* lane: [0..8]
*
* Note that these ranges are 'closed': both endpoints are included.
*
*
*
* ### Register description ###
*
* IOSAV_n_SP_CMD_ADDR_ch(channel, index)
* Sub-sequence command addresses. Controls the address, bank address and slotrank signals.
*
* Bitfields:
* [0..15] Row / Column Address.
* [16..18] The result of (10 + [16..18]) is the number of valid row bits.
* Note: Value 1 is not implemented. Not that it really matters, though.
* Value 7 is reserved, as the hardware does not support it.
* [20..22] Bank Address.
* [24..25] Rank select. Let's call it "ranksel", as it is mentioned later.
*
* IOSAV_n_ADDR_UPDATE_ch(channel, index)
* How the address shall be updated after executing the sub-sequence command.
*
* Bitfields:
* [0] Increment CAS/RAS by 1.
* [1] Increment CAS/RAS by 8.
* [2] Increment bank select by 1.
* [3..4] Increment rank select by 1, 2 or 3.
* [5..9] Known as "addr_wrap". Address bits will wrap around the [addr_wrap..0] range.
* [10..11] LFSR update:
* 00: Do not use the LFSR function.
* 01: Undefined, treat as Reserved.
* 10: Apply LFSR on the [addr_wrap..0] bit range.
* 11: Apply LFSR on the [addr_wrap..3] bit range.
*
* [12..15] Update rate. The number of command runs between address updates. For example:
* 0: Update every command run.
* 1: Update every second command run. That is, half of the command rate.
* N: Update after N command runs without updates.
*
* [16..17] LFSR behavior on the deselect cycles (when no sub-seq command is issued):
* 0: No change w.r.t. the last issued command.
* 1: LFSR XORs with address & command (excluding CS), but does not update.
* 2: LFSR XORs with address & command (excluding CS), and updates.
*
* IOSAV_n_SP_CMD_CTRL_ch(channel, index)
* Special command control register. Controls the DRAM command signals.
*
* Bitfields:
* [0] !RAS signal.
* [1] !CAS signal.
* [2] !WE signal.
* [4..7] CKE, per rank and channel.
* [8..11] ODT, per rank and channel.
* [12] Chip Select mode control.
* [13..16] Chip select, per rank and channel. It works as follows:
*
* entity CS_BLOCK is
* port (
* MODE : in std_logic; -- Mode select at [12]
* RANKSEL : in std_logic_vector(0 to 3); -- Decoded "ranksel" value
* CS_CTL : in std_logic_vector(0 to 3); -- Chip select control at [13..16]
* CS_Q : out std_logic_vector(0 to 3) -- CS signals
* );
* end entity CS_BLOCK;
*
* architecture RTL of CS_BLOCK is
* begin
* if MODE = '1' then
* CS_Q <= not RANKSEL and CS_CTL;
* else
* CS_Q <= CS_CTL;
* end if;
* end architecture RTL;
*
* [17] Auto Precharge. Only valid when using 10 row bits!
*
* IOSAV_n_SUBSEQ_CTRL_ch(channel, index)
* Sub-sequence parameters. Controls repetitions, delays and data orientation.
*
* Bitfields:
* [0..8] Number of repetitions of the sub-sequence command.
* [10..14] Gap, number of clock-cycles to wait before sending the next command.
* [16..24] Number of clock-cycles to idle between sub-sequence commands.
* [26..27] The direction of the data.
* 00: None, does not handle data
* 01: Read
* 10: Write
* 11: Read & Write
*
* IOSAV_n_ADDRESS_LFSR_ch(channel, index)
* 23-bit LFSR state register. It is written into the LFSR when the sub-sequence is loaded,
* and then read back from the LFSR when the sub-sequence is done.
*
* Bitfields:
* [0..22] LFSR state.
*
* IOSAV_SEQ_CTL_ch(channel)
* Control the sequence level in IOSAV: number of sub-sequences, iterations, maintenance...
*
* Bitfields:
* [0..7] Number of full sequence executions. When this field becomes non-zero, then the
* sequence starts running immediately. This value is decremented after completing
* a full sequence iteration. When it is zero, the sequence is done. No decrement
* is done if this field is set to 0xff. This is the "infinite repeat" mode, and
* it is manually aborted by clearing this field.
*
* [8..16] Number of wait cycles after each sequence iteration. This wait's purpose is to
* allow performing maintenance in infinite loops. When non-zero, RCOMP, refresh
* and ZQXS operations can take place.
*
* [17] Stop-on-error mode: Whether to stop sequence execution when an error occurs.
* [18..19] Number of sub-sequences. The programmed value is the index of the last sub-seq.
* [20] If set, keep refresh disabled until the next sequence execution.
* DANGER: Refresh must be re-enabled within the (9 * tREFI) period!
*
* [22] If set, sequence execution will not prevent refresh. This cannot be set when
* bit [20] is also set, or was set on the previous sequence. This bit exists so
* that the sequence machine can be used as a timer without affecting the memory.
*
* [23] If set, an output pin is asserted on the first detected error. This output can
* be used as a trigger for an oscilloscope or a logic analyzer, which is handy.
*
* IOSAV_DATA_CTL_ch(channel)
* Data-related controls in IOSAV mode.
*
* Bitfields:
* [0..7] WDB (Write Data Buffer) pattern length: [0..7] = (length / 8) - 1;
* [8..15] WDB read pointer. Points at the data used for IOSAV write transactions.
* [16..23] Comparison pointer. Used to compare data from IOSAV read transactions.
* [24] If set, increment pointers only when micro-breakpoint is active.
*
* IOSAV_STATUS_ch(channel)
* State of the IOSAV sequence machine. Should be polled after sending an IOSAV sequence.
*
* Bitfields:
* [0] IDLE: IOSAV is sleeping.
* [1] BUSY: IOSAV is running a sequence.
* [2] DONE: IOSAV has completed a sequence.
* [3] ERROR: IOSAV detected an error and stopped on it, when using Stop-on-error.
* [4] PANIC: The refresh machine issued a Panic Refresh, and IOSAV was aborted.
* [5] RCOMP: RComp failure. Unused, consider Reserved.
* [6] Cleared with a new sequence, and set when done and refresh counter is drained.
*
*/
/* Indexed register helper macros */
#define Gz(r, z) ((r) + ((z) << 8))
#define Ly(r, y) ((r) + ((y) << 2))
#define Cx(r, x) ((r) + ((x) << 10))
#define CxLy(r, x, y) ((r) + ((x) << 10) + ((y) << 2))
#define GzLy(r, z, y) ((r) + ((z) << 8) + ((y) << 2))
/* Byte lane training register base addresses */
#define LANEBASE_B0 0x0000
#define LANEBASE_B1 0x0200
#define LANEBASE_B2 0x0400
#define LANEBASE_B3 0x0600
#define LANEBASE_ECC 0x0800 /* ECC lane is in the middle of the data lanes */
#define LANEBASE_B4 0x1000
#define LANEBASE_B5 0x1200
#define LANEBASE_B6 0x1400
#define LANEBASE_B7 0x1600
/* Byte lane register offsets */
#define GDCRTRAININGRESULT(ch, y) GzLy(0x0004, ch, y) /* Test results for PI config */
#define GDCRTRAININGRESULT1(ch) GDCRTRAININGRESULT(ch, 0) /* 0x0004 */
#define GDCRTRAININGRESULT2(ch) GDCRTRAININGRESULT(ch, 1) /* 0x0008 */
#define GDCRRX(ch, rank) GzLy(0x10, ch, rank) /* Time setting for lane Rx */
#define GDCRTX(ch, rank) GzLy(0x20, ch, rank) /* Time setting for lane Tx */
/* Register definitions */
#define GDCRCLKRANKSUSED_ch(ch) Gz(0x0c00, ch) /* Indicates which rank is populated */
#define GDCRCLKCOMP_ch(ch) Gz(0x0c04, ch) /* RCOMP result register */
#define GDCRCKPICODE_ch(ch) Gz(0x0c14, ch) /* PI coding for DDR CLK pins */
#define GDCRCKLOGICDELAY_ch(ch) Gz(0x0c18, ch) /* Logic delay of 1 QCLK in CLK slice */
#define GDDLLFUSE_ch(ch) Gz(0x0c20, ch) /* Used for fuse download to the DLLs */
#define GDCRCLKDEBUGMUXCFG_ch(ch) Gz(0x0c3c, ch) /* Debug MUX control */
#define GDCRCMDDEBUGMUXCFG_Cz_S(ch) Gz(0x0e3c, ch) /* Debug MUX control */
#define CRCOMPOFST1_ch(ch) Gz(0x1810, ch) /* DQ, CTL and CLK Offset values */
#define GDCRTRAININGMOD_ch(ch) Gz(0x3000, ch) /* Data training mode control */
#define GDCRTRAININGRESULT1_ch(ch) Gz(0x3004, ch) /* Training results according to PI */
#define GDCRTRAININGRESULT2_ch(ch) Gz(0x3008, ch)
#define GDCRCTLRANKSUSED_ch(ch) Gz(0x3200, ch) /* Indicates which rank is populated */
#define GDCRCMDCOMP_ch(ch) Gz(0x3204, ch) /* COMP values register */
#define GDCRCMDCTLCOMP_ch(ch) Gz(0x3208, ch) /* COMP values register */
#define GDCRCMDPICODING_ch(ch) Gz(0x320c, ch) /* Command and control PI coding */
#define GDCRTRAININGMOD 0x3400 /* Data training mode control register */
#define GDCRDATACOMP 0x340c /* COMP values register */
#define CRCOMPOFST2 0x3714 /* CMD DRV, SComp and Static Leg controls */
/* MC per-channel registers */
#define TC_DBP_ch(ch)				Cx(0x4000, ch) /* Timings: BIN */
#define TC_RAP_ch(ch)				Cx(0x4004, ch) /* Timings: Regular access */
#define TC_RWP_ch(ch)				Cx(0x4008, ch) /* Timings: Read / Write */
#define TC_OTHP_ch(ch)				Cx(0x400c, ch) /* Timings: Other parameters */
#define SCHED_SECOND_CBIT_ch(ch)		Cx(0x401c, ch) /* More chicken bits */
#define SCHED_CBIT_ch(ch)			Cx(0x4020, ch) /* Chicken bits in scheduler */
#define SC_ROUNDT_LAT_ch(ch)			Cx(0x4024, ch) /* Round-trip latency per rank */
#define SC_IO_LATENCY_ch(ch)			Cx(0x4028, ch) /* IO Latency Configuration */
#define SCRAMBLING_SEED_1_ch(ch)		Cx(0x4034, ch) /* Scrambling seed 1 */
#define SCRAMBLING_SEED_2_LO_ch(ch)		Cx(0x4038, ch) /* Scrambling seed 2 low */
#define SCRAMBLING_SEED_2_HI_ch(ch)		Cx(0x403c, ch) /* Scrambling seed 2 high */

/* IOSAV Bytelane Bit-wise error */
#define IOSAV_By_BW_SERROR_ch(ch, y)		CxLy(0x4040, ch, y)

/* IOSAV Bytelane Bit-wise compare mask */
#define IOSAV_By_BW_MASK_ch(ch, y)		CxLy(0x4080, ch, y)

/*
 * Defines the number of transactions (non-VC1 RD CAS commands) between two priority ticks.
 * Different counters for transactions that are issued on the ring agents (core or GT) and
 * transactions issued in the SA.
 */
#define SC_PR_CNT_CONFIG_ch(ch)			Cx(0x40a8, ch)
#define SC_PCIT_ch(ch)				Cx(0x40ac, ch) /* Page-close idle timer setup - 8 bits */
#define PM_PDWN_CONFIG_ch(ch)			Cx(0x40b0, ch) /* Power-down (CKE-off) operation config */
#define ECC_INJECT_COUNT_ch(ch)			Cx(0x40b4, ch) /* ECC error injection count */
#define ECC_DFT_ch(ch)				Cx(0x40b8, ch) /* ECC DFT features (ECC4ANA, error inject) */
#define SC_WR_ADD_DELAY_ch(ch)			Cx(0x40d0, ch) /* Extra WR delay to overcome WR-flyby issue */
#define IOSAV_By_BW_SERROR_C_ch(ch, y)		CxLy(0x4140, ch, y) /* IOSAV Bytelane Bit-wise error */

/* IOSAV sub-sequence control registers */
#define IOSAV_n_SP_CMD_ADDR_ch(ch, y)		CxLy(0x4200, ch, y) /* Special command address */
#define IOSAV_n_ADDR_UPDATE_ch(ch, y)		CxLy(0x4210, ch, y) /* Address update control */
#define IOSAV_n_SP_CMD_CTRL_ch(ch, y)		CxLy(0x4220, ch, y) /* Control of command signals */
#define IOSAV_n_SUBSEQ_CTRL_ch(ch, y)		CxLy(0x4230, ch, y) /* Sub-sequence controls */
#define IOSAV_n_ADDRESS_LFSR_ch(ch, y)		CxLy(0x4240, ch, y) /* 23-bit LFSR state value */
#define PM_THML_STAT_ch(ch)			Cx(0x4280, ch) /* Thermal status of each rank */
#define IOSAV_SEQ_CTL_ch(ch)			Cx(0x4284, ch) /* IOSAV sequence level control */
#define IOSAV_DATA_CTL_ch(ch)			Cx(0x4288, ch) /* Data control in IOSAV mode */
#define IOSAV_STATUS_ch(ch)			Cx(0x428c, ch) /* State of the IOSAV sequence machine */
#define TC_ZQCAL_ch(ch)				Cx(0x4290, ch) /* ZQCAL control register */
#define TC_RFP_ch(ch)				Cx(0x4294, ch) /* Refresh Parameters */
#define TC_RFTP_ch(ch)				Cx(0x4298, ch) /* Refresh Timing Parameters */
#define TC_MR2_SHADOW_ch(ch)			Cx(0x429c, ch) /* MR2 shadow - copy of DDR configuration */
#define MC_INIT_STATE_ch(ch)			Cx(0x42a0, ch) /* IOSAV mode control */
#define TC_SRFTP_ch(ch)				Cx(0x42a4, ch) /* Self-refresh timing parameters */
#define IOSAV_ERROR_ch(ch)			Cx(0x42ac, ch) /* Data vector count of the first error */
#define IOSAV_DC_MASK_ch(ch)			Cx(0x42b0, ch) /* IOSAV data check masking */
#define IOSAV_By_ERROR_COUNT_ch(ch, y)		CxLy(0x4340, ch, y) /* Per-byte 16-bit error count */
#define IOSAV_G_ERROR_COUNT_ch(ch)		Cx(0x4364, ch) /* Global 16-bit error count */

/** WARNING: The following two registers only apply to Ivy Bridge! */
#define IOSAV_BYTE_SERROR_ch(ch)		Cx(0x4368, ch) /** Byte-Wise Sticky Error */
#define IOSAV_BYTE_SERROR_C_ch(ch)		Cx(0x436c, ch) /** Byte-Wise Sticky Error Clear */

#define PM_TRML_M_CONFIG_ch(ch)			Cx(0x4380, ch) /* Thermal mode configuration */
#define PM_CMD_PWR_ch(ch)			Cx(0x4384, ch) /* Power contribution of commands */
#define PM_BW_LIMIT_CONFIG_ch(ch)		Cx(0x4388, ch) /* Bandwidth throttling on overtemperature */
#define SC_WDBWM_ch(ch)				Cx(0x438c, ch) /* Watermarks and starvation counter */
/* MC Channel Broadcast registers */
#define TC_DBP 0x4c00 /* Timings: BIN */
#define TC_RAP 0x4c04 /* Timings: Regular access */
#define TC_RWP 0x4c08 /* Timings: Read / Write */
#define TC_OTHP 0x4c0c /* Timings: Other parameters */
#define SCHED_SECOND_CBIT 0x4c1c /* More chicken bits */
#define SCHED_CBIT 0x4c20 /* Chicken bits in scheduler */
#define SC_ROUNDT_LAT 0x4c24 /* Round-trip latency per rank */
#define SC_IO_LATENCY 0x4c28 /* IO Latency Configuration */
#define SCRAMBLING_SEED_1 0x4c34 /* Scrambling seed 1 */
#define SCRAMBLING_SEED_2_LO 0x4c38 /* Scrambling seed 2 low */
#define SCRAMBLING_SEED_2_HI 0x4c3c /* Scrambling seed 2 high */
#define IOSAV_By_BW_SERROR(y) Ly(0x4c40, y) /* IOSAV Bytelane Bit-wise error */
#define IOSAV_By_BW_MASK(y) Ly(0x4c80, y) /* IOSAV Bytelane Bit-wise compare mask */
/*
* Defines the number of transactions (non-VC1 RD CAS commands) between two priority ticks.
* Different counters for transactions that are issued on the ring agents (core or GT) and
* transactions issued in the SA.
*/
#define SC_PR_CNT_CONFIG 0x4ca8
#define SC_PCIT 0x4cac /* Page-close idle timer setup - 8 bits */
#define PM_PDWN_CONFIG 0x4cb0 /* Power-down (CKE-off) operation config */
#define ECC_INJECT_COUNT 0x4cb4 /* ECC error injection count */
#define ECC_DFT 0x4cb8 /* ECC DFT features (ECC4ANA, error inject) */
#define SC_WR_ADD_DELAY 0x4cd0 /* Extra WR delay to overcome WR-flyby issue */
/** Opportunistic reads configuration during write-major-mode (WMM) */
#define WMM_READ_CONFIG 0x4cd4 /** WARNING: Only exists on IVB! */
#define IOSAV_By_BW_SERROR_C(y) Ly(0x4d40, y) /* IOSAV Bytelane Bit-wise error */
#define IOSAV_n_SP_CMD_ADDR(n) Ly(0x4e00, n) /* Sub-sequence special command address */
#define IOSAV_n_ADDR_UPDATE(n) Ly(0x4e10, n) /* Address update after command execution */
#define IOSAV_n_SP_CMD_CTRL(n) Ly(0x4e20, n) /* Command signals in sub-sequence command */
#define IOSAV_n_SUBSEQ_CTRL(n) Ly(0x4e30, n) /* Sub-sequence command parameter control */
#define IOSAV_n_ADDRESS_LFSR(n) Ly(0x4e40, n) /* 23-bit LFSR value of the sequence */
#define PM_THML_STAT 0x4e80 /* Thermal status of each rank */
#define IOSAV_SEQ_CTL 0x4e84 /* IOSAV sequence level control */
#define IOSAV_DATA_CTL 0x4e88 /* Data control in IOSAV mode */
#define IOSAV_STATUS 0x4e8c /* State of the IOSAV sequence machine */
#define TC_ZQCAL 0x4e90 /* ZQCAL control register */
#define TC_RFP 0x4e94 /* Refresh Parameters */
#define TC_RFTP 0x4e98 /* Refresh Timing Parameters */
#define TC_MR2_SHADOW 0x4e9c /* MR2 shadow - copy of DDR configuration */
#define MC_INIT_STATE 0x4ea0 /* IOSAV mode control */
#define TC_SRFTP 0x4ea4 /* Self-refresh timing parameters */
/**
* Auxiliary register in mcmnts synthesis FUB (Functional Unit Block). Additionally, this
* register is also used to enable IOSAV_n_SP_CMD_ADDR optimization on Ivy Bridge.
*/
#define MCMNTS_SPARE 0x4ea8 /** WARNING: Reserved, use only on IVB! */
#define IOSAV_ERROR 0x4eac /* Data vector count of the first error */
#define IOSAV_DC_MASK 0x4eb0 /* IOSAV data check masking */
#define IOSAV_By_ERROR_COUNT(y) Ly(0x4f40, y) /* Per-byte 16-bit error counter */
#define IOSAV_G_ERROR_COUNT 0x4f64 /* Global 16-bit error counter */
/** WARNING: Only applies to Ivy Bridge! */
#define IOSAV_BYTE_SERROR 0x4f68 /** Byte-Wise Sticky Error */
#define IOSAV_BYTE_SERROR_C 0x4f6c /** Byte-Wise Sticky Error Clear */
#define PM_TRML_M_CONFIG 0x4f80 /* Thermal mode configuration */
#define PM_CMD_PWR 0x4f84 /* Power contribution of commands */
#define PM_BW_LIMIT_CONFIG 0x4f88 /* Bandwidth throttling on overtemperature */
#define SC_WDBWM 0x4f8c /* Watermarks and starvation counter config */
/* No, there's no need to get mad about the Memory Address Decoder */
#define MAD_CHNL				0x5000 /* Address Decoder Channel Configuration */
#define MAD_DIMM(ch)				Ly(0x5004, ch) /* Channel characteristics */
#define MAD_DIMM_CH0				MAD_DIMM(0) /* Channel 0 is at 0x5004 */
#define MAD_DIMM_CH1				MAD_DIMM(1) /* Channel 1 is at 0x5008 */
#define MAD_DIMM_CH2				MAD_DIMM(2) /* Channel 2 is at 0x500c (unused on SNB) */
#define MAD_ZR					0x5014 /* Address Decode Zones */
#define MCDECS_SPARE				0x5018 /* Spare register in mcdecs synthesis FUB */
#define MCDECS_CBIT				0x501c /* Chicken bits in mcdecs synthesis FUB */

#define CHANNEL_HASH				0x5024 /** WARNING: Only exists on IVB! */

#define MC_INIT_STATE_G				0x5030 /* High-level behavior in IOSAV mode */
#define MRC_REVISION				0x5034 /* MRC Revision */
#define PM_DLL_CONFIG				0x5064 /* Memory Controller I/O DLL config */
#define RCOMP_TIMER				0x5084 /* RCOMP evaluation timer register */
#define MC_LOCK					0x50fc /* Memory Controller Lock register */

#define GFXVTBAR				0x5400 /* Base address for IGD */
#define VTVC0BAR				0x5410 /* Base address for PEG, USB, SATA, etc. */

/* On Ivy Bridge, this is used to enable Power Aware Interrupt Routing */
#define INTRDIRCTL				0x5418 /* Interrupt Redirection Control */

/* PAVP message register. Bit 0 locks PAVP settings, and bits [31..20] are an offset. */
#define PAVP_MSG				0x5500

#define MEM_TRML_ESTIMATION_CONFIG		0x5880
#define MEM_TRML_THRESHOLDS_CONFIG		0x5888
#define MEM_TRML_INTERRUPT			0x58a8

/* Some power MSRs are also represented in MCHBAR */
#define MCH_PKG_POWER_LIMIT_LO			0x59a0 /* Turbo Power Limit 1 parameters */
#define MCH_PKG_POWER_LIMIT_HI			0x59a4 /* Turbo Power Limit 2 parameters */

#define SSKPD					0x5d10 /* 64-bit scratchpad register */
#define SSKPD_HI				0x5d14 /* Upper 32 bits of SSKPD */
#define BIOS_RESET_CPL				0x5da8 /* 8-bit */

/* PCODE will sample SAPM-related registers at the end of Phase 4. */
#define MC_BIOS_REQ				0x5e00 /* Memory frequency request register */
#define MC_BIOS_DATA				0x5e04 /* Miscellaneous information for BIOS */
#define SAPMCTL					0x5f00 /* Bit 3 enables DDR EPG (C7i) on IVB */
#define M_COMP					0x5f08 /* Memory COMP control */
#define SAPMTIMERS				0x5f10 /* SAPM timers in 10ns (100 MHz) units */

/* WARNING: Only applies to Sandy Bridge! */
#define BANDTIMERS_SNB				0x5f18 /* MPLL and PPLL time to do self-banding */

/** WARNING: Only applies to Ivy Bridge! */
#define SAPMTIMERS2_IVB				0x5f18 /** Extra latency for DDRIO EPG exit (C7i) */
#define BANDTIMERS_IVB				0x5f20 /** MPLL and PPLL time to do self-banding */

/* Finalize registers. The names come from Haswell, as the finalize sequence is the same. */
#define HDAUDRID				0x6008
#define UMAGFXCTL				0x6020
#define VDMBDFBARKVM				0x6030
#define VDMBDFBARPAVP				0x6034
#define VTDTRKLCK				0x63fc
#define REQLIM					0x6800
#define DMIVCLIM				0x7000
#define PEGCTL					0x7010 /* Bit 0 is PCIPWRGAT (clock gate all PEG controllers) */
#define CRDTCTL3				0x740c /* Minimum completion credits for PCIe/DMI */
#define CRDTCTL4				0x7410 /* Read Return Tracker credits */
#define CRDTLCK					0x77fc

#endif /* __SANDYBRIDGE_MCHBAR_REGS_H__ */

View File

@ -26,8 +26,7 @@
static uintptr_t smm_region_start(void)
{
/* Base of TSEG is top of usable DRAM */
uintptr_t tom = pci_read_config32(PCI_DEV(0, 0, 0), TSEGMB);
return tom;
return pci_read_config32(HOST_BRIDGE, TSEGMB);
}
void *cbmem_top_chipset(void)
@ -53,19 +52,22 @@ void smm_region(uintptr_t *start, size_t *size)
void fill_postcar_frame(struct postcar_frame *pcf)
{
uintptr_t top_of_ram;
uintptr_t top_of_ram = (uintptr_t)cbmem_top();
top_of_ram = (uintptr_t)cbmem_top();
/* Cache 8MiB below the top of ram. On sandybridge systems the top of
/*
* Cache 8MiB below the top of ram. On sandybridge systems the top of
* RAM under 4GiB is the start of the TSEG region. It is required to
* be 8MiB aligned. Set this area as cacheable so it can be used later
* for ramstage before setting up the entire RAM as cacheable. */
* for ramstage before setting up the entire RAM as cacheable.
*/
postcar_frame_add_mtrr(pcf, top_of_ram - 8 * MiB, 8 * MiB, MTRR_TYPE_WRBACK);
/* Cache 8MiB at the top of ram. Top of RAM on sandybridge systems
/*
* Cache 8MiB at the top of ram. Top of RAM on sandybridge systems
* is where the TSEG region resides. However, it is not restricted
* to SMM mode until SMM has been relocated. By setting the region
* to cacheable it provides faster access when relocating the SMM
* handler as well as using the TSEG region for other purposes. */
* handler as well as using the TSEG region for other purposes.
*/
postcar_frame_add_mtrr(pcf, top_of_ram, 8 * MiB, MTRR_TYPE_WRBACK);
}

View File

@ -35,11 +35,9 @@ static uint64_t uma_memory_size = 0;
int bridge_silicon_revision(void)
{
if (bridge_revision_id < 0) {
uint8_t stepping = cpuid_eax(1) & 0xf;
uint8_t bridge_id = pci_read_config16(
pcidev_on_root(0, 0),
PCI_DEVICE_ID) & 0xf0;
bridge_revision_id = bridge_id | stepping;
uint8_t stepping = cpuid_eax(1) & 0x0f;
uint8_t bridge_id = pci_read_config16(pcidev_on_root(0, 0), PCI_DEVICE_ID);
bridge_revision_id = (bridge_id & 0xf0) | stepping;
}
return bridge_revision_id;
}
@ -66,18 +64,19 @@ static int get_pcie_bar(u32 *base)
pciexbar_reg = pci_read_config32(dev, PCIEXBAR);
/* MMCFG not supported or not enabled */
if (!(pciexbar_reg & (1 << 0)))
return 0;
switch ((pciexbar_reg >> 1) & 3) {
case 0: // 256MB
*base = pciexbar_reg & ((1 << 31)|(1 << 30)|(1 << 29)|(1 << 28));
case 0: /* 256MB */
*base = pciexbar_reg & (0xffffffffULL << 28);
return 256;
case 1: // 128M
*base = pciexbar_reg & ((1 << 31)|(1 << 30)|(1 << 29)|(1 << 28)|(1 << 27));
case 1: /* 128M */
*base = pciexbar_reg & (0xffffffffULL << 27);
return 128;
case 2: // 64M
*base = pciexbar_reg & ((1 << 31)|(1 << 30)|(1 << 29)|(1 << 28)|(1 << 27)|(1 << 26));
case 2: /* 64M */
*base = pciexbar_reg & (0xffffffffULL << 26);
return 64;
}
@ -88,10 +87,9 @@ static void add_fixed_resources(struct device *dev, int index)
{
mmio_resource(dev, index++, uma_memory_base >> 10, uma_memory_size >> 10);
mmio_resource(dev, index++, legacy_hole_base_k,
(0xc0000 >> 10) - legacy_hole_base_k);
reserved_ram_resource(dev, index++, 0xc0000 >> 10,
(0x100000 - 0xc0000) >> 10);
mmio_resource(dev, index++, legacy_hole_base_k, (0xc0000 >> 10) - legacy_hole_base_k);
reserved_ram_resource(dev, index++, 0xc0000 >> 10, (0x100000 - 0xc0000) >> 10);
#if CONFIG(CHROMEOS_RAMOOPS)
reserved_ram_resource(dev, index++,
@ -106,10 +104,10 @@ static void add_fixed_resources(struct device *dev, int index)
}
/* Reserve IOMMU BARs */
const u32 capid0_a = pci_read_config32(dev, 0xe4);
const u32 capid0_a = pci_read_config32(dev, CAPID0_A);
if (!(capid0_a & (1 << 23))) {
mmio_resource(dev, index++, IOMMU_BASE1 >> 10, 4);
mmio_resource(dev, index++, IOMMU_BASE2 >> 10, 4);
mmio_resource(dev, index++, GFXVT_BASE >> 10, 4);
mmio_resource(dev, index++, VTVC0_BASE >> 10, 4);
}
}
@ -157,17 +155,17 @@ static void pci_domain_set_resources(struct device *dev)
tolud = pci_read_config32(mch, TOLUD);
/* Top of Memory - does not account for any UMA */
tom = pci_read_config32(mch, 0xa4);
tom = pci_read_config32(mch, TOM + 4);
tom <<= 32;
tom |= pci_read_config32(mch, 0xa0);
tom |= pci_read_config32(mch, TOM);
printk(BIOS_DEBUG, "TOUUD 0x%llx TOLUD 0x%08x TOM 0x%llx\n",
touud, tolud, tom);
/* ME UMA needs excluding if total memory < 4GB */
me_base = pci_read_config32(mch, 0x74);
me_base = pci_read_config32(mch, MESEG_BASE + 4);
me_base <<= 32;
me_base |= pci_read_config32(mch, 0x70);
me_base |= pci_read_config32(mch, MESEG_BASE);
printk(BIOS_DEBUG, "MEBASE 0x%llx\n", me_base);
@ -206,13 +204,12 @@ static void pci_domain_set_resources(struct device *dev)
}
/* Calculate TSEG size from its base which must be below GTT */
tseg_base = pci_read_config32(mch, 0xb8);
tseg_base = pci_read_config32(mch, TSEGMB);
uma_size = (uma_memory_base - tseg_base) >> 10;
tomk -= uma_size;
uma_memory_base = tomk * 1024ULL;
uma_memory_size += uma_size * 1024ULL;
printk(BIOS_DEBUG, "TSEG base 0x%08x size %uM\n",
tseg_base, uma_size >> 10);
printk(BIOS_DEBUG, "TSEG base 0x%08x size %uM\n", tseg_base, uma_size >> 10);
printk(BIOS_INFO, "Available memory below 4GB: %lluM\n", tomk >> 10);
@ -222,14 +219,13 @@ static void pci_domain_set_resources(struct device *dev)
(tomk - (legacy_hole_base_k + legacy_hole_size_k)));
/*
* If >= 4GB installed then memory from TOLUD to 4GB
* is remapped above TOM, TOUUD will account for both
* If >= 4GB installed, then memory from TOLUD to 4GB is remapped above TOM.
* TOUUD will account for both memory chunks.
*/
touud >>= 10; /* Convert to KB */
if (touud > 4096 * 1024) {
ram_resource(dev, 5, 4096 * 1024, touud - (4096 * 1024));
printk(BIOS_INFO, "Available memory above 4GB: %lluM\n",
(touud >> 10) - 4096);
printk(BIOS_INFO, "Available memory above 4GB: %lluM\n", (touud >> 10) - 4096);
}
add_fixed_resources(dev, 6);
@ -253,8 +249,9 @@ static const char *northbridge_acpi_name(const struct device *dev)
return NULL;
}
/* TODO We could determine how many PCIe busses we need in
* the bar. For now that number is hardcoded to a max of 64.
/*
* TODO We could determine how many PCIe busses we need in the bar.
* For now, that number is hardcoded to a max of 64.
*/
static struct device_operations pci_domain_ops = {
.read_resources = pci_domain_read_resources,
@ -304,6 +301,7 @@ static void northbridge_dmi_init(struct device *dev)
reg32 = DMIBAR32(0x1f8);
reg32 |= (1 << 16);
DMIBAR32(0x1f8) = reg32;
} else if (bridge_silicon_revision() >= SNB_STEP_D1) {
reg32 = DMIBAR32(0x1f8);
reg32 &= ~(1 << 26);
@ -374,10 +372,15 @@ static void disable_peg(void)
dev = pcidev_on_root(0, 0);
pci_write_config32(dev, DEVEN, reg);
if (!(reg & (DEVEN_PEG60 | DEVEN_PEG10 | DEVEN_PEG11 | DEVEN_PEG12))) {
/* Set the PEG clock gating bit.
* Disables the IO clock on all PEG devices. */
MCHBAR32(0x7010) = MCHBAR32(0x7010) | 0x01;
/*
* Set the PEG clock gating bit. Disables the IO clock on all PEG devices.
*
* FIXME: If not clock gating, this register still needs to be written to once,
* to lock it down. Also, never clock gate on Ivy Bridge stepping A0!
*/
MCHBAR32_OR(PEGCTL, 1);
printk(BIOS_DEBUG, "Disabling PEG IO clock.\n");
}
}
@ -394,10 +397,10 @@ static void northbridge_init(struct device *dev)
if ((bridge_silicon_revision() & BASE_REV_MASK) == BASE_REV_IVB) {
/* Enable Power Aware Interrupt Routing */
u8 pair = MCHBAR8(PAIR_CTL);
pair &= ~0xf; /* Clear 3:0 */
pair |= 0x4; /* Fixed Priority */
MCHBAR8(PAIR_CTL) = pair;
u8 pair = MCHBAR8(INTRDIRCTL);
pair &= ~0x0f; /* Clear 3:0 */
pair |= 0x04; /* Fixed Priority */
MCHBAR8(INTRDIRCTL) = pair;
/* 30h for IvyBridge */
bridge_type |= 0x30;
@ -407,9 +410,7 @@ static void northbridge_init(struct device *dev)
}
MCHBAR32(SAPMTIMERS) = bridge_type;
/* Turn off unused devices. Has to be done before
* setting BIOS_RESET_CPL.
*/
/* Turn off unused devices. Has to be done before setting BIOS_RESET_CPL. */
disable_peg();
/*
@ -426,17 +427,17 @@ static void northbridge_init(struct device *dev)
set_power_limits(28);
/*
* CPUs with configurable TDP also need power limits set
* in MCHBAR. Use same values from MSR_PKG_POWER_LIMIT.
* CPUs with configurable TDP also need power limits set in MCHBAR.
* Use the same values from MSR_PKG_POWER_LIMIT.
*/
if (cpu_config_tdp_levels()) {
msr_t msr = rdmsr(MSR_PKG_POWER_LIMIT);
MCHBAR32(MC_TURBO_PL1) = msr.lo;
MCHBAR32(MC_TURBO_PL2) = msr.hi;
MCHBAR32(MCH_PKG_POWER_LIMIT_LO) = msr.lo;
MCHBAR32(MCH_PKG_POWER_LIMIT_HI) = msr.hi;
}
/* Set here before graphics PM init */
MCHBAR32(MMIO_PAVP_CTL) = 0x00100001;
MCHBAR32(PAVP_MSG) = 0x00100001;
}
void northbridge_write_smram(u8 smram)
@ -453,7 +454,7 @@ static struct device_operations mc_ops = {
.set_resources = pci_dev_set_resources,
.enable_resources = pci_dev_enable_resources,
.init = northbridge_init,
.scan_bus = 0,
.scan_bus = NULL,
.ops_pci = &intel_pci_ops,
.acpi_fill_ssdt_generator = generate_cpu_entries,
};

View File

@ -81,9 +81,11 @@ static struct device_operations device_ops = {
#endif
};
static const unsigned short pci_device_ids[] = { 0x0101, 0x0105, 0x0109, 0x010d,
static const unsigned short pci_device_ids[] = {
0x0101, 0x0105, 0x0109, 0x010d,
0x0151, 0x0155, 0x0159, 0x015d,
0 };
0,
};
static const struct pci_driver pch_pcie __pci_driver = {
.ops = &device_ops,

View File

@ -33,10 +33,10 @@
#include <stdint.h>
typedef struct {
uint16_t mode; // 0: Disable, 1: Enable, 2: Auto, 3: Smart Auto
uint16_t hs_port_switch_mask; // 4 bit mask, 1: switchable, 0: not switchable
uint16_t preboot_support; // 0: No xHCI preOS driver, 1: xHCI preOS driver
uint16_t xhci_streams; // 0: Disable, 1: Enable
uint16_t mode; /* 0: Disable, 1: Enable, 2: Auto, 3: Smart Auto */
uint16_t hs_port_switch_mask; /* 4 bit mask, 1: switchable, 0: not switchable */
uint16_t preboot_support; /* 0: No xHCI preOS driver, 1: xHCI preOS driver */
uint16_t xhci_streams; /* 0: Disable, 1: Enable */
} pch_usb3_controller_settings;
typedef void (*tx_byte_func)(unsigned char byte);
@ -57,17 +57,19 @@ struct pei_data
uint32_t pmbase;
uint32_t gpiobase;
uint32_t thermalbase;
uint32_t system_type; // 0 Mobile, 1 Desktop/Server
uint32_t system_type; /* 0 Mobile, 1 Desktop/Server */
uint32_t tseg_size;
uint8_t spd_addresses[4];
uint8_t ts_addresses[4];
int boot_mode;
int ec_present;
int gbe_enable;
// 0 = leave channel enabled
// 1 = disable dimm 0 on channel
// 2 = disable dimm 1 on channel
// 3 = disable dimm 0+1 on channel
/*
* 0 = leave channel enabled
* 1 = disable dimm 0 on channel
* 2 = disable dimm 1 on channel
* 3 = disable dimm 0+1 on channel
*/
int dimm_channel0_disabled;
int dimm_channel1_disabled;
/* Seed values saved in CMOS */
@ -95,41 +97,45 @@ struct pei_data
*
* Port Length
* MOBILE:
* < 0x050 = Setting 1 (back panel, 1-5in, lowest tx amplitude)
* < 0x140 = Setting 2 (back panel, 5-14in, highest tx amplitude)
* < 0x050 = Setting 1 (back panel, 1 to 5 in, lowest tx amplitude)
* < 0x140 = Setting 2 (back panel, 5 to 14 in, highest tx amplitude)
* DESKTOP:
* < 0x080 = Setting 1 (front/back panel, <8in, lowest tx amplitude)
* < 0x130 = Setting 2 (back panel, 8-13in, higher tx amplitude)
* < 0x150 = Setting 3 (back panel, 13-15in, highest tx amplitude)
* < 0x080 = Setting 1 (front/back panel, less than 8 in, lowest tx amplitude)
* < 0x130 = Setting 2 (back panel, 8 to 13 in, higher tx amplitude)
* < 0x150 = Setting 3 (back panel, 13 to 15 in, highest tx amplitude)
*/
uint16_t usb_port_config[16][3];
/* See the usb3 struct above for details */
pch_usb3_controller_settings usb3;
/* SPD data array for onboard RAM.
* spd_data [1..3] are ignored, instead the "dimm_channel{0,1}_disabled"
* flag and the spd_addresses are used to determine which DIMMs should
* use the SPD from spd_data[0].
/*
* SPD data array for onboard RAM. Note that spd_data [1..3] are ignored: instead,
* the "dimm_channel{0,1}_disabled" flag and the spd_addresses are used to determine
* which DIMMs should use the SPD from spd_data[0].
*/
uint8_t spd_data[4][256];
tx_byte_func tx_byte;
int ddr3lv_support;
/* pcie_init needs to be set to 1 to have the system agent initialize
* PCIe. Note: This should only be required if your system has Gen3 devices
* and it will increase your boot time by at least 100ms.
/*
* pcie_init needs to be set to 1 to have the system agent initialize PCIe.
* Note: This should only be required if your system has Gen3 devices and
* it will increase your boot time by at least 100ms.
*/
int pcie_init;
/* N mode functionality. Leave this setting at 0.
* 0 Auto
* 1 1N
* 2 2N
/*
* N mode functionality. Leave this setting at 0.
*
* 0: Auto
* 1: 1N
* 2: 2N
*/
int nmode;
/* DDR refresh rate config. JEDEC Standard No.21-C Annex K allows
* for DIMM SPD data to specify whether double-rate is required for
* extended operating temperature range.
* 0 Enable double rate based upon temperature thresholds
* 1 Normal rate
* 2 Always enable double rate
/*
* DDR refresh rate config. JEDEC Standard No.21-C Annex K allows for DIMM SPD data to
* specify whether double-rate is required for extended operating temperature range.
*
* 0: Enable double rate based upon temperature thresholds
* 1: Normal rate
* 2: Always enable double rate
*/
int ddr_refresh_rate_config;
} __packed;

View File

@ -35,37 +35,40 @@
#define MRC_CACHE_VERSION 1
/* FIXME: no ECC support. */
/* FIXME: no support for 3-channel chipsets. */
/* FIXME: no ECC support */
/* FIXME: no support for 3-channel chipsets */
static const char *ecc_decoder[] = {
"inactive",
"active on IO",
"disabled on IO",
"active"
"active",
};
static void wait_txt_clear(void)
{
struct cpuid_result cp;
struct cpuid_result cp = cpuid_ext(1, 0);
cp = cpuid_ext(0x1, 0x0);
/* Check if TXT is supported? */
if (!(cp.ecx & 0x40))
/* Check if TXT is supported */
if (!(cp.ecx & (1 << 6)))
return;
/* Some TXT public bit. */
/* Some TXT public bit */
if (!(read32((void *)0xfed30010) & 1))
return;
/* Wait for TXT clear. */
while (!(read8((void *)0xfed40000) & (1 << 7)));
/* Wait for TXT clear */
while (!(read8((void *)0xfed40000) & (1 << 7)))
;
}
/*
* Disable a channel in ramctr_timing.
*/
static void disable_channel(ramctr_timing *ctrl, int channel) {
/* Disable a channel in ramctr_timing */
static void disable_channel(ramctr_timing *ctrl, int channel)
{
ctrl->rankmap[channel] = 0;
memset(&ctrl->rank_mirror[channel][0], 0, sizeof(ctrl->rank_mirror[0]));
ctrl->channel_size_mb[channel] = 0;
ctrl->cmd_stretch[channel] = 0;
ctrl->mad_dimm[channel] = 0;
@ -73,9 +76,7 @@ static void disable_channel(ramctr_timing *ctrl, int channel) {
memset(&ctrl->info.dimm[channel][0], 0, sizeof(ctrl->info.dimm[0]));
}
/*
* Fill cbmem with information for SMBIOS type 17.
*/
/* Fill cbmem with information for SMBIOS type 17 */
static void fill_smbios17(ramctr_timing *ctrl)
{
int channel, slot;
@ -89,38 +90,35 @@ static void fill_smbios17(ramctr_timing *ctrl)
}
}
/*
* Dump in the log memory controller configuration as read from the memory
* controller registers.
*/
#define ON_OFF(val) (((val) & 1) ? "on" : "off")
/* Print the memory controller configuration as read from the memory controller registers. */
static void report_memory_config(void)
{
u32 addr_decoder_common, addr_decode_ch[NUM_CHANNELS];
int i, refclk;
int i;
addr_decoder_common = MCHBAR32(MAD_CHNL);
addr_decode_ch[0] = MCHBAR32(MAD_DIMM_CH0);
addr_decode_ch[1] = MCHBAR32(MAD_DIMM_CH1);
refclk = MCHBAR32(MC_BIOS_REQ) & 0x100 ? 100 : 133;
const int refclk = MCHBAR32(MC_BIOS_REQ) & 0x100 ? 100 : 133;
printk(BIOS_DEBUG, "memcfg DDR3 ref clock %d MHz\n", refclk);
printk(BIOS_DEBUG, "memcfg DDR3 clock %d MHz\n",
(MCHBAR32(MC_BIOS_DATA) * refclk * 100 * 2 + 50) / 100);
printk(BIOS_DEBUG, "memcfg channel assignment: A: %d, B % d, C % d\n",
addr_decoder_common & 3, (addr_decoder_common >> 2) & 3,
(addr_decoder_common >> 0) & 3,
(addr_decoder_common >> 2) & 3,
(addr_decoder_common >> 4) & 3);
for (i = 0; i < ARRAY_SIZE(addr_decode_ch); i++) {
u32 ch_conf = addr_decode_ch[i];
printk(BIOS_DEBUG, "memcfg channel[%d] config (%8.8x):\n", i,
ch_conf);
printk(BIOS_DEBUG, " ECC %s\n",
ecc_decoder[(ch_conf >> 24) & 3]);
printk(BIOS_DEBUG, " enhanced interleave mode %s\n",
((ch_conf >> 22) & 1) ? "on" : "off");
printk(BIOS_DEBUG, " rank interleave %s\n",
((ch_conf >> 21) & 1) ? "on" : "off");
printk(BIOS_DEBUG, "memcfg channel[%d] config (%8.8x):\n", i, ch_conf);
printk(BIOS_DEBUG, " ECC %s\n", ecc_decoder[(ch_conf >> 24) & 3]);
printk(BIOS_DEBUG, " enhanced interleave mode %s\n", ON_OFF(ch_conf >> 22));
printk(BIOS_DEBUG, " rank interleave %s\n", ON_OFF(ch_conf >> 21));
printk(BIOS_DEBUG, " DIMMA %d MB width x%d %s rank%s\n",
((ch_conf >> 0) & 0xff) * 256,
((ch_conf >> 19) & 1) ? 16 : 8,
@ -133,10 +131,9 @@ static void report_memory_config(void)
((ch_conf >> 16) & 1) ? ", selected" : "");
}
}
#undef ON_OFF
/*
* Return CRC16 match for all SPDs.
*/
/* Return CRC16 match for all SPDs */
static int verify_crc16_spds_ddr3(spd_raw_data *spd, ramctr_timing *ctrl)
{
int channel, slot, spd_slot;
@ -166,7 +163,7 @@ void read_spd(spd_raw_data * spd, u8 addr, bool id_only)
static void dram_find_spds_ddr3(spd_raw_data *spd, ramctr_timing *ctrl)
{
int dimms = 0, dimms_on_channel;
int dimms = 0, ch_dimms;
int channel, slot, spd_slot;
dimm_info *dimm = &ctrl->info;
@ -178,52 +175,54 @@ static void dram_find_spds_ddr3(spd_raw_data *spd, ramctr_timing *ctrl)
FOR_ALL_CHANNELS {
ctrl->channel_size_mb[channel] = 0;
dimms_on_channel = 0;
/* count dimms on channel */
ch_dimms = 0;
/* Count dimms on channel */
for (slot = 0; slot < NUM_SLOTS; slot++) {
spd_slot = 2 * channel + slot;
printk(BIOS_DEBUG,
"SPD probe channel%d, slot%d\n", channel, slot);
printk(BIOS_DEBUG, "SPD probe channel%d, slot%d\n", channel, slot);
spd_decode_ddr3(&dimm->dimm[channel][slot], spd[spd_slot]);
if (dimm->dimm[channel][slot].dram_type == SPD_MEMORY_TYPE_SDRAM_DDR3)
dimms_on_channel++;
ch_dimms++;
}
for (slot = 0; slot < NUM_SLOTS; slot++) {
spd_slot = 2 * channel + slot;
printk(BIOS_DEBUG,
"SPD probe channel%d, slot%d\n", channel, slot);
printk(BIOS_DEBUG, "SPD probe channel%d, slot%d\n", channel, slot);
/* search for XMP profile */
spd_xmp_decode_ddr3(&dimm->dimm[channel][slot],
spd[spd_slot],
/* Search for XMP profile */
spd_xmp_decode_ddr3(&dimm->dimm[channel][slot], spd[spd_slot],
DDR3_XMP_PROFILE_1);
if (dimm->dimm[channel][slot].dram_type != SPD_MEMORY_TYPE_SDRAM_DDR3) {
printram("No valid XMP profile found.\n");
spd_decode_ddr3(&dimm->dimm[channel][slot], spd[spd_slot]);
} else if (dimms_on_channel > dimm->dimm[channel][slot].dimms_per_channel) {
printram("XMP profile supports %u DIMMs, but %u DIMMs are installed.\n",
dimm->dimm[channel][slot].dimms_per_channel,
dimms_on_channel);
} else if (ch_dimms > dimm->dimm[channel][slot].dimms_per_channel) {
printram(
"XMP profile supports %u DIMMs, but %u DIMMs are installed.\n",
dimm->dimm[channel][slot].dimms_per_channel, ch_dimms);
if (CONFIG(NATIVE_RAMINIT_IGNORE_XMP_MAX_DIMMS))
printk(BIOS_WARNING, "XMP maximum DIMMs will be ignored.\n");
printk(BIOS_WARNING,
"XMP maximum DIMMs will be ignored.\n");
else
spd_decode_ddr3(&dimm->dimm[channel][slot], spd[spd_slot]);
spd_decode_ddr3(&dimm->dimm[channel][slot],
spd[spd_slot]);
} else if (dimm->dimm[channel][slot].voltage != 1500) {
/* TODO: support other DDR3 voltage than 1500mV */
/* TODO: Support DDR3 voltages other than 1500mV */
printram("XMP profile's requested %u mV is unsupported.\n",
dimm->dimm[channel][slot].voltage);
spd_decode_ddr3(&dimm->dimm[channel][slot], spd[spd_slot]);
}
/* fill in CRC16 for MRC cache */
/* Fill in CRC16 for MRC cache */
ctrl->spd_crc[channel][slot] =
spd_ddr3_calc_unique_crc(spd[spd_slot], sizeof(spd_raw_data));
if (dimm->dimm[channel][slot].dram_type != SPD_MEMORY_TYPE_SDRAM_DDR3) {
// set dimm invalid
/* Mark DIMM as invalid */
dimm->dimm[channel][slot].ranks = 0;
dimm->dimm[channel][slot].size_mb = 0;
continue;
@ -232,31 +231,41 @@ static void dram_find_spds_ddr3(spd_raw_data *spd, ramctr_timing *ctrl)
dram_print_spd_ddr3(&dimm->dimm[channel][slot]);
dimms++;
ctrl->rank_mirror[channel][slot * 2] = 0;
ctrl->rank_mirror[channel][slot * 2 + 1] = dimm->dimm[channel][slot].flags.pins_mirrored;
ctrl->rank_mirror[channel][slot * 2 + 1] =
dimm->dimm[channel][slot].flags.pins_mirrored;
ctrl->channel_size_mb[channel] += dimm->dimm[channel][slot].size_mb;
ctrl->auto_self_refresh &= dimm->dimm[channel][slot].flags.asr;
ctrl->extended_temperature_range &= dimm->dimm[channel][slot].flags.ext_temp_refresh;
ctrl->rankmap[channel] |= ((1 << dimm->dimm[channel][slot].ranks) - 1) << (2 * slot);
printk(BIOS_DEBUG, "channel[%d] rankmap = 0x%x\n",
channel, ctrl->rankmap[channel]);
ctrl->extended_temperature_range &=
dimm->dimm[channel][slot].flags.ext_temp_refresh;
ctrl->rankmap[channel] |=
((1 << dimm->dimm[channel][slot].ranks) - 1) << (2 * slot);
printk(BIOS_DEBUG, "channel[%d] rankmap = 0x%x\n", channel,
ctrl->rankmap[channel]);
}
if ((ctrl->rankmap[channel] & 3) && (ctrl->rankmap[channel] & 0xc)
&& dimm->dimm[channel][0].reference_card <= 5 && dimm->dimm[channel][1].reference_card <= 5) {
if ((ctrl->rankmap[channel] & 0x03) && (ctrl->rankmap[channel] & 0x0c)
&& dimm->dimm[channel][0].reference_card <= 5
&& dimm->dimm[channel][1].reference_card <= 5) {
const int ref_card_offset_table[6][6] = {
{ 0, 0, 0, 0, 2, 2, },
{ 0, 0, 0, 0, 2, 2, },
{ 0, 0, 0, 0, 2, 2, },
{ 0, 0, 0, 0, 1, 1, },
{ 2, 2, 2, 1, 0, 0, },
{ 2, 2, 2, 1, 0, 0, },
{ 0, 0, 0, 0, 2, 2 },
{ 0, 0, 0, 0, 2, 2 },
{ 0, 0, 0, 0, 2, 2 },
{ 0, 0, 0, 0, 1, 1 },
{ 2, 2, 2, 1, 0, 0 },
{ 2, 2, 2, 1, 0, 0 },
};
ctrl->ref_card_offset[channel] = ref_card_offset_table[dimm->dimm[channel][0].reference_card]
ctrl->ref_card_offset[channel] = ref_card_offset_table
[dimm->dimm[channel][0].reference_card]
[dimm->dimm[channel][1].reference_card];
} else
} else {
ctrl->ref_card_offset[channel] = 0;
}
}
if (!dimms)
die("No DIMMs were found");
@ -265,29 +274,24 @@ static void dram_find_spds_ddr3(spd_raw_data *spd, ramctr_timing *ctrl)
static void save_timings(ramctr_timing *ctrl)
{
/* Save the MRC S3 restore data to cbmem */
mrc_cache_stash_data(MRC_TRAINING_DATA, MRC_CACHE_VERSION, ctrl,
sizeof(*ctrl));
mrc_cache_stash_data(MRC_TRAINING_DATA, MRC_CACHE_VERSION, ctrl, sizeof(*ctrl));
}
static int try_init_dram_ddr3(ramctr_timing *ctrl, int fast_boot,
int s3_resume, int me_uma_size)
static int try_init_dram_ddr3(ramctr_timing *ctrl, int fast_boot, int s3resume, int me_uma_size)
{
if (ctrl->sandybridge)
return try_init_dram_ddr3_sandy(ctrl, fast_boot, s3_resume, me_uma_size);
return try_init_dram_ddr3_snb(ctrl, fast_boot, s3resume, me_uma_size);
else
return try_init_dram_ddr3_ivy(ctrl, fast_boot, s3_resume, me_uma_size);
return try_init_dram_ddr3_ivb(ctrl, fast_boot, s3resume, me_uma_size);
}
static void init_dram_ddr3(int min_tck, int s3resume)
{
int me_uma_size;
int cbmem_was_inited;
int me_uma_size, cbmem_was_inited, fast_boot, err;
ramctr_timing ctrl;
int fast_boot;
spd_raw_data spds[4];
struct region_device rdev;
ramctr_timing *ctrl_cached;
int err;
u32 cpu;
MCHBAR32(SAPMCTL) |= 1;
@ -298,17 +302,14 @@ static void init_dram_ddr3(int min_tck, int s3resume)
printk(BIOS_DEBUG, "Starting native Platform init\n");
u32 reg_5d10;
wait_txt_clear();
wrmsr(0x000002e6, (msr_t) { .lo = 0, .hi = 0 });
reg_5d10 = MCHBAR32(0x5d10); // !!! = 0x00000000
if ((pci_read_config16(SOUTHBRIDGE, 0xa2) & 0xa0) == 0x20 /* 0x0004 */
&& reg_5d10 && !s3resume) {
MCHBAR32(0x5d10) = 0;
/* Need reset. */
const u32 sskpd = MCHBAR32(SSKPD); // !!! = 0x00000000
if ((pci_read_config16(SOUTHBRIDGE, 0xa2) & 0xa0) == 0x20 && sskpd && !s3resume) {
MCHBAR32(SSKPD) = 0;
/* Need reset */
system_reset();
}
@ -316,10 +317,9 @@ static void init_dram_ddr3(int min_tck, int s3resume)
early_init_dmi();
early_thermal_init();
/* try to find timings in MRC cache */
int cache_not_found = mrc_cache_get_current(MRC_TRAINING_DATA,
MRC_CACHE_VERSION, &rdev);
if (cache_not_found || (region_device_sz(&rdev) < sizeof(ctrl))) {
/* Try to find timings in MRC cache */
err = mrc_cache_get_current(MRC_TRAINING_DATA, MRC_CACHE_VERSION, &rdev);
if (err || (region_device_sz(&rdev) < sizeof(ctrl))) {
if (s3resume) {
/* Failed S3 resume, reset to come up cleanly */
system_reset();
@ -329,7 +329,7 @@ static void init_dram_ddr3(int min_tck, int s3resume)
ctrl_cached = rdev_mmap_full(&rdev);
}
/* verify MRC cache for fast boot */
/* Verify MRC cache for fast boot */
if (!s3resume && ctrl_cached) {
/* Load SPD unique information data. */
memset(spds, 0, sizeof(spds));
@ -353,8 +353,8 @@ static void init_dram_ddr3(int min_tck, int s3resume)
/* Failed S3 resume, reset to come up cleanly */
system_reset();
}
/* no need to erase bad mrc cache here, it gets overwritten on
* successful boot. */
/* No need to erase bad MRC cache here, it gets overwritten on a
successful boot */
printk(BIOS_ERR, "Stored timings are invalid !\n");
fast_boot = 0;
}
@ -377,7 +377,7 @@ static void init_dram_ddr3(int min_tck, int s3resume)
}
if (err) {
/* fallback: disable failing channel */
/* Fallback: disable failing channel */
printk(BIOS_ERR, "RAM training failed, trying fallback.\n");
printram("Disable failing channel.\n");
@ -392,7 +392,7 @@ static void init_dram_ddr3(int min_tck, int s3resume)
/* Reset DDR3 frequency */
dram_find_spds_ddr3(spds, &ctrl);
/* disable failing channel */
/* Disable failing channel */
disable_channel(&ctrl, GET_ERR_CHANNEL(err));
err = try_init_dram_ddr3(&ctrl, fast_boot, s3resume, me_uma_size);

File diff suppressed because it is too large Load Diff

View File

@ -51,24 +51,24 @@ typedef struct dimm_info_st {
} dimm_info;
struct ram_rank_timings {
/* ROUNDT_LAT register. One byte per slotrank. */
/* ROUNDT_LAT register: One byte per slotrank */
u8 roundtrip_latency;
/* IO_LATENCY register. One nibble per slotrank. */
/* IO_LATENCY register: One nibble per slotrank */
u8 io_latency;
/* Phase interpolator coding for command and control. */
/* Phase interpolator coding for command and control */
int pi_coding;
struct ram_lane_timings {
/* lane register offset 0x10. */
/* Lane register offset 0x10 */
u16 timA; /* bits 0 - 5, bits 16 - 18 */
u8 rising; /* bits 8 - 14 */
u8 falling; /* bits 20 - 26. */
u8 falling; /* bits 20 - 26 */
/* lane register offset 0x20. */
int timC; /* bit 0 - 5, 19. */
u16 timB; /* bits 8 - 13, 15 - 17. */
/* Lane register offset 0x20 */
int timC; /* bits 0 - 5, 19 */
u16 timB; /* bits 8 - 13, 15 - 17 */
} lanes[NUM_LANES];
};
@ -82,7 +82,7 @@ typedef struct ramctr_timing_st {
u8 base_freq;
u16 cas_supported;
/* tLatencies are in units of ns, scaled by x256 */
/* Latencies are in units of ns, scaled by x256 */
u32 tCK;
u32 tAA;
u32 tWR;
@ -97,7 +97,7 @@ typedef struct ramctr_timing_st {
u32 tCWL;
u32 tCMD;
/* Latencies in terms of clock cycles
* They are saved separately as they are needed for DRAM MRS commands */
They are saved separately as they are needed for DRAM MRS commands */
u8 CAS; /* CAS read latency */
u8 CWL; /* CAS write latency */
@ -110,7 +110,7 @@ typedef struct ramctr_timing_st {
u32 tXP;
u32 tAONPD;
/* Bits [0..11] of PM_DLL_CONFIG: Master DLL wakeup delay timer. */
/* Bits [0..11] of PM_DLL_CONFIG: Master DLL wakeup delay timer */
u16 mdll_wake_delay;
u8 rankmap[NUM_CHANNELS];
@ -135,7 +135,6 @@ typedef struct ramctr_timing_st {
dimm_info info;
} ramctr_timing;
#define HOST_BRIDGE PCI_DEV(0, 0, 0)
#define SOUTHBRIDGE PCI_DEV(0, 0x1f, 0)
#define FOR_ALL_LANES for (lane = 0; lane < NUM_LANES; lane++)
@ -174,17 +173,14 @@ void normalize_training(ramctr_timing *ctrl);
void write_controller_mr(ramctr_timing *ctrl);
int channel_test(ramctr_timing *ctrl);
void set_scrambling_seed(ramctr_timing *ctrl);
void set_4f8c(void);
void set_wmm_behavior(void);
void prepare_training(ramctr_timing *ctrl);
void set_4008c(ramctr_timing *ctrl);
void set_read_write_timings(ramctr_timing *ctrl);
void set_normal_operation(ramctr_timing *ctrl);
void final_registers(ramctr_timing *ctrl);
void restore_timings(ramctr_timing *ctrl);
int try_init_dram_ddr3_sandy(ramctr_timing *ctrl, int fast_boot,
int s3_resume, int me_uma_size);
int try_init_dram_ddr3_ivy(ramctr_timing *ctrl, int fast_boot,
int s3_resume, int me_uma_size);
int try_init_dram_ddr3_snb(ramctr_timing *ctrl, int fast_boot, int s3_resume, int me_uma_size);
int try_init_dram_ddr3_ivb(ramctr_timing *ctrl, int fast_boot, int s3_resume, int me_uma_size);
#endif

View File

@ -19,12 +19,10 @@
#include "raminit_native.h"
#include "raminit_common.h"
/* Frequency multiplier. */
/* Frequency multiplier */
static u32 get_FRQ(u32 tCK, u8 base_freq)
{
u32 FRQ;
FRQ = 256000 / (tCK * base_freq);
const u32 FRQ = 256000 / (tCK * base_freq);
if (base_freq == 100) {
if (FRQ > 12)
@ -41,249 +39,181 @@ static u32 get_FRQ(u32 tCK, u8 base_freq)
return FRQ;
}
/* Get REFI based on MC frequency, tREFI = 7.8usec */
static u32 get_REFI(u32 tCK, u8 base_freq)
{
u32 refi;
if (base_freq == 100) {
/* Get REFI based on MCU frequency using the following rule:
* tREFI = 7.8usec
* _________________________________________
* FRQ : | 7 | 8 | 9 | 10 | 11 | 12 |
* REFI : | 5460 | 6240 | 7020 | 7800 | 8580 | 9360 |
*/
static const u32 frq_xs_map[] =
{ 5460, 6240, 7020, 7800, 8580, 9360 };
refi = frq_xs_map[get_FRQ(tCK, 100) - 7];
static const u32 frq_xs_map[] = {
/* FRQ: 7, 8, 9, 10, 11, 12, */
5460, 6240, 7020, 7800, 8580, 9360,
};
return frq_xs_map[get_FRQ(tCK, 100) - 7];
} else {
/* Get REFI based on MCU frequency using the following rule:
* tREFI = 7.8usec
* ________________________________________________________
* FRQ : | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
* REFI: | 3120 | 4160 | 5200 | 6240 | 7280 | 8320 | 9360 | 10400 |
*/
static const u32 frq_refi_map[] =
{ 3120, 4160, 5200, 6240, 7280, 8320, 9360, 10400 };
refi = frq_refi_map[get_FRQ(tCK, 133) - 3];
}
return refi;
static const u32 frq_refi_map[] = {
/* FRQ: 3, 4, 5, 6, 7, 8, 9, 10, */
3120, 4160, 5200, 6240, 7280, 8320, 9360, 10400,
};
return frq_refi_map[get_FRQ(tCK, 133) - 3];
}
}
/* Get XSOffset based on MC frequency, tXS-Offset: tXS = tRFC + 10ns */
static u8 get_XSOffset(u32 tCK, u8 base_freq)
{
u8 xsoffset;
if (base_freq == 100) {
/* Get XSOffset based on MCU frequency using the following rule:
* tXS-offset: tXS = tRFC+10ns.
* _____________________________
* FRQ : | 7 | 8 | 9 | 10 | 11 | 12 |
* XSOffset : | 7 | 8 | 9 | 10 | 11 | 12 |
*/
static const u8 frq_xs_map[] = { 7, 8, 9, 10, 11, 12 };
xsoffset = frq_xs_map[get_FRQ(tCK, 100) - 7];
static const u8 frq_xs_map[] = {
/* FRQ: 7, 8, 9, 10, 11, 12, */
7, 8, 9, 10, 11, 12,
};
return frq_xs_map[get_FRQ(tCK, 100) - 7];
} else {
/* Get XSOffset based on MCU frequency using the following rule:
* ___________________________________
* FRQ : | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
* XSOffset : | 4 | 6 | 7 | 8 | 10 | 11 | 12 | 14 |
*/
static const u8 frq_xs_map[] = { 4, 6, 7, 8, 10, 11, 12, 14 };
xsoffset = frq_xs_map[get_FRQ(tCK, 133) - 3];
}
return xsoffset;
static const u8 frq_xs_map[] = {
/* FRQ: 3, 4, 5, 6, 7, 8, 9, 10, */
4, 6, 7, 8, 10, 11, 12, 14,
};
return frq_xs_map[get_FRQ(tCK, 133) - 3];
}
}
/* Get MOD based on MC frequency */
static u8 get_MOD(u32 tCK, u8 base_freq)
{
u8 mod;
if (base_freq == 100) {
/* Get MOD based on MCU frequency using the following rule:
* _____________________________
* FRQ : | 7 | 8 | 9 | 10 | 11 | 12 |
* MOD : | 12 | 12 | 14 | 15 | 17 | 18 |
*/
static const u8 frq_mod_map[] = {
/* FRQ: 7, 8, 9, 10, 11, 12, */
12, 12, 14, 15, 17, 18,
};
return frq_mod_map[get_FRQ(tCK, 100) - 7];
static const u8 frq_mod_map[] = { 12, 12, 14, 15, 17, 18 };
mod = frq_mod_map[get_FRQ(tCK, 100) - 7];
} else {
/* Get MOD based on MCU frequency using the following rule:
* _______________________________________
* FRQ : | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
* MOD : | 12 | 12 | 12 | 12 | 15 | 16 | 18 | 20 |
*/
static const u8 frq_mod_map[] = { 12, 12, 12, 12, 15, 16, 18, 20 };
mod = frq_mod_map[get_FRQ(tCK, 133) - 3];
static const u8 frq_mod_map[] = {
/* FRQ: 3, 4, 5, 6, 7, 8, 9, 10, */
12, 12, 12, 12, 15, 16, 18, 20,
};
return frq_mod_map[get_FRQ(tCK, 133) - 3];
}
return mod;
}
/* Get Write Leveling Output delay based on MC frequency */
static u8 get_WLO(u32 tCK, u8 base_freq)
{
u8 wlo;
if (base_freq == 100) {
/* Get WLO based on MCU frequency using the following rule:
* Write leveling output delay
* _____________________________
* FRQ : | 7 | 8 | 9 | 10 | 11 | 12 |
* MOD : | 6 | 6 | 7 | 8 | 9 | 9 |
*/
static const u8 frq_wlo_map[] = {
/* FRQ: 7, 8, 9, 10, 11, 12, */
6, 6, 7, 8, 9, 9,
};
return frq_wlo_map[get_FRQ(tCK, 100) - 7];
static const u8 frq_wlo_map[] = { 6, 6, 7, 8, 9, 9 };
wlo = frq_wlo_map[get_FRQ(tCK, 100) - 7];
} else {
/* Get WLO based on MCU frequency using the following rule:
* Write leveling output delay
* ________________________________
* FRQ : | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
* WLO : | 4 | 5 | 6 | 6 | 8 | 8 | 9 | 10 |
*/
static const u8 frq_wlo_map[] = { 4, 5, 6, 6, 8, 8, 9, 10 };
wlo = frq_wlo_map[get_FRQ(tCK, 133) - 3];
}
return wlo;
static const u8 frq_wlo_map[] = {
/* FRQ: 3, 4, 5, 6, 7, 8, 9, 10, */
4, 5, 6, 6, 8, 8, 9, 10,
};
return frq_wlo_map[get_FRQ(tCK, 133) - 3];
}
}
/* Get CKE based on MC frequency */
static u8 get_CKE(u32 tCK, u8 base_freq)
{
u8 cke;
if (base_freq == 100) {
/* Get CKE based on MCU frequency using the following rule:
* _____________________________
* FRQ : | 7 | 8 | 9 | 10 | 11 | 12 |
* MOD : | 4 | 4 | 5 | 5 | 6 | 6 |
*/
static const u8 frq_cke_map[] = {
/* FRQ: 7, 8, 9, 10, 11, 12, */
4, 4, 5, 5, 6, 6,
};
return frq_cke_map[get_FRQ(tCK, 100) - 7];
static const u8 frq_cke_map[] = { 4, 4, 5, 5, 6, 6 };
cke = frq_cke_map[get_FRQ(tCK, 100) - 7];
} else {
/* Get CKE based on MCU frequency using the following rule:
* ________________________________
* FRQ : | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
* WLO : | 3 | 3 | 4 | 4 | 5 | 6 | 6 | 7 |
*/
static const u8 frq_cke_map[] = { 3, 3, 4, 4, 5, 6, 6, 7 };
cke = frq_cke_map[get_FRQ(tCK, 133) - 3];
}
return cke;
static const u8 frq_cke_map[] = {
/* FRQ: 3, 4, 5, 6, 7, 8, 9, 10, */
3, 3, 4, 4, 5, 6, 6, 7,
};
return frq_cke_map[get_FRQ(tCK, 133) - 3];
}
}
/* Get XPDLL based on MC frequency */
static u8 get_XPDLL(u32 tCK, u8 base_freq)
{
u8 xpdll;
if (base_freq == 100) {
/* Get XPDLL based on MCU frequency using the following rule:
* _____________________________
* FRQ : | 7 | 8 | 9 | 10 | 11 | 12 |
* XPDLL : | 17 | 20 | 22 | 24 | 27 | 32 |
*/
static const u8 frq_xpdll_map[] = {
/* FRQ: 7, 8, 9, 10, 11, 12, */
17, 20, 22, 24, 27, 32,
};
return frq_xpdll_map[get_FRQ(tCK, 100) - 7];
static const u8 frq_xpdll_map[] = { 17, 20, 22, 24, 27, 32 };
xpdll = frq_xpdll_map[get_FRQ(tCK, 100) - 7];
} else {
/* Get XPDLL based on MCU frequency using the following rule:
* _______________________________________
* FRQ : | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
* XPDLL : | 10 | 13 | 16 | 20 | 23 | 26 | 29 | 32 |
*/
static const u8 frq_xpdll_map[] = { 10, 13, 16, 20, 23, 26, 29, 32 };
xpdll = frq_xpdll_map[get_FRQ(tCK, 133) - 3];
}
return xpdll;
static const u8 frq_xpdll_map[] = {
/* FRQ: 3, 4, 5, 6, 7, 8, 9, 10, */
10, 13, 16, 20, 23, 26, 29, 32,
};
return frq_xpdll_map[get_FRQ(tCK, 133) - 3];
}
}
/* Get XP based on MC frequency */
static u8 get_XP(u32 tCK, u8 base_freq)
{
u8 xp;
if (base_freq == 100) {
/* Get XP based on MCU frequency using the following rule:
* _____________________________
* FRQ : | 7 | 8 | 9 | 10 | 11 | 12 |
* XP : | 5 | 5 | 6 | 6 | 7 | 8 |
*/
static const u8 frq_xp_map[] = { 5, 5, 6, 6, 7, 8 };
xp = frq_xp_map[get_FRQ(tCK, 100) - 7];
static const u8 frq_xp_map[] = {
/* FRQ: 7, 8, 9, 10, 11, 12, */
5, 5, 6, 6, 7, 8,
};
return frq_xp_map[get_FRQ(tCK, 100) - 7];
} else {
/* Get XP based on MCU frequency using the following rule:
* _______________________________________
* FRQ : | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
* XP : | 3 | 4 | 4 | 5 | 6 | 7 | 8 | 8 |
*/
static const u8 frq_xp_map[] = { 3, 4, 4, 5, 6, 7, 8, 8 };
xp = frq_xp_map[get_FRQ(tCK, 133) - 3];
}
return xp;
static const u8 frq_xp_map[] = {
/* FRQ: 3, 4, 5, 6, 7, 8, 9, 10, */
3, 4, 4, 5, 6, 7, 8, 8
};
return frq_xp_map[get_FRQ(tCK, 133) - 3];
}
}
/* Get AONPD based on MC frequency */
static u8 get_AONPD(u32 tCK, u8 base_freq)
{
u8 aonpd;
if (base_freq == 100) {
/* Get AONPD based on MCU frequency using the following rule:
* _____________________________
* FRQ : | 7 | 8 | 9 | 10 | 11 | 12 |
* AONPD : | 6 | 8 | 8 | 9 | 10 | 11 |
*/
static const u8 frq_aonpd_map[] = {
/* FRQ: 7, 8, 9, 10, 11, 12, */
6, 8, 8, 9, 10, 11,
};
return frq_aonpd_map[get_FRQ(tCK, 100) - 7];
static const u8 frq_aonpd_map[] = { 6, 8, 8, 9, 10, 11 };
aonpd = frq_aonpd_map[get_FRQ(tCK, 100) - 7];
} else {
/* Get AONPD based on MCU frequency using the following rule:
* _______________________________________
* FRQ : | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
* AONPD : | 4 | 5 | 6 | 8 | 8 | 10 | 11 | 12 |
*/
static const u8 frq_aonpd_map[] = { 4, 5, 6, 8, 8, 10, 11, 12 };
aonpd = frq_aonpd_map[get_FRQ(tCK, 133) - 3];
}
return aonpd;
static const u8 frq_aonpd_map[] = {
/* FRQ: 3, 4, 5, 6, 7, 8, 9, 10, */
4, 5, 6, 8, 8, 10, 11, 12,
};
return frq_aonpd_map[get_FRQ(tCK, 133) - 3];
}
}
/* Get COMP2 based on MC frequency */
static u32 get_COMP2(u32 tCK, u8 base_freq)
{
u32 comp2;
if (base_freq == 100) {
/* Get COMP2 based on MCU frequency using the following rule:
* ______________________________________________________________
* FRQ : | 7 | 8 | 9 | 10 | 11 | 12 |
* COMP : | CA8C264 | C6671E4 | C6671E4 | C446964 | C235924 | C235924 |
*/
static const u32 frq_comp2_map[] = { 0xCA8C264, 0xC6671E4, 0xC6671E4, 0xC446964, 0xC235924, 0xC235924 };
comp2 = frq_comp2_map[get_FRQ(tCK, 100) - 7];
} else {
/* Get COMP2 based on MCU frequency using the following rule:
* ________________________________________________________________________________
* FRQ : | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 |
* COMP : | D6FF5E4 | CEBDB64 | CA8C264 | C6671E4 | C446964 | C235924 | C235924 | C235924 |
*/
static const u32 frq_comp2_map[] = { 0xD6FF5E4, 0xCEBDB64, 0xCA8C264,
0xC6671E4, 0xC446964, 0xC235924, 0xC235924, 0xC235924
static const u32 frq_comp2_map[] = {
// FRQ: 7, 8, 9, 10, 11, 12,
0x0CA8C264, 0x0C6671E4, 0x0C6671E4, 0x0C446964, 0x0C235924, 0x0C235924,
};
comp2 = frq_comp2_map[get_FRQ(tCK, 133) - 3];
return frq_comp2_map[get_FRQ(tCK, 100) - 7];
} else {
static const u32 frq_comp2_map[] = {
/* FRQ: 3, 4, 5, 6, */
0x0D6FF5E4, 0x0CEBDB64, 0x0CA8C264, 0x0C6671E4,
/* FRQ: 7, 8, 9, 10, */
0x0C446964, 0x0C235924, 0x0C235924, 0x0C235924,
};
return frq_comp2_map[get_FRQ(tCK, 133) - 3];
}
}
return comp2;
}
static void ivb_normalize_tclk(ramctr_timing *ctrl,
bool ref_100mhz_support)
static void ivb_normalize_tclk(ramctr_timing *ctrl, bool ref_100mhz_support)
{
if (ctrl->tCK <= TCK_1200MHZ) {
ctrl->tCK = TCK_1200MHZ;
@ -324,7 +254,7 @@ static void ivb_normalize_tclk(ramctr_timing *ctrl,
}
if (!ref_100mhz_support && ctrl->base_freq == 100) {
/* Skip unsupported frequency. */
/* Skip unsupported frequency */
ctrl->tCK++;
ivb_normalize_tclk(ctrl, ref_100mhz_support);
}
@ -333,29 +263,31 @@ static void ivb_normalize_tclk(ramctr_timing *ctrl,
static void find_cas_tck(ramctr_timing *ctrl)
{
u8 val;
u32 val32;
u32 reg32;
u8 ref_100mhz_support;
/* 100 Mhz reference clock supported */
reg32 = pci_read_config32(PCI_DEV(0, 0, 0), CAPID0_B);
/* 100 MHz reference clock supported */
reg32 = pci_read_config32(HOST_BRIDGE, CAPID0_B);
ref_100mhz_support = !!((reg32 >> 21) & 0x7);
printk(BIOS_DEBUG, "100MHz reference clock support: %s\n",
ref_100mhz_support ? "yes" : "no");
printk(BIOS_DEBUG, "100MHz reference clock support: %s\n", ref_100mhz_support ? "yes"
: "no");
/* Find CAS latency */
while (1) {
/* Normalising tCK before computing clock could potentially
* results in lower selected CAS, which is desired.
/*
* Normalising tCK before computing clock could potentially
* result in a lower selected CAS, which is desired.
*/
ivb_normalize_tclk(ctrl, ref_100mhz_support);
if (!(ctrl->tCK))
die("Couldn't find compatible clock / CAS settings\n");
val = DIV_ROUND_UP(ctrl->tAA, ctrl->tCK);
printk(BIOS_DEBUG, "Trying CAS %u, tCK %u.\n", val, ctrl->tCK);
for (; val <= MAX_CAS; val++)
if ((ctrl->cas_supported >> (val - MIN_CAS)) & 1)
break;
if (val == (MAX_CAS + 1)) {
ctrl->tCK++;
continue;
@ -365,9 +297,7 @@ static void find_cas_tck(ramctr_timing *ctrl)
}
}
val32 = NS2MHZ_DIV256 / ctrl->tCK;
printk(BIOS_DEBUG, "Selected DRAM frequency: %u MHz\n", val32);
printk(BIOS_DEBUG, "Selected DRAM frequency: %u MHz\n", NS2MHZ_DIV256 / ctrl->tCK);
printk(BIOS_DEBUG, "Selected CAS latency : %uT\n", val);
ctrl->CAS = val;
}
@ -375,9 +305,10 @@ static void find_cas_tck(ramctr_timing *ctrl)
static void dram_timing(ramctr_timing *ctrl)
{
/* Maximum supported DDR3 frequency is 1400MHz (DDR3 2800).
* We cap it at 1200Mhz (DDR3 2400).
* Then, align it to the closest JEDEC standard frequency */
/*
* On Ivy Bridge, the maximum supported DDR3 frequency is 1400MHz (DDR3 2800).
* Cap it at 1200MHz (DDR3 2400), and align it to the closest JEDEC standard frequency.
*/
if (ctrl->tCK == TCK_1200MHZ) {
ctrl->edge_offset[0] = 18; //XXX: guessed
ctrl->edge_offset[1] = 8;
@ -386,6 +317,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[1] = 8;
ctrl->timC_offset[2] = 8;
ctrl->pi_coding_threshold = 10;
} else if (ctrl->tCK == TCK_1100MHZ) {
ctrl->edge_offset[0] = 17; //XXX: guessed
ctrl->edge_offset[1] = 7;
@ -394,6 +326,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[1] = 7;
ctrl->timC_offset[2] = 7;
ctrl->pi_coding_threshold = 13;
} else if (ctrl->tCK == TCK_1066MHZ) {
ctrl->edge_offset[0] = 16;
ctrl->edge_offset[1] = 7;
@ -402,6 +335,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[1] = 7;
ctrl->timC_offset[2] = 7;
ctrl->pi_coding_threshold = 13;
} else if (ctrl->tCK == TCK_1000MHZ) {
ctrl->edge_offset[0] = 15; //XXX: guessed
ctrl->edge_offset[1] = 6;
@ -410,6 +344,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[1] = 6;
ctrl->timC_offset[2] = 6;
ctrl->pi_coding_threshold = 13;
} else if (ctrl->tCK == TCK_933MHZ) {
ctrl->edge_offset[0] = 14;
ctrl->edge_offset[1] = 6;
@ -418,6 +353,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[1] = 6;
ctrl->timC_offset[2] = 6;
ctrl->pi_coding_threshold = 15;
} else if (ctrl->tCK == TCK_900MHZ) {
ctrl->edge_offset[0] = 14; //XXX: guessed
ctrl->edge_offset[1] = 6;
@ -426,6 +362,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[1] = 6;
ctrl->timC_offset[2] = 6;
ctrl->pi_coding_threshold = 12;
} else if (ctrl->tCK == TCK_800MHZ) {
ctrl->edge_offset[0] = 13;
ctrl->edge_offset[1] = 5;
@ -434,6 +371,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[1] = 5;
ctrl->timC_offset[2] = 5;
ctrl->pi_coding_threshold = 15;
} else if (ctrl->tCK == TCK_700MHZ) {
ctrl->edge_offset[0] = 13; //XXX: guessed
ctrl->edge_offset[1] = 5;
@ -442,6 +380,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[1] = 5;
ctrl->timC_offset[2] = 5;
ctrl->pi_coding_threshold = 16;
} else if (ctrl->tCK == TCK_666MHZ) {
ctrl->edge_offset[0] = 10;
ctrl->edge_offset[1] = 4;
@ -450,6 +389,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[1] = 4;
ctrl->timC_offset[2] = 4;
ctrl->pi_coding_threshold = 16;
} else if (ctrl->tCK == TCK_533MHZ) {
ctrl->edge_offset[0] = 8;
ctrl->edge_offset[1] = 3;
@ -458,6 +398,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[1] = 3;
ctrl->timC_offset[2] = 3;
ctrl->pi_coding_threshold = 17;
} else { /* TCK_400MHZ */
ctrl->edge_offset[0] = 6;
ctrl->edge_offset[1] = 2;
@ -478,6 +419,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->CWL = DIV_ROUND_UP(ctrl->tCWL, ctrl->tCK);
else
ctrl->CWL = get_CWL(ctrl->tCK);
printk(BIOS_DEBUG, "Selected CWL latency : %uT\n", ctrl->CWL);
/* Find tRCD */
@ -528,8 +470,9 @@ static void dram_timing(ramctr_timing *ctrl)
static void dram_freq(ramctr_timing *ctrl)
{
if (ctrl->tCK > TCK_400MHZ) {
printk (BIOS_ERR, "DRAM frequency is under lowest supported "
"frequency (400 MHz). Increasing to 400 MHz as last resort");
printk(BIOS_ERR,
"DRAM frequency is under lowest supported frequency (400 MHz). "
"Increasing to 400 MHz as last resort");
ctrl->tCK = TCK_400MHZ;
}
@ -540,11 +483,12 @@ static void dram_freq(ramctr_timing *ctrl)
/* Step 1 - Set target PCU frequency */
find_cas_tck(ctrl);
/* Frequency multiplier. */
u32 FRQ = get_FRQ(ctrl->tCK, ctrl->base_freq);
/* Frequency multiplier */
const u32 FRQ = get_FRQ(ctrl->tCK, ctrl->base_freq);
/* The PLL will never lock if the required frequency is
* already set. Exit early to prevent a system hang.
/*
* The PLL will never lock if the required frequency is already set.
* Exit early to prevent a system hang.
*/
reg1 = MCHBAR32(MC_BIOS_DATA);
val2 = (u8) reg1;
@ -555,7 +499,8 @@ static void dram_freq(ramctr_timing *ctrl)
reg1 = FRQ;
if (ctrl->base_freq == 100)
reg1 |= 0x100; /* Enable 100Mhz REF clock */
reg1 |= 0x80000000; // set running bit
reg1 |= 0x80000000; /* set running bit */
MCHBAR32(MC_BIOS_REQ) = reg1;
int i = 0;
printk(BIOS_DEBUG, "PLL busy... ");
@ -581,61 +526,57 @@ static void dram_freq(ramctr_timing *ctrl)
static void dram_ioregs(ramctr_timing *ctrl)
{
u32 reg, comp2;
u32 reg;
int channel;
// IO clock
/* IO clock */
FOR_ALL_CHANNELS {
MCHBAR32(GDCRCLKRANKSUSED_ch(channel)) = ctrl->rankmap[channel];
}
// IO command
/* IO command */
FOR_ALL_CHANNELS {
MCHBAR32(GDCRCTLRANKSUSED_ch(channel)) = ctrl->rankmap[channel];
}
// IO control
/* IO control */
FOR_ALL_POPULATED_CHANNELS {
program_timings(ctrl, channel);
}
// Rcomp
/* Perform RCOMP */
printram("RCOMP...");
reg = 0;
while (reg == 0) {
reg = MCHBAR32(RCOMP_TIMER) & 0x10000;
}
while (!(MCHBAR32(RCOMP_TIMER) & (1 << 16)))
;
printram("done\n");
// Set comp2
comp2 = get_COMP2(ctrl->tCK, ctrl->base_freq);
MCHBAR32(CRCOMPOFST2) = comp2;
/* Set COMP2 */
MCHBAR32(CRCOMPOFST2) = get_COMP2(ctrl->tCK, ctrl->base_freq);
printram("COMP2 done\n");
// Set comp1
/* Set COMP1 */
FOR_ALL_POPULATED_CHANNELS {
reg = MCHBAR32(CRCOMPOFST1_ch(channel)); //ch0
reg = (reg & ~0xe00) | (1 << 9); //odt
reg = (reg & ~0xe00000) | (1 << 21); //clk drive up
reg = (reg & ~0x38000000) | (1 << 27); //ctl drive up
reg = MCHBAR32(CRCOMPOFST1_ch(channel));
reg = (reg & ~0x00000e00) | (1 << 9); /* ODT */
reg = (reg & ~0x00e00000) | (1 << 21); /* clk drive up */
reg = (reg & ~0x38000000) | (1 << 27); /* ctl drive up */
MCHBAR32(CRCOMPOFST1_ch(channel)) = reg;
}
printram("COMP1 done\n");
printram("FORCE RCOMP and wait 20us...");
MCHBAR32(M_COMP) |= 0x100;
MCHBAR32(M_COMP) |= (1 << 8);
udelay(20);
printram("done\n");
}
int try_init_dram_ddr3_ivy(ramctr_timing *ctrl, int fast_boot,
int s3_resume, int me_uma_size)
int try_init_dram_ddr3_ivb(ramctr_timing *ctrl, int fast_boot, int s3_resume, int me_uma_size)
{
int err;
printk(BIOS_DEBUG, "Starting Ivybridge RAM training (%d).\n",
fast_boot);
printk(BIOS_DEBUG, "Starting Ivybridge RAM training (%d).\n", fast_boot);
if (!fast_boot) {
/* Find fastest common supported parameters */
@ -644,7 +585,7 @@ int try_init_dram_ddr3_ivy(ramctr_timing *ctrl, int fast_boot,
dram_dimm_mapping(ctrl);
}
/* Set MCU frequency */
/* Set MC frequency */
dram_freq(ctrl);
if (!fast_boot) {
@ -653,7 +594,7 @@ int try_init_dram_ddr3_ivy(ramctr_timing *ctrl, int fast_boot,
}
/* Set version register */
MCHBAR32(MRC_REVISION) = 0xC04EB002;
MCHBAR32(MRC_REVISION) = 0xc04eb002;
/* Enable crossover */
dram_xover(ctrl);
@ -667,11 +608,11 @@ int try_init_dram_ddr3_ivy(ramctr_timing *ctrl, int fast_boot,
/* Set scheduler chicken bits */
MCHBAR32(SCHED_CBIT) = 0x10100005;
/* Set CPU specific register */
set_4f8c();
/* Set up watermarks and starvation counter */
set_wmm_behavior();
/* Clear IO reset bit */
MCHBAR32(MC_INIT_STATE_G) &= ~0x20;
MCHBAR32(MC_INIT_STATE_G) &= ~(1 << 5);
/* Set MAD-DIMM registers */
dram_dimm_set_mapping(ctrl);
@ -693,7 +634,7 @@ int try_init_dram_ddr3_ivy(ramctr_timing *ctrl, int fast_boot,
if (fast_boot) {
restore_timings(ctrl);
} else {
/* Do jedec ddr3 reset sequence */
/* Do JEDEC DDR3 reset sequence */
dram_jedecreset(ctrl);
printk(BIOS_DEBUG, "Done jedec reset\n");
@ -737,7 +678,7 @@ int try_init_dram_ddr3_ivy(ramctr_timing *ctrl, int fast_boot,
normalize_training(ctrl);
}
set_4008c(ctrl);
set_read_write_timings(ctrl);
write_controller_mr(ctrl);

View File

@ -60,8 +60,7 @@ void save_mrc_data(struct pei_data *pei_data)
u16 c1, c2, checksum;
/* Save the MRC S3 restore data to cbmem */
mrc_cache_stash_data(MRC_TRAINING_DATA, MRC_CACHE_VERSION,
pei_data->mrc_output,
mrc_cache_stash_data(MRC_TRAINING_DATA, MRC_CACHE_VERSION, pei_data->mrc_output,
pei_data->mrc_output_len);
/* Save the MRC seed values to CMOS */
@ -74,13 +73,11 @@ void save_mrc_data(struct pei_data *pei_data)
pei_data->scrambler_seed_s3, CMOS_OFFSET_MRC_SEED_S3);
/* Save a simple checksum of the seed values */
c1 = compute_ip_checksum((u8*)&pei_data->scrambler_seed,
sizeof(u32));
c2 = compute_ip_checksum((u8*)&pei_data->scrambler_seed_s3,
sizeof(u32));
c1 = compute_ip_checksum((u8 *)&pei_data->scrambler_seed, sizeof(u32));
c2 = compute_ip_checksum((u8 *)&pei_data->scrambler_seed_s3, sizeof(u32));
checksum = add_ip_checksums(sizeof(u32), c1, c2);
cmos_write(checksum & 0xff, CMOS_OFFSET_MRC_SEED_CHK);
cmos_write((checksum >> 0) & 0xff, CMOS_OFFSET_MRC_SEED_CHK);
cmos_write((checksum >> 8) & 0xff, CMOS_OFFSET_MRC_SEED_CHK + 1);
}
@ -89,7 +86,7 @@ static void prepare_mrc_cache(struct pei_data *pei_data)
struct region_device rdev;
u16 c1, c2, checksum, seed_checksum;
// preset just in case there is an error
/* Preset just in case there is an error */
pei_data->mrc_input = NULL;
pei_data->mrc_input_len = 0;
@ -103,10 +100,8 @@ static void prepare_mrc_cache(struct pei_data *pei_data)
pei_data->scrambler_seed_s3, CMOS_OFFSET_MRC_SEED_S3);
/* Compute seed checksum and compare */
c1 = compute_ip_checksum((u8*)&pei_data->scrambler_seed,
sizeof(u32));
c2 = compute_ip_checksum((u8*)&pei_data->scrambler_seed_s3,
sizeof(u32));
c1 = compute_ip_checksum((u8 *)&pei_data->scrambler_seed, sizeof(u32));
c2 = compute_ip_checksum((u8 *)&pei_data->scrambler_seed_s3, sizeof(u32));
checksum = add_ip_checksums(sizeof(u32), c1, c2);
seed_checksum = cmos_read(CMOS_OFFSET_MRC_SEED_CHK);
@ -119,30 +114,28 @@ static void prepare_mrc_cache(struct pei_data *pei_data)
return;
}
if (mrc_cache_get_current(MRC_TRAINING_DATA, MRC_CACHE_VERSION,
&rdev)) {
/* error message printed in find_current_mrc_cache */
if (mrc_cache_get_current(MRC_TRAINING_DATA, MRC_CACHE_VERSION, &rdev)) {
/* Error message printed in find_current_mrc_cache */
return;
}
pei_data->mrc_input = rdev_mmap_full(&rdev);
pei_data->mrc_input_len = region_device_sz(&rdev);
printk(BIOS_DEBUG, "%s: at %p, size %x\n",
__func__, pei_data->mrc_input, pei_data->mrc_input_len);
printk(BIOS_DEBUG, "%s: at %p, size %x\n", __func__, pei_data->mrc_input,
pei_data->mrc_input_len);
}
static const char *ecc_decoder[] = {
"inactive",
"active on IO",
"disabled on IO",
"active"
"active",
};
/*
* Dump in the log memory controller configuration as read from the memory
* controller registers.
*/
#define ON_OFF(val) (((val) & 1) ? "on" : "off")
/* Print the memory controller configuration as read from the memory controller registers. */
static void report_memory_config(void)
{
u32 addr_decoder_common, addr_decode_ch[2];
@ -154,21 +147,18 @@ static void report_memory_config(void)
printk(BIOS_DEBUG, "memcfg DDR3 clock %d MHz\n",
(MCHBAR32(MC_BIOS_DATA) * 13333 * 2 + 50) / 100);
printk(BIOS_DEBUG, "memcfg channel assignment: A: %d, B % d, C % d\n",
addr_decoder_common & 3,
(addr_decoder_common >> 0) & 3,
(addr_decoder_common >> 2) & 3,
(addr_decoder_common >> 4) & 3);
for (i = 0; i < ARRAY_SIZE(addr_decode_ch); i++) {
u32 ch_conf = addr_decode_ch[i];
printk(BIOS_DEBUG, "memcfg channel[%d] config (%8.8x):\n",
i, ch_conf);
printk(BIOS_DEBUG, " ECC %s\n",
ecc_decoder[(ch_conf >> 24) & 3]);
printk(BIOS_DEBUG, " enhanced interleave mode %s\n",
((ch_conf >> 22) & 1) ? "on" : "off");
printk(BIOS_DEBUG, " rank interleave %s\n",
((ch_conf >> 21) & 1) ? "on" : "off");
printk(BIOS_DEBUG, "memcfg channel[%d] config (%8.8x):\n", i, ch_conf);
printk(BIOS_DEBUG, " ECC %s\n", ecc_decoder[(ch_conf >> 24) & 3]);
printk(BIOS_DEBUG, " enhanced interleave mode %s\n", ON_OFF(ch_conf >> 22));
printk(BIOS_DEBUG, " rank interleave %s\n", ON_OFF(ch_conf >> 21));
printk(BIOS_DEBUG, " DIMMA %d MB width x%d %s rank%s\n",
((ch_conf >> 0) & 0xff) * 256,
((ch_conf >> 19) & 1) ? 16 : 8,
@ -181,6 +171,7 @@ static void report_memory_config(void)
((ch_conf >> 16) & 1) ? ", selected" : "");
}
}
#undef ON_OFF
/**
* Find PEI executable in coreboot filesystem and execute it.
@ -245,18 +236,17 @@ void sdram_initialize(struct pei_data *pei_data)
if (CONFIG(USBDEBUG_IN_PRE_RAM))
usbdebug_hw_init(true);
/* For reference print the System Agent version
* after executing the UEFI PEI stage.
*/
/* For reference, print the System Agent version after executing the UEFI PEI stage */
u32 version = MCHBAR32(MRC_REVISION);
printk(BIOS_DEBUG, "System Agent Version %d.%d.%d Build %d\n",
version >> 24, (version >> 16) & 0xff,
(version >> 8) & 0xff, version & 0xff);
(version >> 24) & 0xff, (version >> 16) & 0xff,
(version >> 8) & 0xff, (version >> 0) & 0xff);
/* Send ME init done for SandyBridge here. This is done
* inside the SystemAgent binary on IvyBridge. */
if (BASE_REV_SNB ==
(pci_read_config16(PCI_CPU_DEVICE, PCI_DEVICE_ID) & BASE_REV_MASK))
/*
* Send ME init done for SandyBridge here.
* This is done inside the SystemAgent binary on IvyBridge.
*/
if (BASE_REV_SNB == (pci_read_config16(PCI_CPU_DEVICE, PCI_DEVICE_ID) & BASE_REV_MASK))
intel_early_me_init_done(ME_INIT_STATUS_SUCCESS);
else
intel_early_me_status();
@ -264,31 +254,30 @@ void sdram_initialize(struct pei_data *pei_data)
report_memory_config();
}
/* These are the location and structure of MRC_VAR data in CAR.
The CAR region looks like this:
+------------------+ -> DCACHE_RAM_BASE
| |
| |
| COREBOOT STACK |
| |
| |
+------------------+ -> DCACHE_RAM_BASE + DCACHE_RAM_SIZE
| |
| MRC HEAP |
| size = 0x5000 |
| |
+------------------+
| |
| MRC VAR |
| size = 0x4000 |
| |
+------------------+ -> DACHE_RAM_BASE + DACHE_RAM_SIZE
+ DCACHE_RAM_MRC_VAR_SIZE
/*
* These are the location and structure of MRC_VAR data in CAR.
* The CAR region looks like this:
* +------------------+ -> DCACHE_RAM_BASE
* | |
* | |
* | COREBOOT STACK |
* | |
* | |
* +------------------+ -> DCACHE_RAM_BASE + DCACHE_RAM_SIZE
* | |
* | MRC HEAP |
* | size = 0x5000 |
* | |
* +------------------+
* | |
* | MRC VAR |
* | size = 0x4000 |
* | |
* +------------------+ -> DACHE_RAM_BASE + DACHE_RAM_SIZE
* + DCACHE_RAM_MRC_VAR_SIZE
*/
#define DCACHE_RAM_MRC_VAR_BASE \
(CONFIG_DCACHE_RAM_BASE + CONFIG_DCACHE_RAM_SIZE + \
CONFIG_DCACHE_RAM_MRC_VAR_SIZE - 0x4000)
#define DCACHE_RAM_MRC_VAR_BASE (CONFIG_DCACHE_RAM_BASE + CONFIG_DCACHE_RAM_SIZE \
+ CONFIG_DCACHE_RAM_MRC_VAR_SIZE - 0x4000)
struct mrc_var_data {
u32 acpi_timer_flag;
@ -306,7 +295,7 @@ static void northbridge_fill_pei_data(struct pei_data *pei_data)
pei_data->pciexbar = CONFIG_MMCONF_BASE_ADDRESS;
pei_data->hpet_address = CONFIG_HPET_ADDRESS;
pei_data->thermalbase = 0xfed08000;
pei_data->system_type = get_platform_type() == PLATFORM_MOBILE ? 0 : 1;
pei_data->system_type = !(get_platform_type() == PLATFORM_MOBILE);
pei_data->tseg_size = CONFIG_SMM_TSEG_SIZE;
if ((cpu_get_cpuid() & 0xffff0) == 0x306a0) {
@ -322,7 +311,7 @@ static void southbridge_fill_pei_data(struct pei_data *pei_data)
const struct device *dev = pcidev_on_root(0x19, 0);
pei_data->smbusbar = SMBUS_IO_BASE;
pei_data->wdbbar = 0x4000000;
pei_data->wdbbar = 0x04000000;
pei_data->wdbsize = 0x1000;
pei_data->rcba = (uintptr_t)DEFAULT_RCBABASE;
pei_data->pmbase = DEFAULT_PMBASE;
@ -360,11 +349,8 @@ static void devicetree_fill_pei_data(struct pei_data *pei_data)
}
memcpy(pei_data->spd_addresses, cfg->spd_addresses,
sizeof(pei_data->spd_addresses));
memcpy(pei_data->ts_addresses, cfg->ts_addresses,
sizeof(pei_data->ts_addresses));
memcpy(pei_data->spd_addresses, cfg->spd_addresses, sizeof(pei_data->spd_addresses));
memcpy(pei_data->ts_addresses, cfg->ts_addresses, sizeof(pei_data->ts_addresses));
pei_data->ec_present = cfg->ec_present;
pei_data->ddr3lv_support = cfg->ddr3lv_support;
@ -383,7 +369,7 @@ static void devicetree_fill_pei_data(struct pei_data *pei_data)
static void disable_p2p(void)
{
/* Disable PCI-to-PCI bridge early to prevent probing by MRC. */
/* Disable PCI-to-PCI bridge early to prevent probing by MRC */
const struct device *const p2p = pcidev_on_root(0x1e, 0);
if (p2p && p2p->enabled)
return;
@ -393,7 +379,6 @@ static void disable_p2p(void)
void perform_raminit(int s3resume)
{
int cbmem_was_initted;
struct pei_data pei_data;
struct mrc_var_data *mrc_var;
@ -425,6 +410,7 @@ void perform_raminit(int s3resume)
if (pei_data.spd_data[i][0] && !pei_data.spd_data[0][0]) {
memcpy(pei_data.spd_data[0], pei_data.spd_data[i],
sizeof(pei_data.spd_data[0]));
} else if (pei_data.spd_data[i][0] && pei_data.spd_data[0][0]) {
if (memcmp(pei_data.spd_data[i], pei_data.spd_data[0],
sizeof(pei_data.spd_data[0])) != 0)
@ -438,18 +424,18 @@ void perform_raminit(int s3resume)
timestamp_add_now(TS_BEFORE_INITRAM);
sdram_initialize(&pei_data);
/* Sanity check mrc_var location by verifying a known field */
mrc_var = (void *)DCACHE_RAM_MRC_VAR_BASE;
/* Sanity check mrc_var location by verifying a known field. */
if (mrc_var->tx_byte == (uintptr_t)pei_data.tx_byte) {
printk(BIOS_DEBUG, "MRC_VAR pool occupied [%08x,%08x]\n",
mrc_var->pool_base,
mrc_var->pool_base + mrc_var->pool_used);
mrc_var->pool_base, mrc_var->pool_base + mrc_var->pool_used);
} else {
printk(BIOS_ERR, "Could not parse MRC_VAR data\n");
hexdump32(BIOS_ERR, mrc_var, sizeof(*mrc_var) / sizeof(u32));
}
cbmem_was_initted = !cbmem_recovery(s3resume);
const int cbmem_was_initted = !cbmem_recovery(s3resume);
if (!s3resume)
save_mrc_data(&pei_data);

View File

@ -18,8 +18,8 @@
#include "sandybridge.h"
#include <device/dram/ddr3.h>
/* The order is ch0dimmA, ch0dimmB, ch1dimmA, ch1dimmB. */
/* The order is: ch0dimmA, ch0dimmB, ch1dimmA, ch1dimmB */
void read_spd(spd_raw_data *spd, u8 addr, bool id_only);
void mainboard_get_spd(spd_raw_data *spd, bool id_only);
#endif /* RAMINIT_H */
#endif /* RAMINIT_NATIVE_H */

View File

@ -18,116 +18,105 @@
#include "raminit_native.h"
#include "raminit_common.h"
/*
 * Frequency multiplier: number of 133 MHz (BASEFREQ) steps requested for tCK.
 * The result is clamped to the supported range [3, 8].
 */
static u32 get_FRQ(u32 tCK)
{
	const u32 FRQ = 256000 / (tCK * BASEFREQ);

	if (FRQ > 8)
		return 8;

	if (FRQ < 3)
		return 3;

	return FRQ;
}
/* Get REFI based on MC frequency */
static u32 get_REFI(u32 tCK)
{
	static const u32 frq_refi_map[] = {
	/*	FRQ:	3,    4,    5,    6,    7,    8, */
		3120, 4160, 5200, 6240, 7280, 8320,
	};
	return frq_refi_map[get_FRQ(tCK) - 3];
}
/* Get XSOffset based on MC frequency */
static u8 get_XSOffset(u32 tCK)
{
	static const u8 frq_xs_map[] = {
	/*	FRQ: 3, 4, 5, 6,  7,  8, */
		     4, 6, 7, 8, 10, 11,
	};
	return frq_xs_map[get_FRQ(tCK) - 3];
}
/* Get MOD based on MC frequency */
static u8 get_MOD(u32 tCK)
{
	static const u8 frq_mod_map[] = {
	/*	FRQ:  3,  4,  5,  6,  7,  8, */
		     12, 12, 12, 12, 15, 16,
	};
	return frq_mod_map[get_FRQ(tCK) - 3];
}
/* Get Write Leveling Output delay based on MC frequency */
static u8 get_WLO(u32 tCK)
{
	static const u8 frq_wlo_map[] = {
	/*	FRQ: 3, 4, 5, 6, 7, 8, */
		     4, 5, 6, 6, 8, 8,
	};
	return frq_wlo_map[get_FRQ(tCK) - 3];
}
/* Get CKE based on MC frequency */
static u8 get_CKE(u32 tCK)
{
	static const u8 frq_cke_map[] = {
	/*	FRQ: 3, 4, 5, 6, 7, 8, */
		     3, 3, 4, 4, 5, 6,
	};
	return frq_cke_map[get_FRQ(tCK) - 3];
}
/* Get XPDLL based on MC frequency */
static u8 get_XPDLL(u32 tCK)
{
	static const u8 frq_xpdll_map[] = {
	/*	FRQ:  3,  4,  5,  6,  7,  8, */
		     10, 13, 16, 20, 23, 26,
	};
	return frq_xpdll_map[get_FRQ(tCK) - 3];
}
/* Get XP based on MC frequency */
static u8 get_XP(u32 tCK)
{
	static const u8 frq_xp_map[] = {
	/*	FRQ: 3, 4, 5, 6, 7, 8, */
		     3, 4, 4, 5, 6, 7,
	};
	return frq_xp_map[get_FRQ(tCK) - 3];
}
/* Get AONPD based on MC frequency */
static u8 get_AONPD(u32 tCK)
{
	static const u8 frq_aonpd_map[] = {
	/*	FRQ: 3, 4, 5, 6, 7,  8, */
		     4, 5, 6, 8, 8, 10,
	};
	return frq_aonpd_map[get_FRQ(tCK) - 3];
}
/* Get COMP2 based on MC frequency */
static u32 get_COMP2(u32 tCK)
{
	static const u32 frq_comp2_map[] = {
	/*	FRQ:	     3,          4,          5,          6,          7,          8, */
		0x0D6BEDCC, 0x0CE7C34C, 0x0CA57A4C, 0x0C6369CC, 0x0C42514C, 0x0C21410C,
	};
	return frq_comp2_map[get_FRQ(tCK) - 3];
}
@ -154,21 +143,23 @@ static void snb_normalize_tclk(u32 *tclk)
static void find_cas_tck(ramctr_timing *ctrl)
{
u8 val;
u32 val32;
/* Find CAS latency */
while (1) {
/* Normalising tCK before computing clock could potentially
* results in lower selected CAS, which is desired.
/*
* Normalising tCK before computing clock could potentially
* result in a lower selected CAS, which is desired.
*/
snb_normalize_tclk(&(ctrl->tCK));
if (!(ctrl->tCK))
die("Couldn't find compatible clock / CAS settings\n");
val = DIV_ROUND_UP(ctrl->tAA, ctrl->tCK);
printk(BIOS_DEBUG, "Trying CAS %u, tCK %u.\n", val, ctrl->tCK);
for (; val <= MAX_CAS; val++)
if ((ctrl->cas_supported >> (val - MIN_CAS)) & 1)
break;
if (val == (MAX_CAS + 1)) {
ctrl->tCK++;
continue;
@ -178,18 +169,17 @@ static void find_cas_tck(ramctr_timing *ctrl)
}
}
val32 = NS2MHZ_DIV256 / ctrl->tCK;
printk(BIOS_DEBUG, "Selected DRAM frequency: %u MHz\n", val32);
printk(BIOS_DEBUG, "Selected DRAM frequency: %u MHz\n", NS2MHZ_DIV256 / ctrl->tCK);
printk(BIOS_DEBUG, "Selected CAS latency : %uT\n", val);
ctrl->CAS = val;
}
static void dram_timing(ramctr_timing *ctrl)
{
/* Maximum supported DDR3 frequency is 1066MHz (DDR3 2133) so make sure
* we cap it if we have faster DIMMs.
* Then, align it to the closest JEDEC standard frequency */
/*
* On Sandy Bridge, the maximum supported DDR3 frequency is 1066MHz (DDR3 2133).
* Cap it for faster DIMMs, and align it to the closest JEDEC standard frequency.
*/
if (ctrl->tCK == TCK_1066MHZ) {
ctrl->edge_offset[0] = 16;
ctrl->edge_offset[1] = 7;
@ -198,6 +188,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[1] = 7;
ctrl->timC_offset[2] = 7;
ctrl->pi_coding_threshold = 13;
} else if (ctrl->tCK == TCK_933MHZ) {
ctrl->edge_offset[0] = 14;
ctrl->edge_offset[1] = 6;
@ -206,6 +197,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[1] = 6;
ctrl->timC_offset[2] = 6;
ctrl->pi_coding_threshold = 15;
} else if (ctrl->tCK == TCK_800MHZ) {
ctrl->edge_offset[0] = 13;
ctrl->edge_offset[1] = 5;
@ -214,6 +206,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[1] = 5;
ctrl->timC_offset[2] = 5;
ctrl->pi_coding_threshold = 15;
} else if (ctrl->tCK == TCK_666MHZ) {
ctrl->edge_offset[0] = 10;
ctrl->edge_offset[1] = 4;
@ -222,6 +215,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[1] = 4;
ctrl->timC_offset[2] = 4;
ctrl->pi_coding_threshold = 16;
} else if (ctrl->tCK == TCK_533MHZ) {
ctrl->edge_offset[0] = 8;
ctrl->edge_offset[1] = 3;
@ -230,6 +224,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[1] = 3;
ctrl->timC_offset[2] = 3;
ctrl->pi_coding_threshold = 17;
} else {
ctrl->tCK = TCK_400MHZ;
ctrl->edge_offset[0] = 6;
@ -251,6 +246,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->CWL = DIV_ROUND_UP(ctrl->tCWL, ctrl->tCK);
else
ctrl->CWL = get_CWL(ctrl->tCK);
printk(BIOS_DEBUG, "Selected CWL latency : %uT\n", ctrl->CWL);
/* Find tRCD */
@ -285,7 +281,7 @@ static void dram_timing(ramctr_timing *ctrl)
printk(BIOS_DEBUG, "Selected tWTR : %uT\n", ctrl->tWTR);
/* Refresh-to-Active or Refresh-to-Refresh (tRFC) */
ctrl->tRFC = DIV_ROUND_UP(ctrl->tRFC, ctrl->tCK - 1);
ctrl->tRFC = DIV_ROUND_UP(ctrl->tRFC, ctrl->tCK - 1); /* FIXME: Why the -1 ? */
printk(BIOS_DEBUG, "Selected tRFC : %uT\n", ctrl->tRFC);
ctrl->tREFI = get_REFI(ctrl->tCK);
@ -300,10 +296,10 @@ static void dram_timing(ramctr_timing *ctrl)
static void dram_freq(ramctr_timing *ctrl)
{
if (ctrl->tCK > TCK_400MHZ) {
printk(BIOS_ERR, "DRAM frequency is under lowest supported "
"frequency (400 MHz). Increasing to 400 MHz as last resort");
printk(BIOS_ERR,
"DRAM frequency is under lowest supported frequency (400 MHz). "
"Increasing to 400 MHz as last resort");
ctrl->tCK = TCK_400MHZ;
}
@ -311,13 +307,15 @@ static void dram_freq(ramctr_timing *ctrl)
u8 val2;
u32 reg1 = 0;
/* Step 1 - Set target PCU frequency */
find_cas_tck(ctrl);
/* Frequency multiplier. */
u32 FRQ = get_FRQ(ctrl->tCK);
/* Frequency multiplier */
const u32 FRQ = get_FRQ(ctrl->tCK);
/* The PLL will never lock if the required frequency is
* already set. Exit early to prevent a system hang.
/*
* The PLL will never lock if the required frequency is already set.
* Exit early to prevent a system hang.
*/
reg1 = MCHBAR32(MC_BIOS_DATA);
val2 = (u8) reg1;
@ -326,7 +324,7 @@ static void dram_freq(ramctr_timing *ctrl)
/* Step 1 - Select frequency in the MCU */
reg1 = FRQ;
reg1 |= 0x80000000; // set running bit
reg1 |= 0x80000000; /* set running bit */
MCHBAR32(MC_BIOS_REQ) = reg1;
int i=0;
printk(BIOS_DEBUG, "PLL busy... ");
@ -352,61 +350,57 @@ static void dram_freq(ramctr_timing *ctrl)
/* Program DDR I/O registers: rank maps, per-channel timings and RCOMP/COMP settings */
static void dram_ioregs(ramctr_timing *ctrl)
{
	u32 reg;
	int channel;

	/* IO clock */
	FOR_ALL_CHANNELS {
		MCHBAR32(GDCRCLKRANKSUSED_ch(channel)) = ctrl->rankmap[channel];
	}

	/* IO command */
	FOR_ALL_CHANNELS {
		MCHBAR32(GDCRCTLRANKSUSED_ch(channel)) = ctrl->rankmap[channel];
	}

	/* IO control */
	FOR_ALL_POPULATED_CHANNELS {
		program_timings(ctrl, channel);
	}

	/* Perform RCOMP: poll until bit 16 indicates completion */
	printram("RCOMP...");
	while (!(MCHBAR32(RCOMP_TIMER) & (1 << 16)))
		;

	printram("done\n");

	/* Set COMP2 */
	MCHBAR32(CRCOMPOFST2) = get_COMP2(ctrl->tCK);
	printram("COMP2 done\n");

	/* Set COMP1 */
	FOR_ALL_POPULATED_CHANNELS {
		reg = MCHBAR32(CRCOMPOFST1_ch(channel));
		reg = (reg & ~0x00000e00) | (1 <<  9);	/* ODT */
		reg = (reg & ~0x00e00000) | (1 << 21);	/* clk drive up */
		reg = (reg & ~0x38000000) | (1 << 27);	/* ctl drive up */
		MCHBAR32(CRCOMPOFST1_ch(channel)) = reg;
	}
	printram("COMP1 done\n");

	printram("FORCE RCOMP and wait 20us...");
	MCHBAR32(M_COMP) |= (1 << 8);
	udelay(20);
	printram("done\n");
}
int try_init_dram_ddr3_sandy(ramctr_timing *ctrl, int fast_boot,
int s3_resume, int me_uma_size)
int try_init_dram_ddr3_snb(ramctr_timing *ctrl, int fast_boot, int s3_resume, int me_uma_size)
{
int err;
printk(BIOS_DEBUG, "Starting SandyBridge RAM training (%d).\n",
fast_boot);
printk(BIOS_DEBUG, "Starting SandyBridge RAM training (%d).\n", fast_boot);
if (!fast_boot) {
/* Find fastest common supported parameters */
@ -415,7 +409,7 @@ int try_init_dram_ddr3_sandy(ramctr_timing *ctrl, int fast_boot,
dram_dimm_mapping(ctrl);
}
/* Set MCU frequency */
/* Set MC frequency */
dram_freq(ctrl);
if (!fast_boot) {
@ -424,7 +418,7 @@ int try_init_dram_ddr3_sandy(ramctr_timing *ctrl, int fast_boot,
}
/* Set version register */
MCHBAR32(MRC_REVISION) = 0xC04EB002;
MCHBAR32(MRC_REVISION) = 0xc04eb002;
/* Enable crossover */
dram_xover(ctrl);
@ -438,11 +432,11 @@ int try_init_dram_ddr3_sandy(ramctr_timing *ctrl, int fast_boot,
/* Set scheduler chicken bits */
MCHBAR32(SCHED_CBIT) = 0x10100005;
/* Set CPU specific register */
set_4f8c();
/* Set up watermarks and starvation counter */
set_wmm_behavior();
/* Clear IO reset bit */
MCHBAR32(MC_INIT_STATE_G) &= ~0x20;
MCHBAR32(MC_INIT_STATE_G) &= ~(1 << 5);
/* Set MAD-DIMM registers */
dram_dimm_set_mapping(ctrl);
@ -464,7 +458,7 @@ int try_init_dram_ddr3_sandy(ramctr_timing *ctrl, int fast_boot,
if (fast_boot) {
restore_timings(ctrl);
} else {
/* Do jedec ddr3 reset sequence */
/* Do JEDEC DDR3 reset sequence */
dram_jedecreset(ctrl);
printk(BIOS_DEBUG, "Done jedec reset\n");
@ -508,7 +502,7 @@ int try_init_dram_ddr3_sandy(ramctr_timing *ctrl, int fast_boot,
normalize_training(ctrl);
}
set_4008c(ctrl);
set_read_write_timings(ctrl);
write_controller_mr(ctrl);

View File

@ -39,20 +39,18 @@ static void early_pch_reset_pmcon(void)
{
	u8 reg8;

	/* Reset RTC power status */
	reg8 = pci_read_config8(PCH_LPC_DEV, GEN_PMCON_3);
	reg8 &= ~(1 << 2);
	pci_write_config8(PCH_LPC_DEV, GEN_PMCON_3, reg8);
}
/* Platform has no romstage entry point under mainboard directory,
* so this one is named with prefix mainboard.
*/
/* The romstage entry point for this platform is not mainboard-specific, hence the name */
void mainboard_romstage_entry(void)
{
int s3resume = 0;
if (MCHBAR16(SSKPD) == 0xCAFE)
if (MCHBAR16(SSKPD_HI) == 0xCAFE)
system_reset();
enable_lapic();
@ -60,14 +58,12 @@ void mainboard_romstage_entry(void)
/* Init LPC, GPIO, BARs, disable watchdog ... */
early_pch_init();
/* USB is initialized in MRC if MRC is used. */
/* When using MRC, USB is initialized by MRC */
if (CONFIG(USE_NATIVE_RAMINIT)) {
early_usb_init(mainboard_usb_ports);
}
/* Perform some early chipset initialization required
* before RAM initialization can work
*/
/* Perform some early chipset init needed before RAM initialization can work */
systemagent_early_init();
printk(BIOS_DEBUG, "Back from systemagent_early_init()\n");

View File

@ -43,8 +43,8 @@
#define DEFAULT_EPBAR 0xfed19000 /* 4 KB */
#define DEFAULT_RCBABASE ((u8 *)0xfed1c000)
#define IOMMU_BASE1 0xfed90000ULL
#define IOMMU_BASE2 0xfed91000ULL
#define GFXVT_BASE 0xfed90000ULL
#define VTVC0_BASE 0xfed91000ULL
/* Everything below this line is ignored in the DSDT */
#ifndef __ACPI__
@ -58,14 +58,12 @@ enum platform_type {
/* Device 0:0.0 PCI configuration space (Host Bridge) */
#define HOST_BRIDGE PCI_DEV(0, 0, 0)
#define EPBAR 0x40
#define MCHBAR 0x48
#define PCIEXBAR 0x60
#define DMIBAR 0x68
#define GGC 0x50 /* GMCH Graphics Control */
#define DEVEN 0x54 /* Device Enable */
#define DEVEN_D7EN (1 << 14)
#define DEVEN_PEG60 (1 << 13)
@ -79,6 +77,9 @@ enum platform_type {
#define PAVPC 0x58 /* Protected Audio Video Path Control */
#define DPR 0x5c /* DMA Protected Range */
#define PCIEXBAR 0x60
#define DMIBAR 0x68
#define MESEG_BASE 0x70
#define MESEG_MASK 0x78
#define MELCK (1 << 10) /* ME Range Lock */
@ -109,6 +110,13 @@ enum platform_type {
#define SKPAD 0xdc /* Scratchpad Data */
#define DIDOR 0xf3 /* Device ID override, for debug and samples only */
/* Devices 0:1.0, 0:1.1, 0:1.2, 0:6.0 PCI configuration space (PCI Express Graphics) */
#define AFE_PWRON 0xc24 /* PEG Analog Front-End Power-On */
/* Device 0:2.0 PCI configuration space (Graphics Device) */
@ -121,237 +129,18 @@ enum platform_type {
#define MCHBAR8(x) (*((volatile u8 *)(DEFAULT_MCHBAR + (x))))
#define MCHBAR16(x) (*((volatile u16 *)(DEFAULT_MCHBAR + (x))))
#define MCHBAR32(x) (*((volatile u32 *)(DEFAULT_MCHBAR + (x))))
/* Read-modify-write helpers for MCHBAR registers */
#define MCHBAR8_AND(x,  and) (MCHBAR8(x)  = MCHBAR8(x)  & (and))
#define MCHBAR16_AND(x, and) (MCHBAR16(x) = MCHBAR16(x) & (and))
#define MCHBAR32_AND(x, and) (MCHBAR32(x) = MCHBAR32(x) & (and))
#define MCHBAR8_OR(x,  or) (MCHBAR8(x)  = MCHBAR8(x)  | (or))
#define MCHBAR16_OR(x, or) (MCHBAR16(x) = MCHBAR16(x) | (or))
#define MCHBAR32_OR(x, or) (MCHBAR32(x) = MCHBAR32(x) | (or))
#define MCHBAR8_AND_OR(x,  and, or) (MCHBAR8(x)  = (MCHBAR8(x)  & (and)) | (or))
#define MCHBAR16_AND_OR(x, and, or) (MCHBAR16(x) = (MCHBAR16(x) & (and)) | (or))
#define MCHBAR32_AND_OR(x, and, or) (MCHBAR32(x) = (MCHBAR32(x) & (and)) | (or))
/* Indexed register helper macros */
#define Gz(r, z) ((r) + ((z) << 8))
#define Ly(r, y) ((r) + ((y) << 2))
#define Cx(r, x) ((r) + ((x) << 10))
#define CxLy(r, x, y) ((r) + ((x) << 10) + ((y) << 2))
#define GzLy(r, z, y) ((r) + ((z) << 8) + ((y) << 2))
/* byte lane training register base addresses */
#define LANEBASE_B0 0x0000
#define LANEBASE_B1 0x0200
#define LANEBASE_B2 0x0400
#define LANEBASE_B3 0x0600
#define LANEBASE_ECC 0x0800 /* ECC lane is in the middle of the data lanes */
#define LANEBASE_B4 0x1000
#define LANEBASE_B5 0x1200
#define LANEBASE_B6 0x1400
#define LANEBASE_B7 0x1600
/* byte lane register offsets */
#define GDCRTRAININGRESULT(ch, y) GzLy(0x0004, ch, y) /* Test results for PI config */
#define GDCRTRAININGRESULT1(ch) GDCRTRAININGRESULT(ch, 0) /* 0x0004 */
#define GDCRTRAININGRESULT2(ch) GDCRTRAININGRESULT(ch, 1) /* 0x0008 */
#define GDCRRX(ch, rank) GzLy(0x10, ch, rank) /* Time setting for lane Rx */
#define GDCRTX(ch, rank) GzLy(0x20, ch, rank) /* Time setting for lane Tx */
/* Register definitions */
#define GDCRCLKRANKSUSED_ch(ch) Gz(0x0c00, ch) /* Indicates which rank is populated */
#define GDCRCLKCOMP_ch(ch) Gz(0x0c04, ch) /* RCOMP result register */
#define GDCRCKPICODE_ch(ch) Gz(0x0c14, ch) /* PI coding for DDR CLK pins */
#define GDCRCKLOGICDELAY_ch(ch) Gz(0x0c18, ch) /* Logic delay of 1 QCLK in CLK slice */
#define GDDLLFUSE_ch(ch) Gz(0x0c20, ch) /* Used for fuse download to the DLLs */
#define GDCRCLKDEBUGMUXCFG_ch(ch) Gz(0x0c3c, ch) /* Debug MUX control */
#define GDCRCMDDEBUGMUXCFG_Cz_S(ch) Gz(0x0e3c, ch) /* Debug MUX control */
#define CRCOMPOFST1_ch(ch) Gz(0x1810, ch) /* DQ, CTL and CLK Offset values */
#define GDCRTRAININGMOD_ch(ch) Gz(0x3000, ch) /* Data training mode control */
#define GDCRTRAININGRESULT1_ch(ch) Gz(0x3004, ch) /* Training results according to PI */
#define GDCRTRAININGRESULT2_ch(ch) Gz(0x3008, ch)
#define GDCRCTLRANKSUSED_ch(ch) Gz(0x3200, ch) /* Indicates which rank is populated */
#define GDCRCMDCOMP_ch(ch) Gz(0x3204, ch) /* COMP values register */
#define GDCRCMDCTLCOMP_ch(ch) Gz(0x3208, ch) /* COMP values register */
#define GDCRCMDPICODING_ch(ch) Gz(0x320c, ch) /* Command and control PI coding */
#define GDCRTRAININGMOD 0x3400 /* Data training mode control register */
#define GDCRDATACOMP 0x340c /* COMP values register */
#define CRCOMPOFST2 0x3714 /* CMD DRV, SComp and Static Leg controls */
/* MC per-channel registers */
#define TC_DBP_ch(ch) Cx(0x4000, ch) /* Timings: BIN */
#define TC_RAP_ch(ch) Cx(0x4004, ch) /* Timings: Regular access */
#define TC_RWP_ch(ch) Cx(0x4008, ch) /* Timings: Read / Write */
#define TC_OTHP_ch(ch) Cx(0x400c, ch) /* Timings: Other parameters */
#define SCHED_SECOND_CBIT_ch(ch) Cx(0x401c, ch) /* More chicken bits */
#define SCHED_CBIT_ch(ch) Cx(0x4020, ch) /* Chicken bits in scheduler */
#define SC_ROUNDT_LAT_ch(ch) Cx(0x4024, ch) /* Round-trip latency per rank */
#define SC_IO_LATENCY_ch(ch) Cx(0x4028, ch) /* IO Latency Configuration */
#define SCRAMBLING_SEED_1_ch(ch) Cx(0x4034, ch) /* Scrambling seed 1 */
#define SCRAMBLING_SEED_2_LOW_ch(ch) Cx(0x4038, ch) /* Scrambling seed 2 low */
#define SCRAMBLING_SEED_2_HIGH_ch(ch) Cx(0x403c, ch) /* Scrambling seed 2 high */
/* IOSAV Bytelane Bit-wise error */
#define IOSAV_By_BW_SERROR_ch(ch, y) CxLy(0x4040, ch, y)
/* IOSAV Bytelane Bit-wise compare mask */
#define IOSAV_By_BW_MASK_ch(ch, y) CxLy(0x4080, ch, y)
/*
* Defines the number of transactions (non-VC1 RD CAS commands) between two priority ticks.
* Different counters for transactions that are issued on the ring agents (core or GT) and
* transactions issued in the SA.
*/
#define SC_PR_CNT_CONFIG_ch(ch) Cx(0x40a8, ch)
#define SC_PCIT_ch(ch) Cx(0x40ac, ch) /* Page-close idle timer setup - 8 bits */
#define PM_PDWN_CONFIG_ch(ch) Cx(0x40b0, ch) /* Power-down (CKE-off) operation config */
#define ECC_INJECT_COUNT_ch(ch) Cx(0x40b4, ch) /* ECC error injection count */
#define ECC_DFT_ch(ch) Cx(0x40b8, ch) /* ECC DFT features (ECC4ANA, error inject) */
#define SC_WR_ADD_DELAY_ch(ch) Cx(0x40d0, ch) /* Extra WR delay to overcome WR-flyby issue */
#define IOSAV_By_BW_SERROR_C_ch(ch, y) CxLy(0x4140, ch, y) /* IOSAV Bytelane Bit-wise error */
/* IOSAV sub-sequence control registers */
#define IOSAV_n_SP_CMD_ADDR_ch(ch, y) CxLy(0x4200, ch, y) /* Special command address. */
#define IOSAV_n_ADDR_UPD_ch(ch, y) CxLy(0x4210, ch, y) /* Address update control */
#define IOSAV_n_SP_CMD_CTL_ch(ch, y) CxLy(0x4220, ch, y) /* Control of command signals */
#define IOSAV_n_SUBSEQ_CTL_ch(ch, y) CxLy(0x4230, ch, y) /* Sub-sequence controls */
#define IOSAV_n_ADDRESS_LFSR_ch(ch, y) CxLy(0x4240, ch, y) /* 23-bit LFSR state value */
#define PM_THML_STAT_ch(ch) Cx(0x4280, ch) /* Thermal status of each rank */
#define IOSAV_SEQ_CTL_ch(ch) Cx(0x4284, ch) /* IOSAV sequence level control */
#define IOSAV_DATA_CTL_ch(ch) Cx(0x4288, ch) /* Data control in IOSAV mode */
#define IOSAV_STATUS_ch(ch) Cx(0x428c, ch) /* State of the IOSAV sequence machine */
#define TC_ZQCAL_ch(ch) Cx(0x4290, ch) /* ZQCAL control register */
#define TC_RFP_ch(ch) Cx(0x4294, ch) /* Refresh Parameters */
#define TC_RFTP_ch(ch) Cx(0x4298, ch) /* Refresh Timing Parameters */
#define TC_MR2_SHADOW_ch(ch) Cx(0x429c, ch) /* MR2 shadow - copy of DDR configuration */
#define MC_INIT_STATE_ch(ch) Cx(0x42a0, ch) /* IOSAV mode control */
#define TC_SRFTP_ch(ch) Cx(0x42a4, ch) /* Self-refresh timing parameters */
#define IOSAV_ERROR_ch(ch) Cx(0x42ac, ch) /* Data vector count of the first error */
#define IOSAV_DC_MASK_ch(ch) Cx(0x42b0, ch) /* IOSAV data check masking */
#define IOSAV_By_ERROR_COUNT_ch(ch, y) CxLy(0x4340, ch, y) /* Per-byte 16-bit error count */
#define IOSAV_G_ERROR_COUNT_ch(ch) Cx(0x4364, ch) /* Global 16-bit error count */
#define PM_TRML_M_CONFIG_ch(ch) Cx(0x4380, ch) /* Thermal mode configuration */
#define PM_CMD_PWR_ch(ch) Cx(0x4384, ch) /* Power contribution of commands */
#define PM_BW_LIMIT_CONFIG_ch(ch) Cx(0x4388, ch) /* Bandwidth throttling on overtemp */
#define SC_WDBWM_ch(ch) Cx(0x438c, ch) /* Watermarks and starvation counter */
/* MC Channel Broadcast registers */
#define TC_DBP 0x4c00 /* Timings: BIN */
#define TC_RAP 0x4c04 /* Timings: Regular access */
#define TC_RWP 0x4c08 /* Timings: Read / Write */
#define TC_OTHP 0x4c0c /* Timings: Other parameters */
#define SCHED_SECOND_CBIT 0x4c1c /* More chicken bits */
#define SCHED_CBIT 0x4c20 /* Chicken bits in scheduler */
#define SC_ROUNDT_LAT 0x4c24 /* Round-trip latency per rank */
#define SC_IO_LATENCY 0x4c28 /* IO Latency Configuration */
#define SCRAMBLING_SEED_1 0x4c34 /* Scrambling seed 1 */
#define SCRAMBLING_SEED_2_LOW 0x4c38 /* Scrambling seed 2 low */
#define SCRAMBLING_SEED_2_HIGH 0x4c3c /* Scrambling seed 2 high */
#define IOSAV_By_BW_SERROR(y) Ly(0x4c40, y) /* IOSAV Bytelane Bit-wise error */
#define IOSAV_By_BW_MASK(y) Ly(0x4c80, y) /* IOSAV Bytelane Bit-wise compare mask */
/*
* Defines the number of transactions (non-VC1 RD CAS commands) between two priority ticks.
* Different counters for transactions that are issued on the ring agents (core or GT) and
* transactions issued in the SA.
*/
#define SC_PR_CNT_CONFIG 0x4ca8
#define SC_PCIT 0x4cac /* Page-close idle timer setup - 8 bits */
#define PM_PDWN_CONFIG 0x4cb0 /* Power-down (CKE-off) operation config */
#define ECC_INJECT_COUNT 0x4cb4 /* ECC error injection count */
#define ECC_DFT 0x4cb8 /* ECC DFT features (ECC4ANA, error inject) */
#define SC_WR_ADD_DELAY 0x4cd0 /* Extra WR delay to overcome WR-flyby issue */
/* Opportunistic reads configuration during write-major-mode (WMM) */
#define WMM_READ_CONFIG 0x4cd4 /** WARNING: Only exists on IVB! */
#define IOSAV_By_BW_SERROR_C(y) Ly(0x4d40, y) /* IOSAV Bytelane Bit-wise error */
#define IOSAV_n_SP_CMD_ADDR(n) Ly(0x4e00, n) /* Sub-sequence special command address */
#define IOSAV_n_ADDR_UPD(n) Ly(0x4e10, n) /* Address update after command execution */
#define IOSAV_n_SP_CMD_CTL(n) Ly(0x4e20, n) /* Command signals in sub-sequence command */
#define IOSAV_n_SUBSEQ_CTL(n) Ly(0x4e30, n) /* Sub-sequence command parameter control */
#define IOSAV_n_ADDRESS_LFSR(n) Ly(0x4e40, n) /* 23-bit LFSR value of the sequence */
#define PM_THML_STAT 0x4e80 /* Thermal status of each rank */
#define IOSAV_SEQ_CTL 0x4e84 /* IOSAV sequence level control */
#define IOSAV_DATA_CTL 0x4e88 /* Data control in IOSAV mode */
#define IOSAV_STATUS 0x4e8c /* State of the IOSAV sequence machine */
#define TC_ZQCAL 0x4e90 /* ZQCAL control register */
#define TC_RFP 0x4e94 /* Refresh Parameters */
#define TC_RFTP 0x4e98 /* Refresh Timing Parameters */
#define TC_MR2_SHADOW 0x4e9c /* MR2 shadow - copy of DDR configuration */
#define MC_INIT_STATE 0x4ea0 /* IOSAV mode control */
#define TC_SRFTP 0x4ea4 /* Self-refresh timing parameters */
/*
* Auxiliary register in mcmnts synthesis FUB (Functional Unit Block). Additionally, this
* register is also used to enable IOSAV_n_SP_CMD_ADDR optimization on Ivy Bridge.
*/
#define MCMNTS_SPARE 0x4ea8 /** WARNING: Reserved, use only on IVB! */
#define IOSAV_ERROR 0x4eac /* Data vector count of the first error */
#define IOSAV_DC_MASK 0x4eb0 /* IOSAV data check masking */
#define IOSAV_By_ERROR_COUNT(y) Ly(0x4f40, y) /* Per-byte 16-bit error counter */
#define IOSAV_G_ERROR_COUNT 0x4f64 /* Global 16-bit error counter */
#define PM_TRML_M_CONFIG 0x4f80 /* Thermal mode configuration */
#define PM_CMD_PWR 0x4f84 /* Power contribution of commands */
#define PM_BW_LIMIT_CONFIG 0x4f88 /* Bandwidth throttling on overtemperature */
#define SC_WDBWM 0x4f8c /* Watermarks and starvation counter config */
#define MAD_CHNL 0x5000 /* Address Decoder Channel Configuration */
#define MAD_DIMM_CH0 0x5004 /* Address Decode Channel 0 */
#define MAD_DIMM_CH1 0x5008 /* Address Decode Channel 1 */
#define MAD_DIMM_CH2 0x500c /* Address Decode Channel 2 (unused on SNB) */
#define MAD_ZR 0x5014 /* Address Decode Zones */
#define MCDECS_SPARE 0x5018 /* Spare register in mcdecs synthesis FUB */
#define MCDECS_CBIT 0x501c /* Chicken bits in mcdecs synthesis FUB */
#define CHANNEL_HASH 0x5024 /** WARNING: Only exists on IVB! */
#define MC_INIT_STATE_G 0x5030 /* High-level behavior in IOSAV mode */
#define MRC_REVISION 0x5034 /* MRC Revision */
#define PM_DLL_CONFIG 0x5064 /* Memory Controller I/O DLL config */
#define RCOMP_TIMER 0x5084 /* RCOMP evaluation timer register */
#define MC_LOCK 0x50fc /* Memory Controlller Lock register */
#define VTD1_BASE 0x5400 /* Base address for IGD */
#define VTD2_BASE 0x5410 /* Base address for PEG, USB, SATA, etc. */
#define PAIR_CTL 0x5418 /* Power Aware Interrupt Routing Control */
/* PAVP control register, undocumented. Different from PAVPC on PCI config space. */
#define MMIO_PAVP_CTL 0x5500 /* Bit 0 locks PAVP settings */
#define MEM_TRML_ESTIMATION_CONFIG 0x5880
#define MEM_TRML_THRESHOLDS_CONFIG 0x5888
#define MEM_TRML_INTERRUPT 0x58a8
#define MC_TURBO_PL1 0x59a0 /* Turbo Power Limit 1 parameters */
#define MC_TURBO_PL2 0x59a4 /* Turbo Power Limit 2 parameters */
#define SSKPD_OK 0x5d10 /* 64-bit scratchpad register */
#define SSKPD 0x5d14 /* 16bit (scratchpad) */
#define BIOS_RESET_CPL 0x5da8 /* 8bit */
/* PCODE will sample SAPM-related registers at the end of Phase 4. */
#define MC_BIOS_REQ 0x5e00 /* Memory frequency request register */
#define MC_BIOS_DATA 0x5e04 /* Miscellaneous information for BIOS */
#define SAPMCTL 0x5f00 /* Bit 3 enables DDR EPG (C7i) on IVB */
#define M_COMP 0x5f08 /* Memory COMP control */
#define SAPMTIMERS 0x5f10 /* SAPM timers in 10ns (100 MHz) units */
/* WARNING: Only applies to Sandy Bridge! */
#define BANDTIMERS_SNB 0x5f18 /* MPLL and PPLL time to do self-banding */
/** WARNING: Only applies to Ivy Bridge! */
#define SAPMTIMERS2_IVB 0x5f18 /** Extra latency for DDRIO EPG exit (C7i) */
#define BANDTIMERS_IVB 0x5f20 /** MPLL and PPLL time to do self-banding */
/* As there are many registers, define them on a separate file */
#include "mchbar_regs.h"
/*
* EPBAR - Egress Port Root Complex Register Block
@ -436,7 +225,6 @@ enum platform_type {
#ifndef __ASSEMBLER__
void intel_sandybridge_finalize_smm(void);
int bridge_silicon_revision(void);
void systemagent_early_init(void);
void sandybridge_init_iommu(void);
@ -444,8 +232,7 @@ void sandybridge_late_initialization(void);
void northbridge_romstage_finalize(int s3resume);
void early_init_dmi(void);
/* mainboard_early_init: Optional mainboard callback run after console init
but before raminit. */
/* mainboard_early_init: Optional callback, run after console init but before raminit. */
void mainboard_early_init(int s3resume);
int mainboard_should_reset_usb(int s3resume);
void perform_raminit(int s3resume);
@ -454,7 +241,8 @@ enum platform_type get_platform_type(void);
#include <device/device.h>
struct acpi_rsdp;
unsigned long northbridge_write_acpi_tables(struct device *device, unsigned long start, struct acpi_rsdp *rsdp);
unsigned long northbridge_write_acpi_tables(struct device *device, unsigned long start,
struct acpi_rsdp *rsdp);
#endif
#endif