src/cpu: Capitalize CPU

Change-Id: I58d5c16de796a91fa14d8db78722024266c09a94
Signed-off-by: Elyes HAOUAS <ehaouas@noos.fr>
Reviewed-on: https://review.coreboot.org/15934
Tested-by: build bot (Jenkins)
Reviewed-by: Paul Menzel <paulepanter@users.sourceforge.net>
Reviewed-by: Martin Roth <martinroth@google.com>
Author: Elyes HAOUAS
Date: 2016-07-28 18:58:27 +02:00
Committed by: Martin Roth
Parent: 918535a657
Commit: d82be923b1

54 changed files with 102 additions and 102 deletions


@@ -54,7 +54,7 @@ _start16bit:
* If we are hyperthreaded or we have multiple cores it is bad,
* for SMP startup. On Opterons it causes a 5 second delay.
* Invalidating the cache was pure paranoia in any event.
- * If you cpu needs it you can write a cpu dependent version of
+ * If you CPU needs it you can write a CPU dependent version of
* entry16.inc.
*/


@@ -242,10 +242,10 @@ static int lapic_start_cpu(unsigned long apicid)
static atomic_t active_cpus = ATOMIC_INIT(1);
/* start_cpu_lock covers last_cpu_index and secondary_stack.
- * Only starting one cpu at a time let's me remove the logic
+ * Only starting one CPU at a time let's me remove the logic
* for select the stack from assembly language.
*
- * In addition communicating by variables to the cpu I
+ * In addition communicating by variables to the CPU I
* am starting allows me to verify it has started before
* start_cpu returns.
*/
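
The comment block above describes the bring-up policy: a lock so that only one CPU is started at a time, plus a shared variable the new CPU writes so start_cpu() can verify it really came up. A rough standalone sketch of that scheme (not the coreboot implementation; the kick() callback and the C11 atomics are stand-ins):

#include <stdatomic.h>

/* start_cpu_lock covers last_cpu_index and secondary_stack (sketch only). */
static atomic_flag start_cpu_lock = ATOMIC_FLAG_INIT;
static int last_cpu_index;
static void *secondary_stack;

static int start_one_cpu(void *stack_for_ap, int (*kick)(int index))
{
	while (atomic_flag_test_and_set(&start_cpu_lock))
		;				/* only one CPU is started at a time */
	int index = ++last_cpu_index;
	secondary_stack = stack_for_ap;		/* handed to the new CPU */
	int started = kick(index);		/* e.g. send INIT/SIPI */
	atomic_flag_clear(&start_cpu_lock);
	return started;
}
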
@@ -301,12 +301,12 @@ int start_cpu(struct device *cpu)
cpu->enabled = 0;
cpu->initialized = 0;
- /* Start the cpu */
+ /* Start the CPU */
result = lapic_start_cpu(apicid);
if (result) {
result = 0;
- /* Wait 1s or until the new cpu calls in */
+ /* Wait 1s or until the new CPU calls in */
for(count = 0; count < 100000 ; count++) {
if (secondary_stack == 0) {
result = 1;
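
The loop above treats secondary_stack reaching zero as the sign that the new CPU has called in. The other half of that handshake, on the AP side, would conceptually amount to the following (a sketch under that assumption, not the real secondary start path):

/* Once the new CPU is running on the stack the BSP published, it clears
 * secondary_stack; the BSP's 1 s wait loop then sets result = 1. */
extern void *volatile secondary_stack;

static void secondary_cpu_report_in(void)
{
	secondary_stack = 0;
}
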
@@ -542,23 +542,23 @@ void initialize_cpus(struct bus *cpu_bus)
struct device_path cpu_path;
struct cpu_info *info;
- /* Find the info struct for this cpu */
+ /* Find the info struct for this CPU */
info = cpu_info();
#if NEED_LAPIC == 1
/* Ensure the local apic is enabled */
enable_lapic();
- /* Get the device path of the boot cpu */
+ /* Get the device path of the boot CPU */
cpu_path.type = DEVICE_PATH_APIC;
cpu_path.apic.apic_id = lapicid();
#else
- /* Get the device path of the boot cpu */
+ /* Get the device path of the boot CPU */
cpu_path.type = DEVICE_PATH_CPU;
cpu_path.cpu.id = 0;
#endif
- /* Find the device structure for the boot cpu */
+ /* Find the device structure for the boot CPU */
info->cpu = alloc_find_dev(cpu_bus, &cpu_path);
#if CONFIG_SMP && CONFIG_MAX_CPUS > 1
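
The hunk above builds the boot CPU's device path in one of two flavours, depending on whether a local APIC is in use. A simplified standalone model of that choice (the struct here is illustrative, not coreboot's struct device_path):

enum path_type { PATH_APIC, PATH_CPU };

struct cpu_path {
	enum path_type type;
	unsigned int id;		/* APIC id, or a plain CPU index */
};

static struct cpu_path boot_cpu_path(int have_lapic, unsigned int lapic_id)
{
	struct cpu_path p;
	if (have_lapic) {
		p.type = PATH_APIC;
		p.id = lapic_id;	/* lapicid() in the real code */
	} else {
		p.type = PATH_CPU;
		p.id = 0;		/* the boot CPU */
	}
	return p;
}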


@@ -375,7 +375,7 @@ static int allocate_cpu_devices(struct bus *cpu_bus, struct mp_params *p)
struct device *new;
int apic_id;
- /* Build the cpu device path */
+ /* Build the CPU device path */
cpu_path.type = DEVICE_PATH_APIC;
/* Assuming linear APIC space allocation. */
@@ -385,10 +385,10 @@ static int allocate_cpu_devices(struct bus *cpu_bus, struct mp_params *p)
}
cpu_path.apic.apic_id = apic_id;
- /* Allocate the new cpu device structure */
+ /* Allocate the new CPU device structure */
new = alloc_find_dev(cpu_bus, &cpu_path);
if (new == NULL) {
printk(BIOS_CRIT, "Could not allocate cpu device\n");
printk(BIOS_CRIT, "Could not allocate CPU device\n");
max_cpus--;
}
cpus[i].dev = new;
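
The two hunks above allocate one device per expected CPU, assuming linearly allocated APIC ids, and simply lower the usable CPU count when an allocation fails. A simplified model of that loop (the types and create_cpu_device() are placeholders, not coreboot's API):

#include <stddef.h>

struct cpu_dev { int apic_id; };

extern struct cpu_dev *create_cpu_device(int apic_id);	/* hypothetical */

static int allocate_cpu_slots(struct cpu_dev **slots, int max_cpus,
			      int bsp_apic_id)
{
	for (int i = 1; i < max_cpus; i++) {
		slots[i] = create_cpu_device(bsp_apic_id + i);	/* linear ids */
		if (slots[i] == NULL)
			max_cpus--;	/* as in the hunk: tolerate the failure */
	}
	return max_cpus;
}
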
@@ -577,7 +577,7 @@ static void init_bsp(struct bus *cpu_bus)
*
* The MP initialization has the following properties:
* 1. APs are brought up in parallel.
- * 2. The ordering of coreboot cpu number and APIC ids is not deterministic.
+ * 2. The ordering of coreboot CPU number and APIC ids is not deterministic.
* Therefore, one cannot rely on this property or the order of devices in
* the device tree unless the chipset or mainboard know the APIC ids
* a priori.
@@ -641,7 +641,7 @@ static void mp_initialize_cpu(void)
cpu_initialize(info->index);
}
- /* Returns apic id for coreboot cpu number or < 0 on failure. */
+ /* Returns apic id for coreboot CPU number or < 0 on failure. */
static int mp_get_apic_id(int cpu_slot)
{
if (cpu_slot >= CONFIG_MAX_CPUS || cpu_slot < 0)
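
The function above is a bounds-checked lookup from a coreboot CPU slot to its APIC id. A standalone sketch of the same idea (the table contents are made up for illustration):

#define MAX_CPUS 8

static const int apic_id_of_slot[MAX_CPUS] = { 0, 1, 2, 3, 4, 5, 6, 7 };

/* Return the APIC id for a CPU slot, or a negative value on failure. */
static int get_apic_id(int cpu_slot)
{
	if (cpu_slot >= MAX_CPUS || cpu_slot < 0)
		return -1;
	return apic_id_of_slot[cpu_slot];
}
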
@@ -843,7 +843,7 @@ static struct mp_flight_record mp_steps[] = {
MP_FR_BLOCK_APS(NULL, load_smm_handlers),
/* Perform SMM relocation. */
MP_FR_NOBLOCK_APS(trigger_smm_relocation, trigger_smm_relocation),
- /* Initialize each cpu through the driver framework. */
+ /* Initialize each CPU through the driver framework. */
MP_FR_BLOCK_APS(mp_initialize_cpu, mp_initialize_cpu),
/* Wait for APs to finish everything else then let them park. */
MP_FR_BLOCK_APS(NULL, NULL),
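
The mp_steps[] table above is a flight plan: each record names a BSP action and an AP action, and marks whether the BSP must wait for all APs before moving on. A simplified standalone model of that structure (not the coreboot types or macros):

struct flight_record {
	void (*bsp_action)(void);
	void (*ap_action)(void);
	int block_aps;		/* 1: BSP waits for every AP before the next step */
};

static void run_records(const struct flight_record *recs, int num,
			int is_bsp, void (*barrier)(void))
{
	for (int i = 0; i < num; i++) {
		void (*act)(void) = is_bsp ? recs[i].bsp_action
					   : recs[i].ap_action;
		if (act)
			act();
		if (recs[i].block_aps)
			barrier();	/* rendezvous before the next record */
	}
}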


@@ -93,7 +93,7 @@ _start:
mov idt_ptr, %ebx
lidt (%ebx)
- /* Obtain cpu number. */
+ /* Obtain CPU number. */
movl ap_count, %eax
1:
movl %eax, %ecx
@@ -107,7 +107,7 @@ _start:
movl stack_top, %edx
subl %eax, %edx
mov %edx, %esp
- /* Save cpu number. */
+ /* Save CPU number. */
mov %ecx, %esi
/* Determine if one should check microcode versions. */
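
In the assembly above each AP obtains a unique CPU number via ap_count (the atomic-increment loop is outside the hunk) and then carves its own stack below a shared stack_top. In C, the arithmetic looks roughly like this (a sketch; the stack-size stride is an assumption, in the real code it comes from the SIPI parameters):

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

static atomic_int ap_count;

static uintptr_t claim_ap_stack(uintptr_t stack_top, size_t stack_size,
				int *cpu_number)
{
	int cpu = atomic_fetch_add(&ap_count, 1);	/* unique per AP */
	*cpu_number = cpu;				/* saved, like %esi above */
	return stack_top - (uintptr_t)(cpu + 1) * stack_size;
}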


@@ -48,7 +48,7 @@ extern unsigned char _binary_smmstub_start[];
/* This is the SMM handler that the stub calls. It is encoded as an rmodule. */
extern unsigned char _binary_smm_start[];
- /* Per cpu minimum stack size. */
+ /* Per CPU minimum stack size. */
#define SMM_MINIMUM_STACK_SIZE 32
/*
@@ -75,7 +75,7 @@ static void smm_place_jmp_instructions(void *entry_start, int stride, int num,
struct smm_entry_ins entry = { .jmp_rel = 0xe9 };
/* Each entry point has an IP value of 0x8000. The SMBASE for each
- * cpu is different so the effective address of the entry instruction
+ * CPU is different so the effective address of the entry instruction
* is different. Therefore, the relative displacement for each entry
* instruction needs to be updated to reflect the current effective
* IP. Additionally, the IP result from the jmp instruction is
@@ -126,7 +126,7 @@ static void *smm_stub_place_stacks(char *base, int size,
}
/* Place the staggered entry points for each CPU. The entry points are
- * staggered by the per cpu SMM save state size extending down from
+ * staggered by the per CPU SMM save state size extending down from
* SMM_ENTRY_OFFSET. */
static void smm_stub_place_staggered_entry_points(char *base,
const struct smm_loader_params *params, const struct rmodule *smm_stub)
@@ -260,7 +260,7 @@ static int smm_module_setup_stub(void *smbase, struct smm_loader_params *params)
stub_params->runtime.smbase = (u32)smbase;
stub_params->runtime.save_state_size = params->per_cpu_save_state_size;
- /* Initialize the APIC id to cpu number table to be 1:1 */
+ /* Initialize the APIC id to CPU number table to be 1:1 */
for (i = 0; i < params->num_concurrent_stacks; i++)
stub_params->runtime.apic_id_to_cpu[i] = i;
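
The comments in the hunks above describe the SMM entry layout: each entry point sits one per-CPU save state size below the previous one, and each gets a 5-byte "jmp rel32" (opcode 0xe9) whose displacement is recomputed so that every CPU lands on the same stub. A standalone sketch of that placement (constants and names are illustrative, and little-endian byte order is assumed):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void place_jmp(uint8_t *entry, const uint8_t *target)
{
	/* rel32 is measured from the end of the 5-byte jmp instruction. */
	int32_t rel = (int32_t)(target - (entry + 5));
	entry[0] = 0xe9;
	memcpy(&entry[1], &rel, sizeof(rel));
}

static void place_staggered_entries(uint8_t *top_entry, int num_cpus,
				    size_t save_state_size, uint8_t *stub)
{
	for (int i = 0; i < num_cpus; i++)
		place_jmp(top_entry - (size_t)i * save_state_size, stub);
}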


@@ -40,15 +40,15 @@ smbase:
.long 0
save_state_size:
.long 0
- /* apic_to_cpu_num is a table mapping the default APIC id to cpu num. If the
-  * APIC id is found at the given index, the contiguous cpu number is index
+ /* apic_to_cpu_num is a table mapping the default APIC id to CPU num. If the
+  * APIC id is found at the given index, the contiguous CPU number is index
* into the table. */
apic_to_cpu_num:
.fill CONFIG_MAX_CPUS,1,0xff
/* end struct smm_runtime */
.data
- /* Provide fallback stack to use when a valid cpu number cannot be found. */
+ /* Provide fallback stack to use when a valid CPU number cannot be found. */
fallback_stack_bottom:
.skip 128
fallback_stack_top:
@@ -112,7 +112,7 @@ smm_trampoline32:
inc %ecx
cmp $CONFIG_MAX_CPUS, %ecx
jne 1b
- /* This is bad. One cannot find a stack entry because a cpu num could
+ /* This is bad. One cannot find a stack entry because a CPU num could
* not be assigned. Use the fallback stack and check this condition in
* C handler. */
movl $(fallback_stack_top), %esp
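
The scan above turns the running CPU's APIC id into a contiguous CPU number by walking apic_to_cpu_num, and falls back to an emergency stack when no entry matches. The same lookup in C (a sketch; 0xff marks unused slots as in the table definition above, and -1 stands in for "use the fallback stack"):

#include <stdint.h>

#define MAX_CPUS 4	/* stand-in for CONFIG_MAX_CPUS */

static uint8_t apic_to_cpu_num[MAX_CPUS] = { 0xff, 0xff, 0xff, 0xff };

static int cpu_num_from_apic_id(uint8_t apic_id)
{
	for (int i = 0; i < MAX_CPUS; i++)
		if (apic_to_cpu_num[i] == apic_id)
			return i;	/* the index is the CPU number */
	return -1;			/* caller should take the fallback stack */
}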


@@ -89,7 +89,7 @@
* 0xa0000-0xa0400 and the stub plus stack would need to go
* at 0xa8000-0xa8100 (example for core 0). That is not enough.
*
- * This means we're basically limited to 16 cpu cores before
+ * This means we're basically limited to 16 CPU cores before
* we need to move the SMM handler to TSEG.
*
* Note: Some versions of Pentium M need their SMBASE aligned to 32k.
@@ -167,7 +167,7 @@ smm_relocate:
outb %al, %dx
movb $'-', %al
outb %al, %dx
- /* calculate ascii of cpu number. More than 9 cores? -> FIXME */
+ /* calculate ascii of CPU number. More than 9 cores? -> FIXME */
movb %cl, %al
addb $'0', %al
outb %al, %dx
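
The instruction sequence above emits the CPU number as an ASCII character by adding '0' to it, which only works for single-digit values; that is the FIXME in the comment. The same conversion in C:

/* Sketch: single-digit CPU numbers only, mirroring the FIXME above. */
static char cpu_number_to_ascii(unsigned int cpu)
{
	return (cpu <= 9) ? (char)('0' + cpu) : '?';	/* '?' as a placeholder */
}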