cpu/x86/smm_module_loader: Drop superfluous checks
Checking if the stack encroaches on the entry points is done in other
parts of the code.

Change-Id: I275d5dda9c69cc89608450ae27dd5dbd581e3595
Signed-off-by: Arthur Heymans <arthur@aheymans.xyz>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/63480
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Kyösti Mälkki <kyosti.malkki@gmail.com>
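For context: the check being dropped verified that the lowest staggered
entry point did not fall below the top of the SMM stack. Below is a
minimal sketch of that encroachment test, assuming the cpus[] bookkeeping
array and SMM_ENTRY_OFFSET from this file; the standalone helper itself
is hypothetical, for illustration only, not coreboot API.

/* Illustrative helper (hypothetical): true when the last CPU's entry
 * code would overlap the stack. Entry code for CPU n sits at
 * cpus[n].smbase + SMM_ENTRY_OFFSET, and stacks grow down from
 * stack_top, so every entry point must lie at or above stack_top. */
static bool entry_points_encroach_stack(unsigned int num_cpus, uintptr_t stack_top)
{
	return cpus[num_cpus - 1].smbase + SMM_ENTRY_OFFSET < stack_top;
}

Per the commit message, this condition is already validated in other
parts of the loader, so the copy removed below was redundant.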
Committed by: Paul Fagerburg
Parent: 3b9f715183
Commit: 346db92f8c
@@ -169,42 +169,15 @@ u32 smm_get_cpu_smbase(unsigned int cpu_num)
  * important to enter protected mode before the jump because the "jump to
  * address" might be larger than the 20bit address supported by real mode.
  * SMI entry right now is in real mode.
- * input: smbase - this is the smbase of the first cpu not the smbase
- *        where tseg starts (aka smram_start). All CPUs code segment
- *        and stack will be below this point except for the common
- *        SMI handler which is one segment above
  * input: num_cpus - number of cpus that need relocation including
  *        the first CPU (though its code is already loaded)
- * input: top of stack (stacks work downward by default in Intel HW)
- * output: return -1, if runtime smi code could not be installed. In
- *         this case SMM will not work and any SMI's generated will
- *         cause a CPU shutdown or general protection fault because
- *         the appropriate smi handling code was not installed
  */
 
-static int smm_place_entry_code(uintptr_t smbase, unsigned int num_cpus,
-				uintptr_t stack_top, const struct smm_loader_params *params)
+static void smm_place_entry_code(const unsigned int num_cpus)
 {
 	unsigned int i;
 	unsigned int size;
 
-	/*
-	 * Ensure there was enough space and the last CPUs smbase
-	 * did not encroach upon the stack. Stack top is smram start
-	 * + size of stack.
-	 */
-	if (cpus[num_cpus].active) {
-		if (cpus[num_cpus - 1].smbase + SMM_ENTRY_OFFSET < stack_top) {
-			printk(BIOS_ERR, "%s: stack encroachment\n", __func__);
-			printk(BIOS_ERR, "%s: smbase %lx, stack_top %lx\n",
-			       __func__, cpus[num_cpus].smbase, stack_top);
-			return 0;
-		}
-	}
-
-	printk(BIOS_INFO, "%s: smbase %lx, stack_top %lx\n",
-		__func__, cpus[num_cpus-1].smbase, stack_top);
-
 	/* start at 1, the first CPU stub code is already there */
 	size = cpus[0].code_end - cpus[0].code_start;
 	for (i = 1; i < num_cpus; i++) {
@@ -215,7 +188,6 @@ static int smm_place_entry_code(uintptr_t smbase, unsigned int num_cpus,
 		printk(BIOS_DEBUG, "%s: copying from %lx to %lx 0x%x bytes\n",
 			__func__, cpus[0].code_start, cpus[i].code_start, size);
 	}
-	return 1;
 }
 
 static uintptr_t stack_top;
@@ -245,21 +217,10 @@ int smm_setup_stack(const uintptr_t perm_smbase, const size_t perm_smram_size,
  * staggered by the per CPU SMM save state size extending down from
  * SMM_ENTRY_OFFSET.
  */
-static int smm_stub_place_staggered_entry_points(char *base,
-	const struct smm_loader_params *params, const struct rmodule *smm_stub)
+static void smm_stub_place_staggered_entry_points(const struct smm_loader_params *params)
 {
-	size_t stub_entry_offset;
-	int rc = 1;
-	stub_entry_offset = rmodule_entry_offset(smm_stub);
-	/* Each CPU now has its own stub code, which enters protected mode,
-	 * sets up the stack, and then jumps to common SMI handler
-	 */
-	if (params->num_concurrent_save_states > 1 || stub_entry_offset != 0) {
-		rc = smm_place_entry_code((uintptr_t)base,
-					  params->num_concurrent_save_states,
-					  stack_top, params);
-	}
-	return rc;
+	if (params->num_concurrent_save_states > 1)
+		smm_place_entry_code(params->num_concurrent_save_states);
 }
 
 /*
@@ -328,10 +289,7 @@ static int smm_module_setup_stub(const uintptr_t smbase, const size_t smm_size,
 	       stub_params->start32_offset);
 	printk(BIOS_DEBUG, "%s: runtime.smm_size = 0x%zx\n", __func__, smm_size);
 
-	if (!smm_stub_place_staggered_entry_points((void *)smbase, params, &smm_stub)) {
-		printk(BIOS_ERR, "%s: staggered entry points failed\n", __func__);
-		return -1;
-	}
+	smm_stub_place_staggered_entry_points(params);
 
 	printk(BIOS_DEBUG, "SMM Module: stub loaded at %lx. Will call %p\n", smm_stub_loc,
 	       params->handler);
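Pieced together from the hunks above, the staggered-entry-point path now
reduces to the following shape (loop internals are unchanged by this
commit and abbreviated here):

static void smm_place_entry_code(const unsigned int num_cpus)
{
	unsigned int i;
	unsigned int size;

	/* start at 1, the first CPU stub code is already there */
	size = cpus[0].code_end - cpus[0].code_start;
	for (i = 1; i < num_cpus; i++) {
		/* ... copy cpus[0]'s stub of 'size' bytes to
		 * cpus[i].code_start (body unchanged, see the context
		 * lines in the second hunk) ... */
	}
}

static void smm_stub_place_staggered_entry_points(const struct smm_loader_params *params)
{
	/* Only setups with more than one concurrent save state need
	 * extra stub copies; no error is returned anymore because the
	 * stack encroachment check is done elsewhere. */
	if (params->num_concurrent_save_states > 1)
		smm_place_entry_code(params->num_concurrent_save_states);
}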