Go back to SIPI WAIT state for those CPUs defining the newly introduced
CONFIG_AP_IN_SIPI_WAIT flag. Newer Intel CPUs need this to operate with
multiple cores.

Signed-off-by: Stefan Reinauer <stepan@coresystems.de>
Acked-by: Peter Stuge <peter@stuge.se>

git-svn-id: svn://svn.coreboot.org/coreboot/trunk@3465 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
commit 685240610b
parent ab8bb8b061
committed by Stefan Reinauer
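For context: the "long non-inlined version" that the new comment in the diff
points at lives in lapic_cpu_init.c and is only built when
CONFIG_AP_IN_SIPI_WAIT is set. The following is a minimal sketch of the idea,
not the verbatim coreboot code; it assumes the LAPIC register names and
helpers (LAPIC_ICR, LAPIC_ICR2, SET_LAPIC_DEST_FIELD, lapic_write_around,
lapic_wait_icr_idle) from the same lapic.h family of headers. An AP puts
itself back into wait-for-SIPI state by sending itself an INIT IPI:

/* Sketch only, assuming the helpers named above exist as in lapic.h */
void stop_this_cpu(void)
{
	unsigned long id = lapicid();

	/* Assert INIT on this CPU (level-triggered, asserted) */
	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(id));
	lapic_write_around(LAPIC_ICR,
		LAPIC_INT_LEVELTRIG | LAPIC_INT_ASSERT | LAPIC_DM_INIT);
	lapic_wait_icr_idle();

	/* De-assert INIT; the core drops back to wait-for-SIPI state */
	lapic_write_around(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(id));
	lapic_write_around(LAPIC_ICR, LAPIC_INT_LEVELTRIG | LAPIC_DM_INIT);
	lapic_wait_icr_idle();

	/* Not normally reached; halt forever as a fallback */
	for (;;)
		hlt();
}

This is why the inline stop_this_cpu() in lapic.h is now guarded with
#if CONFIG_AP_IN_SIPI_WAIT != 1: boards that want their APs parked in SIPI
wait get the out-of-line version instead of a plain hlt() loop.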
@@ -51,6 +51,11 @@ static inline __attribute__((always_inline)) unsigned long lapicid(void)
 	return lapic_read(LAPIC_ID) >> 24;
 }
 
+
+#if CONFIG_AP_IN_SIPI_WAIT != 1
+/* If we need to go back to sipi wait, we use the long non-inlined version of
+ * this function in lapic_cpu_init.c
+ */
 static inline __attribute__((always_inline)) void stop_this_cpu(void)
 {
 
@@ -59,6 +64,7 @@ static inline __attribute__((always_inline)) void stop_this_cpu(void)
 		hlt();
 	}
 }
+#endif
 
 #if ! defined (__ROMCC__)
 
@@ -98,7 +104,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
 }
 
 
-extern inline void lapic_write_atomic(unsigned long reg, unsigned long v)
+static inline void lapic_write_atomic(unsigned long reg, unsigned long v)
 {
 	xchg((volatile unsigned long *)(LAPIC_DEFAULT_BASE+reg), v);
 }
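A note on the last hunk: lapic_write_atomic() changes from extern inline to
static inline. Under the GNU89 inline semantics that gcc applied at the time,
extern inline provides a body for inlining only and emits no out-of-line
definition, so any call the compiler chooses not to inline (e.g. at -O0)
becomes an undefined reference unless a separate non-inline definition exists
elsewhere. static inline gives each translation unit its own private copy, so
the call always resolves. A hypothetical minimal illustration (the function
names are invented for this note):

/* GNU89 semantics (gcc -std=gnu89, the old default): */
extern inline int twice(int x) { return 2 * x; }
/* No out-of-line twice() is emitted; if gcc declines to inline a call,
 * linking fails unless some .c file defines a non-inline twice(). */

static inline int twice_safe(int x) { return 2 * x; }
/* A private copy per translation unit; calls always resolve. */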