cpu/x86/mp: pass pointers to structures for AP callbacks

In order to extend the MP callback infrastructure, prepare for easier
changes by making the AP callback be signalled by a single pointer to a
local variable on the signaller's stack. When an AP sees the callback,
it copies the structure to a local variable and then acknowledges it by
clearing out the slot. The reads and writes to the slots are
implemented using inline assembly, which forces a real memory access
and acts as a compiler barrier.

BUG=b:74436746

Change-Id: Ia46133a49c03ce3ce0e73ae3d30547316c7ec43c
Signed-off-by: Aaron Durbin <adurbin@chromium.org>
Reviewed-on: https://review.coreboot.org/26043
Reviewed-by: Subrata Banik <subrata.banik@intel.com>
Reviewed-by: Nico Huber <nico.h@gmx.de>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
commit 223fb436fe
parent e6cc21e262
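For orientation, the handshake this change introduces can be sketched as
below. This is a simplified model, not the patch itself: the single slot
stands in for the per-CPU ap_callbacks[] array, signal_ap() and ap_loop()
are hypothetical names, and the inline-assembly slot accessors of the real
code are reduced here to a volatile pointer.

/* Simplified sketch of the handshake (hypothetical names; the real
 * code uses one slot per AP and inline-assembly accessors). */
struct mp_callback {
	void (*func)(void);
};

static struct mp_callback *volatile slot;

/* BSP side: publish a pointer to a stack-local callback, then wait
 * for the AP to clear the slot as its acknowledgement. */
static void signal_ap(struct mp_callback *cb)
{
	slot = cb;
	while (slot != NULL)
		;	/* the real code bounds this wait with expire_us */
}

/* AP side: poll the slot, copy the structure locally, then clear the
 * slot so the BSP may reuse or unwind its stack variable. */
static void ap_loop(void)
{
	while (1) {
		struct mp_callback *cb = slot;

		if (cb == NULL)
			continue;

		struct mp_callback local = *cb;	/* copy before ack */
		slot = NULL;			/* acknowledge */
		local.func();
	}
}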
@@ -40,7 +40,9 @@
 
 #define MAX_APIC_IDS 256
 
-typedef void (*mp_callback_t)(void);
+struct mp_callback {
+	void (*func)(void);
+};
 
 /*
  * A mp_flight_record details a sequence of calls for the APs to perform
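Wrapping the bare function pointer in a struct is what makes the
infrastructure extensible: future work can add fields without touching the
signalling protocol. A hypothetical extension, not part of this patch,
could look like:

struct mp_callback {
	void (*func)(void *arg);	/* hypothetical: callback argument */
	void *arg;			/* hypothetical per-call context */
};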
@@ -58,8 +60,8 @@ typedef void (*mp_callback_t)(void);
 struct mp_flight_record {
 	atomic_t barrier;
 	atomic_t cpus_entered;
-	mp_callback_t ap_call;
-	mp_callback_t bsp_call;
+	void (*ap_call)(void);
+	void (*bsp_call)(void);
 } __aligned(CACHELINE_SIZE);
 
 #define _MP_FLIGHT_RECORD(barrier_, ap_func_, bsp_func_) \
@@ -851,19 +853,30 @@ static void trigger_smm_relocation(void)
 	mp_state.ops.per_cpu_smm_trigger();
 }
 
-static mp_callback_t ap_callbacks[CONFIG_MAX_CPUS];
+static struct mp_callback *ap_callbacks[CONFIG_MAX_CPUS];
 
-static mp_callback_t read_callback(mp_callback_t *slot)
+static struct mp_callback *read_callback(struct mp_callback **slot)
 {
-	return *(volatile mp_callback_t *)slot;
+	struct mp_callback *ret;
+
+	asm volatile ("mov %1, %0\n"
+		: "=r" (ret)
+		: "m" (*slot)
+		: "memory"
+	);
+	return ret;
 }
 
-static void store_callback(mp_callback_t *slot, mp_callback_t value)
+static void store_callback(struct mp_callback **slot, struct mp_callback *val)
 {
-	*(volatile mp_callback_t *)slot = value;
+	asm volatile ("mov %1, %0\n"
+		: "=m" (*slot)
+		: "r" (val)
+		: "memory"
+	);
 }
 
-static int run_ap_work(mp_callback_t func, long expire_us)
+static int run_ap_work(struct mp_callback *val, long expire_us)
 {
 	int i;
 	int cpus_accepted;
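The "m"/"=m" operands above force a genuine load and store to the slot,
and the "memory" clobber keeps the compiler from caching or reordering
other memory accesses across the accessor. Roughly the same effect could
be had with a volatile access plus an empty GCC-style barrier; a sketch of
that alternative, for comparison only and not what the patch uses:

static struct mp_callback *read_callback_volatile(struct mp_callback **slot)
{
	/* Volatile-qualify the slot object itself so the load is real. */
	struct mp_callback *ret = *(struct mp_callback *volatile *)slot;

	/* An empty asm with a "memory" clobber is a compiler barrier. */
	asm volatile ("" ::: "memory");
	return ret;
}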
@@ -879,7 +892,7 @@ static int run_ap_work(mp_callback_t func, long expire_us)
 	for (i = 0; i < ARRAY_SIZE(ap_callbacks); i++) {
 		if (cur_cpu == i)
 			continue;
-		store_callback(&ap_callbacks[i], func);
+		store_callback(&ap_callbacks[i], val);
 	}
 	mfence();
 
@@ -908,28 +921,34 @@ static int run_ap_work(mp_callback_t func, long expire_us)
 
 static void ap_wait_for_instruction(void)
 {
-	int cur_cpu = cpu_index();
+	struct mp_callback lcb;
+	struct mp_callback **per_cpu_slot;
 
 	if (!IS_ENABLED(CONFIG_PARALLEL_MP_AP_WORK))
 		return;
 
-	while (1) {
-		mp_callback_t func = read_callback(&ap_callbacks[cur_cpu]);
+	per_cpu_slot = &ap_callbacks[cpu_index()];
 
-		if (func == NULL) {
+	while (1) {
+		struct mp_callback *cb = read_callback(per_cpu_slot);
+
+		if (cb == NULL) {
 			asm ("pause");
 			continue;
 		}
 
-		store_callback(&ap_callbacks[cur_cpu], NULL);
+		/* Copy to local variable before signalling consumption. */
+		memcpy(&lcb, cb, sizeof(lcb));
 		mfence();
-		func();
+		store_callback(per_cpu_slot, NULL);
+		lcb.func();
 	}
 }
 
 int mp_run_on_aps(void (*func)(void), long expire_us)
 {
-	return run_ap_work(func, expire_us);
+	struct mp_callback lcb = { .func = func };
+	return run_ap_work(&lcb, expire_us);
 }
 
 int mp_run_on_all_cpus(void (*func)(void), long expire_us)
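A caller-side sketch of the resulting API. The callback structure lives on
mp_run_on_aps()'s stack only for the duration of the call, which is why
the APs must copy it before acknowledging. do_flush() and the 100 ms
budget below are made up for illustration, assuming coreboot's
USECS_PER_MSEC from <timer.h> and wbinvd() from <cpu/x86/cache.h>:

#include <console/console.h>
#include <cpu/x86/cache.h>
#include <cpu/x86/mp.h>
#include <timer.h>

static void do_flush(void)
{
	/* Runs on every AP; keep the work self-contained. */
	wbinvd();
}

static void example(void)
{
	/* Ask the APs to run do_flush(), allowing up to 100 ms. */
	if (mp_run_on_aps(do_flush, 100 * USECS_PER_MSEC) < 0)
		printk(BIOS_ERR, "APs did not accept the callback\n");
}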