cmos post: Guard with spinlock
The CMOS post code storage mechanism does back-to-back CMOS reads and
writes that may be interleaved during CPU bringup, leading to corruption
of the log or of other parts of CMOS.

Change-Id: I704813cc917a659fe034b71c2ff9eb9b80f7c949
Signed-off-by: Duncan Laurie <dlaurie@chromium.org>
Reviewed-on: https://gerrit.chromium.org/gerrit/58102
Reviewed-by: Aaron Durbin <adurbin@chromium.org>
Reviewed-on: http://review.coreboot.org/4227
Tested-by: build bot (Jenkins)
Reviewed-by: Stefan Reinauer <stefan.reinauer@coreboot.org>
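The hunks below only touch the spinlock header; the CMOS post code itself is guarded elsewhere in this change. The following is not that code, just a sketch of the guarded read-modify-write pattern the commit message describes, assuming hypothetical CMOS offsets (CMOS_POST_BANK_OFFSET and the two code slots), coreboot-style cmos_read()/cmos_write() accessors, and the hypothetical function name cmos_post_log_code().

#include <types.h>		/* coreboot integer types (u8); assumed header */
#include <pc80/mc146818rtc.h>	/* cmos_read()/cmos_write(); assumed header path */
#include <smp/spinlock.h>	/* DECLARE_SPIN_LOCK(), spin_lock(), spin_unlock() */

/* Hypothetical CMOS layout, used only for this sketch. */
#define CMOS_POST_BANK_OFFSET	0x70	/* selects which post-code slot is active */
#define CMOS_POST_CODE_A	0x71	/* post-code slot A */
#define CMOS_POST_CODE_B	0x72	/* post-code slot B */

/* Real spinlock in ramstage; expands to nothing when __PRE_RAM__ is defined. */
DECLARE_SPIN_LOCK(cmos_post_lock)

void cmos_post_log_code(u8 code)
{
	spin_lock(&cmos_post_lock);

	/* This back-to-back read/write sequence is what must not be
	 * interleaved between CPUs coming up in parallel. */
	if (cmos_read(CMOS_POST_BANK_OFFSET) & 1)
		cmos_write(code, CMOS_POST_CODE_B);	/* cmos_write(value, offset) */
	else
		cmos_write(code, CMOS_POST_CODE_A);

	spin_unlock(&cmos_post_lock);
}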
commit e807c34a5e (parent 35bd3fedfe), committed by Stefan Reinauer
@@ -1,6 +1,8 @@
 #ifndef ARCH_SMP_SPINLOCK_H
 #define ARCH_SMP_SPINLOCK_H
 
+#ifndef __PRE_RAM__
+
 /*
  * Your basic SMP spinlocks, allowing only a single CPU anywhere
  */
@@ -61,4 +63,16 @@ static inline __attribute__((always_inline)) void cpu_relax(void)
 	__asm__ __volatile__("rep;nop": : :"memory");
 }
 
+#else /* !__PRE_RAM__ */
+
+#define DECLARE_SPIN_LOCK(x)
+#define barrier() do {} while(0)
+#define spin_is_locked(lock) 0
+#define spin_unlock_wait(lock) do {} while(0)
+#define spin_lock(lock) do {} while(0)
+#define spin_unlock(lock) do {} while(0)
+#define cpu_relax() do {} while(0)
+
+#endif /* !__PRE_RAM__ */
+
 #endif /* ARCH_SMP_SPINLOCK_H */
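The new #else branch turns every primitive into a no-op when __PRE_RAM__ is defined, i.e. in the single-CPU romstage/bootblock environment (which, historically, also had no writable global data for a lock variable). The point is that code shared between stages can take the lock unconditionally, with no #ifdef at the call site. A minimal caller sketch under that assumption, with example_lock a made-up name and <smp/spinlock.h> the assumed wrapper path for the patched header:

#include <smp/spinlock.h>	/* wrapper around the arch header patched above */

DECLARE_SPIN_LOCK(example_lock)	/* real lock in ramstage; nothing in __PRE_RAM__ stages */

void guarded_work(void)
{
	/* Ramstage: spins until the lock is free.  Romstage/bootblock
	 * (__PRE_RAM__): both calls preprocess to empty statements and
	 * example_lock is never referenced, so no lock storage is needed. */
	spin_lock(&example_lock);
	/* ... touch the shared resource ... */
	spin_unlock(&example_lock);
}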