The following patch reworks car_disable into C. Tested, works here. I also compared the GCC-generated code and it looks all right. Please test on some multicore CPU. I added the "memory" clobber to the read_cr0 / write_cr0 functions, as is done in the Linux kernel. It seems that if this is missing, GCC is too smart and reorders the reads/writes to CR0 (not tested whether it is really a problem here, but better to be safe for future users of these functions ;)

Signed-off-by: Rudolf Marek <r.marek@assembler.cz>
Acked-by: Stefan Reinauer <stepan@coresystems.de>

git-svn-id: svn://svn.coreboot.org/coreboot/trunk@5562 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
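For context, here is a minimal, illustrative sketch (not part of the patch) of how these accessors might be called from a C cache-disable path. The function name cache_disable_sketch and the use of CR0.CD (bit 30) are assumptions for illustration, and the include path is inferred from the CPU_X86_CACHE guard; the point is that without the "memory" clobber GCC could move the wbinvd() or other memory accesses across the CR0 write.

    #include <cpu/x86/cache.h>

    /* Illustrative only: disable caching by setting the CD bit in CR0, then
     * flush. The read_cr0() -> write_cr0() -> wbinvd() ordering is exactly
     * what the "memory" clobber keeps the compiler from rearranging. */
    static inline void cache_disable_sketch(void)
    {
    	unsigned long cr0;

    	cr0 = read_cr0();
    	cr0 |= (1UL << 30);	/* CR0.CD: stop further cache fills */
    	write_cr0(cr0);
    	wbinvd();		/* write back and invalidate the caches */
    }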
@@ -20,16 +20,19 @@
 #ifndef CPU_X86_CACHE
 #define CPU_X86_CACHE
 
+/* the memory clobber prevents the GCC from reordering the read/write order
+   of CR0 */
+
 static inline unsigned long read_cr0(void)
 {
 	unsigned long cr0;
-	asm volatile ("movl %%cr0, %0" : "=r" (cr0));
+	asm volatile ("movl %%cr0, %0" : "=r" (cr0) :: "memory");
 	return cr0;
 }
 
 static inline void write_cr0(unsigned long cr0)
 {
-	asm volatile ("movl %0, %%cr0" : : "r" (cr0));
+	asm volatile ("movl %0, %%cr0" : : "r" (cr0) : "memory");
 }
 
 static inline void invd(void)
@@ -39,7 +42,7 @@ static inline void invd(void)
 
 static inline void wbinvd(void)
 {
-	asm volatile ("wbinvd");
+	asm volatile ("wbinvd" ::: "memory");
 }
 
 static inline void enable_cache(void)