arch/x86: Move cpu_relax()
It's not related to spinlocks and the actual implementation was also guarded
by CONFIG(SMP). With a single call-site in x86-specific code, empty stubs for
other architectures are currently not necessary.

Also drop an unused include on a nearby line.

Change-Id: I00439e9c1d10c943ab5e404f5d687d316768fa16
Signed-off-by: Kyösti Mälkki <kyosti.malkki@gmail.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/43808
Reviewed-by: Angel Pons <th3fanbus@gmail.com>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
committed by Patrick Georgi
parent c731788929
commit 0199d3bd7f
@@ -9,7 +9,6 @@
 #define spin_unlock_wait(lock)	do {} while (0)
 #define spin_lock(lock)		do {} while (0)
 #define spin_unlock(lock)	do {} while (0)
-#define cpu_relax()		do {} while (0)
 
 #include <smp/node.h>
 #define boot_cpu() 1
@@ -269,6 +269,12 @@ static inline void get_fms(struct cpuinfo_x86 *c, uint32_t tfms)
 
 }
 
+/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
+static __always_inline void cpu_relax(void)
+{
+	__asm__ __volatile__("rep;nop" : : : "memory");
+}
+
 #define asmlinkage __attribute__((regparm(0)))
 
 /*
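For context, the cpu_relax() moved above emits a PAUSE (REP NOP) hint so a core
spinning in a tight polling loop wastes less power and competes less with its
SMT sibling. A minimal sketch of a typical caller, assuming cpu_relax() is now
reachable via <arch/cpu.h>; the wait_for_flag() helper and its flag variable
are purely illustrative and not part of this change:

#include <arch/cpu.h>

/* Spin until another CPU sets *flag; the volatile read forces the
 * compiler to re-load the flag each iteration, and cpu_relax()
 * inserts PAUSE to take the edge off the busy-wait. */
static void wait_for_flag(volatile const int *flag)
{
	while (!*flag)
		cpu_relax();
}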
@@ -62,12 +62,6 @@ static __always_inline void spin_unlock(spinlock_t *lock)
 		: "=m" (lock->lock) : : "memory");
 }
 
-/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
-static __always_inline void cpu_relax(void)
-{
-	__asm__ __volatile__("rep;nop" : : : "memory");
-}
-
 #else
 
 #define DECLARE_SPIN_LOCK(x)
@@ -76,7 +70,6 @@ static __always_inline void cpu_relax(void)
 #define spin_unlock_wait(lock)	do {} while (0)
 #define spin_lock(lock)		do {} while (0)
 #define spin_unlock(lock)	do {} while (0)
-#define cpu_relax()		do {} while (0)
 
 #endif
 
@@ -1,8 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 
 #include <arch/cpu.h>
 #include <cpu/x86/tsc.h>
 #include <pc80/i8254.h>
-#include <smp/spinlock.h>
 #include <delay.h>
 #include <thread.h>
@@ -11,7 +11,6 @@
 #define spin_unlock_wait(lock)	do {} while (0)
 #define spin_lock(lock)		do {} while (0)
 #define spin_unlock(lock)	do {} while (0)
-#define cpu_relax()		do {} while (0)
 #endif
 
 #endif /* SMP_SPINLOCK_H */
@@ -9,7 +9,6 @@
 #define spin_unlock_wait(lock)	do {} while (0)
 #define spin_lock(lock)		do {} while (0)
 #define spin_unlock(lock)	do {} while (0)
-#define cpu_relax()		do {} while (0)
 
 #include <smp/node.h>
 #define boot_cpu() 1