- First pass at s2880 support.

- SMP cleanups (remove the bare SMP define; use only CONFIG_SMP)
- Minor tweaks to romcc to keep it from taking forever to compile
- Failover fixes
- Get a good implementation of k8_cpufixup and sizeram for the Opteron


git-svn-id: svn://svn.coreboot.org/coreboot/trunk@998 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
Eric Biederman
2003-07-21 20:13:45 +00:00
parent 6d4512cdf9
commit 2c018fba95
33 changed files with 1512 additions and 268 deletions


@@ -0,0 +1,69 @@
#ifndef ARCH_SMP_ATOMIC_H
#define ARCH_SMP_ATOMIC_H
/*
* Make sure gcc doesn't try to be clever and move things around
* on us. We need to use _exactly_ the address the user gave us,
* not some alias that contains the same information.
*/
typedef struct { volatile int counter; } atomic_t;
#define ATOMIC_INIT(i) { (i) }
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*/
/**
* atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v. Note that the guaranteed
* useful range of an atomic_t is only 24 bits.
*/
#define atomic_read(v) ((v)->counter)
/**
* atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i. Note that the guaranteed
* useful range of an atomic_t is only 24 bits.
*/
#define atomic_set(v,i) (((v)->counter) = (i))
/**
* atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1. Note that the guaranteed
* useful range of an atomic_t is only 24 bits.
*/
static __inline__ void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__(
		"lock ; incl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}
/**
* atomic_dec - decrement atomic variable
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1. Note that the guaranteed
* useful range of an atomic_t is only 24 bits.
*/
static __inline__ void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__(
		"lock ; decl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}
#endif /* ARCH_SMP_ATOMIC_H */
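
A minimal usage sketch, not part of the patch: how the atomic helpers above might keep a shared counter consistent across CPUs during SMP bring-up. The include path and the names active_cpus, cpu_online, and cpus_started are assumptions for illustration only.

#include <arch/smp/atomic.h>	/* assumed install path for the header above */

/* Hypothetical shared counter bumped by every CPU that comes online. */
static atomic_t active_cpus = ATOMIC_INIT(0);

void cpu_online(void)
{
	/* The "lock ; incl" in atomic_inc() makes this safe against other CPUs. */
	atomic_inc(&active_cpus);
}

int cpus_started(void)
{
	/* Plain read; only the low 24 bits of an atomic_t are guaranteed useful. */
	return atomic_read(&active_cpus);
}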


@@ -0,0 +1,57 @@
#ifndef ARCH_SMP_SPINLOCK_H
#define ARCH_SMP_SPINLOCK_H
/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
*/
typedef struct {
	volatile unsigned int lock;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 }
/*
* Simple spin lock operations. There are two variants, one clears IRQ's
* on the local processor, one does not.
*
* We make no fairness assumptions. They have a cost.
*/
#define barrier() __asm__ __volatile__("": : :"memory")
#define spin_is_locked(x) (*(volatile char *)(&(x)->lock) <= 0)
#define spin_unlock_wait(x) do { barrier(); } while(spin_is_locked(x))
#define spin_lock_string \
	"\n1:\t" \
	"lock ; decb %0\n\t" \
	"js 2f\n" \
	".section .text.lock,\"ax\"\n" \
	"2:\t" \
	"cmpb $0,%0\n\t" \
	"rep;nop\n\t" \
	"jle 2b\n\t" \
	"jmp 1b\n" \
	".previous"
/*
* This works. Despite all the confusion.
*/
#define spin_unlock_string \
	"movb $1,%0"
static inline void spin_lock(spinlock_t *lock)
{
	__asm__ __volatile__(
		spin_lock_string
		:"=m" (lock->lock) : : "memory");
}
static inline void spin_unlock(spinlock_t *lock)
{
	__asm__ __volatile__(
		spin_unlock_string
		:"=m" (lock->lock) : : "memory");
}
#endif /* ARCH_SMP_SPINLOCK_H */
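
A minimal usage sketch, not part of the patch: the spinlock above protecting a critical section so that only one CPU at a time runs it. The include path and the console_lock and print_cpu_banner names are assumptions for illustration only.

#include <arch/smp/spinlock.h>	/* assumed install path for the header above */

/* Hypothetical lock serializing access to a shared resource. */
static spinlock_t console_lock = SPIN_LOCK_UNLOCKED;

void print_cpu_banner(unsigned int apicid)
{
	spin_lock(&console_lock);	/* spins at "2:" until the lock byte goes positive */
	/* ... exactly one CPU at a time executes this section ... */
	spin_unlock(&console_lock);	/* "movb $1,%0" stores 1 to release the lock */
}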


@@ -1,6 +1,6 @@
#include <arch/asm.h>
#include <arch/intel.h>
#ifdef SMP
#ifdef CONFIG_SMP
#include <cpu/p6/apic.h>
#endif
.section ".text"
@@ -39,7 +39,7 @@ _start:
	/* set new stack */
	movl	$_estack, %esp
#ifdef SMP
#ifdef CONFIG_SMP
	/* Get the cpu id */
	movl	$APIC_DEFAULT_BASE, %edi
	movl	APIC_ID(%edi), %eax
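
The hunk is cut off here, but a rough C sketch of what the CONFIG_SMP path above is doing may help: it reads this CPU's ID from the memory-mapped local APIC so the startup code can tell the CPUs apart. The helper name boot_apic_id and the 24-bit shift (the ID field of a P6-class local APIC ID register) are assumptions, not code from the commit.

#include <cpu/p6/apic.h>	/* assumed to provide APIC_DEFAULT_BASE and APIC_ID */

static inline unsigned int boot_apic_id(void)
{
	/* Equivalent of: movl $APIC_DEFAULT_BASE, %edi ; movl APIC_ID(%edi), %eax */
	volatile unsigned int *id_reg =
		(volatile unsigned int *)(APIC_DEFAULT_BASE + APIC_ID);

	/* The APIC ID is assumed to live in bits 31:24 of the register. */
	return (*id_reg >> 24) & 0xff;
}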