
[IA64] Squeeze ticket locks back into 4 bytes.

Linus pointed out that other people have spent large amounts of time
and effort to optimize the layout of frequently used structures. Often
these have embedded locks, and the assumption is that a lock takes
4 bytes.  Linus also pointed out how to work with the limited options
for atomic instructions on Itanium.

Signed-off-by: Tony Luck <tony.luck@intel.com>
Tony Luck 2009-10-07 10:54:19 -07:00
parent 0eca52a927
commit 9d40ee200a
2 changed files with 27 additions and 20 deletions
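
For orientation before the diff: the fix packs both counters of the ticket lock into one 32-bit word, so a single 4-byte ia64_fetchadd hands out tickets and the lock once again fits the 4-byte slot that tuned structure layouts assume. A minimal user-space sketch of the new encoding (illustrative only; the next_ticket()/now_serving() helpers are hypothetical, not kernel code):

#include <stdint.h>
#include <stdio.h>

/*
 *   31             17  16    15  14                    0
 *  +----------------------------------------------------+
 *  |  now_serving     | padding |   next_ticket         |
 *  +----------------------------------------------------+
 */
#define TICKET_SHIFT	17
#define TICKET_BITS	15
#define TICKET_MASK	((1 << TICKET_BITS) - 1)

/* Hypothetical helpers to pull the two 15-bit counters out of one word. */
static unsigned next_ticket(uint32_t w)  { return w & TICKET_MASK; }
static unsigned now_serving(uint32_t w)  { return (w >> TICKET_SHIFT) & TICKET_MASK; }

int main(void)
{
	/* Ticket 5 is being served; ticket 7 is the next one handed out. */
	uint32_t w = (5u << TICKET_SHIFT) | 7u;

	/* The lock is free exactly when both counters agree -- the test the
	 * kernel writes as !(((w >> TICKET_SHIFT) ^ w) & TICKET_MASK). */
	printf("serving=%u next=%u locked=%d\n",
	       now_serving(w), next_ticket(w),
	       (((w >> TICKET_SHIFT) ^ w) & TICKET_MASK) != 0);
	return 0;
}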

arch/ia64/include/asm/spinlock.h

@@ -25,61 +25,68 @@
  * by atomically noting the tail and incrementing it by one (thus adding
  * ourself to the queue and noting our position), then waiting until the head
  * becomes equal to the initial value of the tail.
+ * The pad bits in the middle are used to prevent the next_ticket number
+ * overflowing into the now_serving number.
  *
- *   63                     32  31                      0
+ *   31             17  16    15  14                    0
  *  +----------------------------------------------------+
- *  |  next_ticket_number      |  now_serving            |
+ *  |  now_serving     | padding |   next_ticket         |
  *  +----------------------------------------------------+
  */
 
-#define TICKET_SHIFT	32
+#define TICKET_SHIFT	17
+#define TICKET_BITS	15
+#define TICKET_MASK	((1 << TICKET_BITS) - 1)
 
 static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 {
-	int	*p = (int *)&lock->lock, turn, now_serving;
+	int	*p = (int *)&lock->lock, ticket, serve;
 
-	now_serving = *p;
-	turn = ia64_fetchadd(1, p+1, acq);
+	ticket = ia64_fetchadd(1, p, acq);
 
-	if (turn == now_serving)
+	if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
 		return;
 
-	do {
+	ia64_invala();
+
+	for (;;) {
+		asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory");
+
+		if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+			return;
 		cpu_relax();
-	} while (ACCESS_ONCE(*p) != turn);
+	}
 }
 
 static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 {
-	long tmp = ACCESS_ONCE(lock->lock), try;
+	int tmp = ACCESS_ONCE(lock->lock);
 
-	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1))) {
-		try = tmp + (1L << TICKET_SHIFT);
-
-		return ia64_cmpxchg(acq, &lock->lock, tmp, try, sizeof (tmp)) == tmp;
-	}
+	if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
+		return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
 	return 0;
 }
 
 static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 {
-	int	*p = (int *)&lock->lock;
+	unsigned short	*p = (unsigned short *)&lock->lock + 1, tmp;
 
-	(void)ia64_fetchadd(1, p, rel);
+	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
+	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
 }
 
 static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
-	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1));
+	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
 }
 
 static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
-	return (((tmp >> TICKET_SHIFT) - tmp) & ((1L << TICKET_SHIFT) - 1)) > 1;
+	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
 }
 
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
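
Two details of the new code are worth unpacking. The spin loop compares only the low 15 bits of (serve >> TICKET_SHIFT) ^ ticket, so both counters can wrap modulo 2^15 without confusing waiters. And because now_serving sits entirely in the upper half-word, __ticket_spin_unlock can advance it with a plain 2-byte release store instead of a fetchadd: within that short, now_serving's low bit is bit 1, so tmp + 2 increments it, while & ~1 clears bit 16 of the word, the padding bit that a wrapping next_ticket may have carried into. A portable little-endian model of the same arithmetic (GCC __atomic builtins stand in for ia64_fetchadd/ld2.bias here; a sketch of the protocol, not the kernel's implementation):

#include <stdint.h>

#define TICKET_SHIFT	17
#define TICKET_MASK	((1 << 15) - 1)

static volatile uint32_t lockword;	/* models raw_spinlock_t.lock */

static void model_lock(void)
{
	/* One 4-byte fetchadd both takes a ticket (low 15 bits of the old
	 * value) and returns the serving number we must wait for. */
	uint32_t ticket = __atomic_fetch_add(&lockword, 1, __ATOMIC_ACQUIRE);

	while ((((__atomic_load_n(&lockword, __ATOMIC_ACQUIRE)
		  >> TICKET_SHIFT) ^ ticket) & TICKET_MASK) != 0)
		;	/* spin until now_serving == our ticket (mod 2^15) */
}

static void model_unlock(void)
{
	/* Touch only the upper half-word (little-endian, as on ia64 Linux):
	 * +2 bumps now_serving, & ~1 clears the overflowed padding bit. */
	volatile uint16_t *p = (volatile uint16_t *)&lockword + 1;
	uint16_t tmp = *p;

	__atomic_store_n(p, (uint16_t)((tmp + 2) & ~1), __ATOMIC_RELEASE);
}

int main(void)
{
	model_lock();	/* takes ticket 0 and acquires immediately */
	model_unlock();	/* advances now_serving to 1 */
	return 0;
}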

arch/ia64/include/asm/spinlock_types.h

@@ -6,7 +6,7 @@
 #endif
 
 typedef struct {
-	volatile unsigned long lock;
+	volatile unsigned int lock;
 } raw_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
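
This one-line type change is what delivers the subject line: with both counters packed into a single int, the lock shrinks from 8 bytes back to 4, and structures with embedded locks regain the layout their authors tuned for. A compile-time check one could write in user space to pin that down (a C11 static_assert sketch, not something the commit adds; in-kernel code would reach for BUILD_BUG_ON):

#include <assert.h>

typedef struct {
	volatile unsigned int lock;	/* the post-commit type */
} raw_spinlock_t;

/* The whole point of the commit: an embedded lock costs 4 bytes again. */
static_assert(sizeof(raw_spinlock_t) == 4, "ticket lock must fit in 4 bytes");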