Archived
14
0
Fork 0

include/asm-x86/sync_bitops.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
Joe Perches 2008-03-23 01:03:38 -07:00 committed by Ingo Molnar
parent a4c2d7d928
commit 26b7fcc4bd

View file

@@ -13,7 +13,7 @@
  * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1).
  */
-#define ADDR (*(volatile long *) addr)
+#define ADDR (*(volatile long *)addr)
 
 /**
  * sync_set_bit - Atomically set a bit in memory
@@ -26,12 +26,12 @@
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void sync_set_bit(int nr, volatile unsigned long * addr)
+static inline void sync_set_bit(int nr, volatile unsigned long *addr)
 {
-	__asm__ __volatile__("lock; btsl %1,%0"
-			     :"+m" (ADDR)
-			     :"Ir" (nr)
-			     : "memory");
+	asm volatile("lock; btsl %1,%0"
+		     : "+m" (ADDR)
+		     : "Ir" (nr)
+		     : "memory");
 }
 
 /**
@@ -44,12 +44,12 @@ static inline void sync_set_bit(int nr, volatile unsigned long * addr)
  * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
  * in order to ensure changes are visible on other processors.
  */
-static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
+static inline void sync_clear_bit(int nr, volatile unsigned long *addr)
 {
-	__asm__ __volatile__("lock; btrl %1,%0"
-			     :"+m" (ADDR)
-			     :"Ir" (nr)
-			     : "memory");
+	asm volatile("lock; btrl %1,%0"
+		     : "+m" (ADDR)
+		     : "Ir" (nr)
+		     : "memory");
 }
 
 /**
@@ -61,12 +61,12 @@ static inline void sync_clear_bit(int nr, volatile unsigned long * addr)
  * Note that @nr may be almost arbitrarily large; this function is not
  * restricted to acting on a single-word quantity.
  */
-static inline void sync_change_bit(int nr, volatile unsigned long * addr)
+static inline void sync_change_bit(int nr, volatile unsigned long *addr)
 {
-	__asm__ __volatile__("lock; btcl %1,%0"
-			     :"+m" (ADDR)
-			     :"Ir" (nr)
-			     : "memory");
+	asm volatile("lock; btcl %1,%0"
+		     : "+m" (ADDR)
+		     : "Ir" (nr)
+		     : "memory");
 }
 
 /**
@@ -77,13 +77,13 @@ static inline void sync_change_bit(int nr, volatile unsigned long * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
+static inline int sync_test_and_set_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
-	__asm__ __volatile__("lock; btsl %2,%1\n\tsbbl %0,%0"
-			     :"=r" (oldbit),"+m" (ADDR)
-			     :"Ir" (nr) : "memory");
+	asm volatile("lock; btsl %2,%1\n\tsbbl %0,%0"
+		     : "=r" (oldbit), "+m" (ADDR)
+		     : "Ir" (nr) : "memory");
 	return oldbit;
 }
@@ -95,13 +95,13 @@ static inline int sync_test_and_set_bit(int nr, volatile unsigned long * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
+static inline int sync_test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
-	__asm__ __volatile__("lock; btrl %2,%1\n\tsbbl %0,%0"
-			     :"=r" (oldbit),"+m" (ADDR)
-			     :"Ir" (nr) : "memory");
+	asm volatile("lock; btrl %2,%1\n\tsbbl %0,%0"
+		     : "=r" (oldbit), "+m" (ADDR)
+		     : "Ir" (nr) : "memory");
 	return oldbit;
 }
@@ -113,13 +113,13 @@ static inline int sync_test_and_clear_bit(int nr, volatile unsigned long * addr)
  * This operation is atomic and cannot be reordered.
  * It also implies a memory barrier.
  */
-static inline int sync_test_and_change_bit(int nr, volatile unsigned long* addr)
+static inline int sync_test_and_change_bit(int nr, volatile unsigned long *addr)
 {
 	int oldbit;
 
-	__asm__ __volatile__("lock; btcl %2,%1\n\tsbbl %0,%0"
-			     :"=r" (oldbit),"+m" (ADDR)
-			     :"Ir" (nr) : "memory");
+	asm volatile("lock; btcl %2,%1\n\tsbbl %0,%0"
+		     : "=r" (oldbit), "+m" (ADDR)
+		     : "Ir" (nr) : "memory");
	return oldbit;
 }