@@ -41,6 +41,7 @@
 #include <linux/list.h>
 #include <linux/spinlock.h>
 #include <linux/lockdep.h>
+#include <asm/asm.h>
 
 struct rwsem_waiter;
 
@@ -55,17 +56,28 @@ extern asmregparm struct rw_semaphore *
 
 /*
  * the semaphore definition
+ *
+ * The bias values and the counter type limit the number of
+ * potential readers/writers to 32767 for 32 bits and 2147483647
+ * for 64 bits.
  */
 
-#define RWSEM_UNLOCKED_VALUE		0x00000000
-#define RWSEM_ACTIVE_BIAS		0x00000001
-#define RWSEM_ACTIVE_MASK		0x0000ffff
-#define RWSEM_WAITING_BIAS		(-0x00010000)
+#ifdef CONFIG_X86_64
+# define RWSEM_ACTIVE_MASK		0xffffffffL
+#else
+# define RWSEM_ACTIVE_MASK		0x0000ffffL
+#endif
+
+#define RWSEM_UNLOCKED_VALUE		0x00000000L
+#define RWSEM_ACTIVE_BIAS		0x00000001L
+#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
+typedef signed long rwsem_count_t;
+
 struct rw_semaphore {
-	signed long		count;
+	rwsem_count_t		count;
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
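Before reading the fast paths below, it helps to see this encoding with concrete numbers. The following stand-alone user-space sketch is not part of the patch; it simply copies the x86_64 constants from the hunk above and shows why any number of plain readers keeps the count positive, while a writer (or queued waiter) drives it negative. That sign is exactly what the jns tests in the fast paths check.

#include <stdio.h>

#define RWSEM_ACTIVE_MASK		0xffffffffL
#define RWSEM_UNLOCKED_VALUE		0x00000000L
#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

typedef signed long rwsem_count_t;

int main(void)
{
        rwsem_count_t c;

        c = RWSEM_UNLOCKED_VALUE + 3 * RWSEM_ACTIVE_READ_BIAS;
        printf("three readers: %#lx, negative=%d\n", (unsigned long)c, c < 0);

        c = RWSEM_UNLOCKED_VALUE + RWSEM_ACTIVE_WRITE_BIAS;
        printf("one writer:    %#lx, negative=%d\n", (unsigned long)c, c < 0);
        printf("active part:   %#lx\n", (unsigned long)(c & RWSEM_ACTIVE_MASK));
        return 0;
}

On 32 bits the same arithmetic happens in the 0xffff/0x10000 halves of a 32-bit word, which is where the 32767 reader limit in the comment comes from.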
@@ -105,7 +117,7 @@ do { \
 static inline void __down_read(struct rw_semaphore *sem)
 {
 	asm volatile("# beginning down_read\n\t"
-		     LOCK_PREFIX " inc%z0 (%1)\n\t"
+		     LOCK_PREFIX _ASM_INC "(%1)\n\t"
 		     /* adds 0x00000001, returns the old value */
 		     " jns 1f\n"
 		     " call call_rwsem_down_read_failed\n"
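In C terms, this read fast path is one atomic increment plus a sign test: only a negative result (a writer holds the lock, or waiters are queued) takes the slow path. A hedged C11 rendering, with an illustrative name standing in for call_rwsem_down_read_failed:

#include <stdatomic.h>

typedef signed long rwsem_count_t;
#define RWSEM_ACTIVE_READ_BIAS	0x00000001L

/* slow-path stand-in; the real code calls rwsem_down_read_failed() */
extern void down_read_slowpath(_Atomic rwsem_count_t *count);

static inline void down_read_sketch(_Atomic rwsem_count_t *count)
{
        /* LOCK inc + "jns": take the slow path iff the new value < 0 */
        if (atomic_fetch_add(count, RWSEM_ACTIVE_READ_BIAS)
            + RWSEM_ACTIVE_READ_BIAS < 0)
                down_read_slowpath(count);
}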
@@ -121,7 +133,7 @@ static inline void __down_read(struct rw_semaphore *sem)
  */
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
-	__s32 result, tmp;
+	rwsem_count_t result, tmp;
 	asm volatile("# beginning __down_read_trylock\n\t"
 		     " mov %0,%1\n\t"
 		     "1:\n\t"
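The read trylock must not block, so instead of an unconditional increment it opens a cmpxchg loop here: take a snapshot, compute snapshot plus read bias, and commit only while that result stays positive, retrying from the fresh value on failure. A hedged C11 equivalent of the loop this hunk begins:

#include <stdatomic.h>
#include <stdbool.h>

typedef signed long rwsem_count_t;
#define RWSEM_ACTIVE_READ_BIAS	0x00000001L

static inline bool down_read_trylock_sketch(_Atomic rwsem_count_t *count)
{
        rwsem_count_t old = atomic_load(count);

        /* mirror of the asm loop: add, bail if the sum would be <= 0,
         * otherwise lock cmpxchg and retry from the refreshed value */
        while (old + RWSEM_ACTIVE_READ_BIAS > 0) {
                if (atomic_compare_exchange_weak(count, &old,
                                old + RWSEM_ACTIVE_READ_BIAS))
                        return true;    /* cmpxchg succeeded */
        }
        return false;   /* a writer holds it or waiters are queued */
}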
@@ -143,7 +155,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
  */
 static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
-	int tmp;
+	rwsem_count_t tmp;
 
 	tmp = RWSEM_ACTIVE_WRITE_BIAS;
 	asm volatile("# beginning down_write\n\t"
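The write fast path xadds the full RWSEM_ACTIVE_WRITE_BIAS in one step; the test that follows in the full function checks whether the count was zero beforehand, since any prior activity means the writer must queue. A hedged C11 sketch, with an illustrative slow-path name:

#include <stdatomic.h>

typedef signed long rwsem_count_t;
#define RWSEM_ACTIVE_MASK	0xffffffffL	/* x86_64 value */
#define RWSEM_ACTIVE_BIAS	0x00000001L
#define RWSEM_WAITING_BIAS	(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

/* slow-path stand-in; the real code calls call_rwsem_down_write_failed */
extern void down_write_slowpath(_Atomic rwsem_count_t *count);

static inline void down_write_sketch(_Atomic rwsem_count_t *count)
{
        /* xadd returns the old value; only old == 0 means uncontended */
        if (atomic_fetch_add(count, RWSEM_ACTIVE_WRITE_BIAS) != 0)
                down_write_slowpath(count);
}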
@@ -170,9 +182,9 @@ static inline void __down_write(struct rw_semaphore *sem)
  */
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-	signed long ret = cmpxchg(&sem->count,
-				  RWSEM_UNLOCKED_VALUE,
-				  RWSEM_ACTIVE_WRITE_BIAS);
+	rwsem_count_t ret = cmpxchg(&sem->count,
+				    RWSEM_UNLOCKED_VALUE,
+				    RWSEM_ACTIVE_WRITE_BIAS);
 	if (ret == RWSEM_UNLOCKED_VALUE)
 		return 1;
 	return 0;
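The write trylock is simpler still: a single cmpxchg that succeeds only when the count is exactly RWSEM_UNLOCKED_VALUE, installing the full write bias in one shot. A hedged C11 sketch of the same shape:

#include <stdatomic.h>
#include <stdbool.h>

typedef signed long rwsem_count_t;
#define RWSEM_UNLOCKED_VALUE		0x00000000L
#define RWSEM_ACTIVE_MASK		0xffffffffL	/* x86_64 value */
#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

static inline bool down_write_trylock_sketch(_Atomic rwsem_count_t *count)
{
        rwsem_count_t expected = RWSEM_UNLOCKED_VALUE;

        /* cmpxchg(&sem->count, UNLOCKED, WRITE_BIAS) == UNLOCKED */
        return atomic_compare_exchange_strong(count, &expected,
                                              RWSEM_ACTIVE_WRITE_BIAS);
}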
@@ -183,7 +195,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
  */
 static inline void __up_read(struct rw_semaphore *sem)
 {
-	__s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
+	rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
 	asm volatile("# beginning __up_read\n\t"
 		     LOCK_PREFIX " xadd %1,(%2)\n\t"
 		     /* subtracts 1, returns the old value */
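Releasing a read lock is the inverse xadd: subtract one read bias, and (in the branch beyond this hunk) a negative result means waiters are queued and the wakeup path should be considered. A hedged sketch, with an illustrative name for the wakeup helper:

#include <stdatomic.h>

typedef signed long rwsem_count_t;
#define RWSEM_ACTIVE_READ_BIAS	0x00000001L

/* wakeup stand-in; the real code calls call_rwsem_wake */
extern void rwsem_wake_sketch(_Atomic rwsem_count_t *count);

static inline void up_read_sketch(_Atomic rwsem_count_t *count)
{
        /* xadd of -1; "jns" skips the wakeup when the result stays >= 0 */
        if (atomic_fetch_add(count, -RWSEM_ACTIVE_READ_BIAS)
            - RWSEM_ACTIVE_READ_BIAS < 0)
                rwsem_wake_sketch(count);
}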
@@ -201,7 +213,7 @@ static inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-	unsigned long tmp;
+	rwsem_count_t tmp;
 	asm volatile("# beginning __up_write\n\t"
 		     LOCK_PREFIX " xadd %1,(%2)\n\t"
 		     /* tries to transition
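Releasing the write lock mirrors taking it: one xadd removes the entire write bias, and in the branch that follows this hunk a non-zero result means waiters accumulated while the writer held the lock, so the wakeup path runs. A hedged sketch under those assumptions:

#include <stdatomic.h>

typedef signed long rwsem_count_t;
#define RWSEM_ACTIVE_MASK	0xffffffffL	/* x86_64 value */
#define RWSEM_ACTIVE_BIAS	0x00000001L
#define RWSEM_WAITING_BIAS	(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

/* wakeup stand-in; the real code calls call_rwsem_wake */
extern void rwsem_wake_sketch(_Atomic rwsem_count_t *count);

static inline void up_write_sketch(_Atomic rwsem_count_t *count)
{
        /* xadd returns the old value; the flags reflect the new one,
         * and only an exactly-zero result means nobody is waiting */
        if (atomic_fetch_add(count, -RWSEM_ACTIVE_WRITE_BIAS)
            - RWSEM_ACTIVE_WRITE_BIAS != 0)
                rwsem_wake_sketch(count);
}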
@@ -221,33 +233,38 @@ static inline void __up_write(struct rw_semaphore *sem)
 static inline void __downgrade_write(struct rw_semaphore *sem)
 {
 	asm volatile("# beginning __downgrade_write\n\t"
-		     LOCK_PREFIX " add%z0 %2,(%1)\n\t"
-		     /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
+		     LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
+		     /*
+		      * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
+		      *     0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
+		      */
 		     " jns 1f\n\t"
 		     " call call_rwsem_downgrade_wake\n"
 		     "1:\n\t"
 		     "# ending __downgrade_write\n"
 		     : "+m" (sem->count)
-		     : "a" (sem), "i" (-RWSEM_WAITING_BIAS)
+		     : "a" (sem), "er" (-RWSEM_WAITING_BIAS)
 		     : "memory", "cc");
 }
 
 /*
  * implement atomic add functionality
  */
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
+static inline void rwsem_atomic_add(rwsem_count_t delta,
+				    struct rw_semaphore *sem)
 {
-	asm volatile(LOCK_PREFIX "add%z0 %1,%0"
+	asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
 		     : "+m" (sem->count)
-		     : "ir" (delta));
+		     : "er" (delta));
 }
 
 /*
  * implement exchange and add functionality
  */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
+						struct rw_semaphore *sem)
 {
-	int tmp = delta;
+	rwsem_count_t tmp = delta;
 
 	asm volatile(LOCK_PREFIX "xadd %0,%1"
 		     : "+r" (tmp), "+m" (sem->count)
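Two details of this final hunk deserve spelling out. The downgrade arithmetic: adding -RWSEM_WAITING_BIAS converts the write bias (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) into a plain read bias while leaving any waiter contribution in place, which is the 0xZZZZ0001 -> 0xYYYY0001 transition the comment describes; when waiters keep the result negative, call_rwsem_downgrade_wake runs. And the constraint changes from "i" to "er": on x86_64, a value such as -RWSEM_WAITING_BIAS (0x100000000) no longer fits a sign-extended 32-bit immediate, so "e" keeps the immediate form where it is encodable and "r" lets the compiler fall back to a register otherwise. A small stand-alone check of the downgrade arithmetic, reusing the constants from earlier in the patch:

#include <assert.h>

typedef signed long rwsem_count_t;
#define RWSEM_ACTIVE_MASK	0xffffffffL	/* x86_64 value */
#define RWSEM_ACTIVE_BIAS	0x00000001L
#define RWSEM_WAITING_BIAS	(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
        rwsem_count_t c = RWSEM_ACTIVE_WRITE_BIAS;   /* writer holds the lock */

        c += -RWSEM_WAITING_BIAS;       /* the LOCK add in __downgrade_write */
        assert(c == RWSEM_ACTIVE_READ_BIAS);    /* now held as one reader */
        return 0;
}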