@@ -105,7 +105,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 {
 	unsigned __percpu *pcpu_count;
 
-	rcu_read_lock();
+	rcu_read_lock_sched();
 
 	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
 
@@ -114,7 +114,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
 	else
 		atomic_inc(&ref->count);
 
-	rcu_read_unlock();
+	rcu_read_unlock_sched();
 }
 
 /**
@@ -134,7 +134,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 	unsigned __percpu *pcpu_count;
 	int ret = false;
 
-	rcu_read_lock();
+	rcu_read_lock_sched();
 
 	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
 
@@ -143,7 +143,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 		ret = true;
 	}
 
-	rcu_read_unlock();
+	rcu_read_unlock_sched();
 
 	return ret;
 }
@@ -159,7 +159,7 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
 {
 	unsigned __percpu *pcpu_count;
 
-	rcu_read_lock();
+	rcu_read_lock_sched();
 
 	pcpu_count = ACCESS_ONCE(ref->pcpu_count);
 
@@ -168,7 +168,7 @@ static inline void percpu_ref_put(struct percpu_ref *ref)
 	else if (unlikely(atomic_dec_and_test(&ref->count)))
 		ref->release(ref);
 
-	rcu_read_unlock();
+	rcu_read_unlock_sched();
 }
 
 #endif