percpu-rwsem.h

#ifndef _LINUX_PERCPU_RWSEM_H
#define _LINUX_PERCPU_RWSEM_H

#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>

struct percpu_rw_semaphore {
	unsigned __percpu *counters;
	bool locked;
	struct mutex mtx;
};

static inline void percpu_down_read(struct percpu_rw_semaphore *p)
{
	rcu_read_lock();
	if (unlikely(p->locked)) {
		/* Slow path: a writer holds the semaphore, serialize on the mutex. */
		rcu_read_unlock();
		mutex_lock(&p->mtx);
		this_cpu_inc(*p->counters);
		mutex_unlock(&p->mtx);
		return;
	}
	/* Fast path: per-CPU increment under the RCU read lock. */
	this_cpu_inc(*p->counters);
	rcu_read_unlock();
}

static inline void percpu_up_read(struct percpu_rw_semaphore *p)
{
	/*
	 * On X86, the write operation in this_cpu_dec serves as a memory
	 * unlock barrier (i.e. memory accesses may be moved before the
	 * write, but no memory accesses are moved past the write).
	 * On other architectures this may not be the case, so we need
	 * smp_mb() there.
	 */
#if defined(CONFIG_X86) && (!defined(CONFIG_X86_PPRO_FENCE) && !defined(CONFIG_X86_OOSTORE))
	barrier();
#else
	smp_mb();
#endif
	this_cpu_dec(*p->counters);
}

static inline unsigned __percpu_count(unsigned __percpu *counters)
{
	unsigned total = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		total += ACCESS_ONCE(*per_cpu_ptr(counters, cpu));

	return total;
}

static inline void percpu_down_write(struct percpu_rw_semaphore *p)
{
	mutex_lock(&p->mtx);
	p->locked = true;
	synchronize_rcu(); /* wait for readers that missed p->locked to finish their fast-path increment */
	while (__percpu_count(p->counters))
		msleep(1);
	smp_rmb(); /* paired with smp_mb() in percpu_up_read() */
}

static inline void percpu_up_write(struct percpu_rw_semaphore *p)
{
	p->locked = false;
	mutex_unlock(&p->mtx);
}

static inline int percpu_init_rwsem(struct percpu_rw_semaphore *p)
{
	p->counters = alloc_percpu(unsigned);
	if (unlikely(!p->counters))
		return -ENOMEM;
	p->locked = false;
	mutex_init(&p->mtx);
	return 0;
}

static inline void percpu_free_rwsem(struct percpu_rw_semaphore *p)
{
	free_percpu(p->counters);
	p->counters = NULL; /* catch use after free bugs */
}

#endif
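
A minimal usage sketch, not part of the header above: it assumes a hypothetical subsystem with one shared integer protected by a percpu_rw_semaphore. The identifiers my_state_sem, my_state, my_read_state() and my_change_state() are illustrative only; only the percpu_rw_semaphore calls themselves come from the header.

/* Illustrative user of the API above; all names here are hypothetical. */
static struct percpu_rw_semaphore my_state_sem;
static int my_state;

static int my_subsystem_init(void)
{
	/* Allocates the per-CPU counters; returns -ENOMEM on failure. */
	return percpu_init_rwsem(&my_state_sem);
}

static int my_read_state(void)
{
	int val;

	percpu_down_read(&my_state_sem); /* cheap per-CPU increment in the common case */
	val = my_state;
	percpu_up_read(&my_state_sem);
	return val;
}

static void my_change_state(int new_state)
{
	percpu_down_write(&my_state_sem); /* blocks until all readers have drained */
	my_state = new_state;
	percpu_up_write(&my_state_sem);
}

static void my_subsystem_exit(void)
{
	percpu_free_rwsem(&my_state_sem);
}

Note that both the write side (synchronize_rcu(), msleep()) and the contended read side (mutex_lock()) can sleep, so these calls are only usable from process context.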