cmpxchg.h

/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef __ASM_ARC_CMPXCHG_H
#define __ASM_ARC_CMPXCHG_H

#include <linux/types.h>
#include <asm/smp.h>

#ifdef CONFIG_ARC_HAS_LLSC

/*
 * Native cmpxchg: LLOCK/SCOND retry loop. scond fails (and bnz loops back)
 * if some other agent wrote the location after the llock.
 */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__(
	"1:	llock	%0, [%1]	\n"
	"	brne	%0, %2, 2f	\n"
	"	scond	%3, [%1]	\n"
	"	bnz	1b		\n"
	"2:				\n"
	: "=&r"(prev)
	: "r"(ptr), "ir"(expected),
	  "r"(new)	/* can't be "ir". scond can't take limm for "b" */
	: "cc");

	return prev;
}

#else

/*
 * No LLSC: emulate cmpxchg by doing the read-compare-write under
 * atomic_ops_lock.
 */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long expected, unsigned long new)
{
	unsigned long flags;
	int prev;
	volatile unsigned long *p = ptr;

	atomic_ops_lock(flags);
	prev = *p;
	if (prev == expected)
		*p = new;
	atomic_ops_unlock(flags);

	return prev;
}

#endif /* CONFIG_ARC_HAS_LLSC */

#define cmpxchg(ptr, o, n) ((typeof(*(ptr)))__cmpxchg((ptr), \
				(unsigned long)(o), (unsigned long)(n)))
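
/*
 * Illustrative sketch, not part of the original header: a typical caller-side
 * cmpxchg() retry loop. The names (shared_word, example_set_bit0) are
 * hypothetical; they only show how the macro above is meant to be used.
 *
 *	static unsigned long shared_word;
 *
 *	static void example_set_bit0(void)
 *	{
 *		unsigned long old, cur = shared_word;
 *
 *		do {
 *			old = cur;
 *			cur = cmpxchg(&shared_word, old, old | 1UL);
 *		} while (cur != old);	// another CPU raced us: retry
 *	}
 */
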
/*
 * When not supported natively (no LLSC), ARC cmpxchg() uses atomic_ops_lock
 * (UP/SMP) just to guarantee semantics.
 * atomic_cmpxchg() needs to use the same locks as its other atomic siblings,
 * which also happen to be atomic_ops_lock.
 *
 * Thus despite being semantically different, the implementation of
 * atomic_cmpxchg() is the same as that of cmpxchg().
 */
#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
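
/*
 * Illustrative sketch, not part of the original header: atomic_cmpxchg() in
 * the classic "add unless" idiom on a hypothetical atomic_t @v, mirroring the
 * generic atomic_add_unless() pattern.
 *
 *	static int example_add_unless(atomic_t *v, int a, int u)
 *	{
 *		int c = atomic_read(v);
 *
 *		while (c != u) {
 *			int old = atomic_cmpxchg(v, c, c + a);
 *			if (old == c)
 *				return 1;	// stored c + a
 *			c = old;		// raced: retry with the fresh value
 *		}
 *		return 0;			// counter already held @u
 *	}
 */
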
/*
 * xchg (reg with memory) based on "Native atomic" EX insn
 */
static inline unsigned long __xchg(unsigned long val, volatile void *ptr,
				   int size)
{
	/* deliberately never defined: any size other than 4 fails to link */
	extern unsigned long __xchg_bad_pointer(void);

	switch (size) {
	case 4:
		__asm__ __volatile__(
		"	ex  %0, [%1]	\n"
		: "+r"(val)
		: "r"(ptr)
		: "memory");

		return val;
	}
	return __xchg_bad_pointer();
}

#define _xchg(ptr, with) ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), \
						 sizeof(*(ptr))))

/*
 * On ARC700, the EX insn is inherently atomic, so by default a "vanilla"
 * xchg() does not require any locking. However there's a quirk.
 * ARC lacks native CMPXCHG, so it is emulated (see above) using external
 * locking - incidentally it "reuses" the same atomic_ops_lock used by the
 * atomic APIs.
 * Now, llist code uses cmpxchg() and xchg() on the same data, so xchg() needs
 * to abide by the same serializing rules, and thus ends up using
 * atomic_ops_lock as well.
 *
 * This however is only relevant on SMP when ARC lacks LLSC:
 *   if (UP or LLSC)
 *      xchg doesn't need serialization
 *   else <==> !(UP or LLSC) <==> (!UP and !LLSC) <==> (SMP and !LLSC)
 *      xchg needs serialization
 */

#if !defined(CONFIG_ARC_HAS_LLSC) && defined(CONFIG_SMP)

#define xchg(ptr, with)			\
({					\
	unsigned long flags;		\
	typeof(*(ptr)) old_val;		\
					\
	atomic_ops_lock(flags);		\
	old_val = _xchg(ptr, with);	\
	atomic_ops_unlock(flags);	\
	old_val;			\
})

#else

#define xchg(ptr, with) _xchg(ptr, with)

#endif
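
/*
 * Illustrative sketch, not part of the original header: a typical xchg() use,
 * atomically "stealing" a pending pointer so that exactly one context gets to
 * process it. pending_work and process() are hypothetical.
 *
 *	static void *pending_work;
 *
 *	static void example_consume_pending(void)
 *	{
 *		void *work = xchg(&pending_work, NULL);
 *
 *		if (work)
 *			process(work);
 *	}
 */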

/*
 * "atomic" variant of xchg()
 * REQ: It needs to follow the same serialization rules as other atomic_xxx()
 * Since xchg() doesn't always do that, it would seem that the following
 * definition is incorrect. But here's the rationale:
 *  SMP : Even xchg() takes the atomic_ops_lock, so OK.
 *  LLSC: atomic_ops_lock is not relevant at all (even if SMP, since LLSC
 *        is natively "SMP safe", no serialization required).
 *  UP  : other atomics disable IRQs, so there is no way an atomic_xchg() from
 *        a different context could clobber them. atomic_xchg() itself would be
 *        1 insn, so it can't be clobbered by others. Thus no serialization is
 *        required where atomic_xchg() is involved.
 */
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
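
/*
 * Illustrative sketch, not part of the original header: atomic_xchg() used as
 * a one-shot "claim" flag so the guarded work runs exactly once. The names are
 * hypothetical.
 *
 *	static atomic_t claimed = ATOMIC_INIT(0);
 *
 *	static bool example_try_claim(void)
 *	{
 *		return atomic_xchg(&claimed, 1) == 0;	// true only for the first caller
 *	}
 */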

#endif /* __ASM_ARC_CMPXCHG_H */