  1. /*
  2. * linux/include/asm-arm/locks.h
  3. *
  4. * Copyright (C) 2000 Russell King
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. *
  10. * Interrupt safe locking assembler.
  11. */
  12. #ifndef __ASM_PROC_LOCKS_H
  13. #define __ASM_PROC_LOCKS_H
  14. #if __LINUX_ARM_ARCH__ >= 6
  15. #define __down_op(ptr,fail) \
  16. ({ \
  17. __asm__ __volatile__( \
  18. "@ down_op\n" \
  19. "1: ldrex lr, [%0]\n" \
  20. " sub lr, lr, %1\n" \
  21. " strex ip, lr, [%0]\n" \
  22. " teq ip, #0\n" \
  23. " bne 1b\n" \
  24. " teq lr, #0\n" \
  25. " movmi ip, %0\n" \
  26. " blmi " #fail \
  27. : \
  28. : "r" (ptr), "I" (1) \
  29. : "ip", "lr", "cc", "memory"); \
  30. })
  31. #define __down_op_ret(ptr,fail) \
  32. ({ \
  33. unsigned int ret; \
  34. __asm__ __volatile__( \
  35. "@ down_op_ret\n" \
  36. "1: ldrex lr, [%1]\n" \
  37. " sub lr, lr, %2\n" \
  38. " strex ip, lr, [%1]\n" \
  39. " teq ip, #0\n" \
  40. " bne 1b\n" \
  41. " teq lr, #0\n" \
  42. " movmi ip, %1\n" \
  43. " movpl ip, #0\n" \
  44. " blmi " #fail "\n" \
  45. " mov %0, ip" \
  46. : "=&r" (ret) \
  47. : "r" (ptr), "I" (1) \
  48. : "ip", "lr", "cc", "memory"); \
  49. ret; \
  50. })
  51. #define __up_op(ptr,wake) \
  52. ({ \
  53. __asm__ __volatile__( \
  54. "@ up_op\n" \
  55. "1: ldrex lr, [%0]\n" \
  56. " add lr, lr, %1\n" \
  57. " strex ip, lr, [%0]\n" \
  58. " teq ip, #0\n" \
  59. " bne 1b\n" \
  60. " teq lr, #0\n" \
  61. " movle ip, %0\n" \
  62. " blle " #wake \
  63. : \
  64. : "r" (ptr), "I" (1) \
  65. : "ip", "lr", "cc", "memory"); \
  66. })
  67. /*
  68. * The value 0x01000000 supports up to 128 processors and
  69. * lots of processes. BIAS must be chosen such that sub'ing
  70. * BIAS once per CPU will result in the long remaining
  71. * negative.
  72. */
  73. #define RW_LOCK_BIAS 0x01000000
  74. #define RW_LOCK_BIAS_STR "0x01000000"
  75. #define __down_op_write(ptr,fail) \
  76. ({ \
  77. __asm__ __volatile__( \
  78. "@ down_op_write\n" \
  79. "1: ldrex lr, [%0]\n" \
  80. " sub lr, lr, %1\n" \
  81. " strex ip, lr, [%0]\n" \
  82. " teq ip, #0\n" \
  83. " bne 1b\n" \
  84. " teq lr, #0\n" \
  85. " movne ip, %0\n" \
  86. " blne " #fail \
  87. : \
  88. : "r" (ptr), "I" (RW_LOCK_BIAS) \
  89. : "ip", "lr", "cc", "memory"); \
  90. })
  91. #define __up_op_write(ptr,wake) \
  92. ({ \
  93. __asm__ __volatile__( \
  94. "@ up_op_read\n" \
  95. "1: ldrex lr, [%0]\n" \
  96. " add lr, lr, %1\n" \
  97. " strex ip, lr, [%0]\n" \
  98. " teq ip, #0\n" \
  99. " bne 1b\n" \
  100. " movcs ip, %0\n" \
  101. " blcs " #wake \
  102. : \
  103. : "r" (ptr), "I" (RW_LOCK_BIAS) \
  104. : "ip", "lr", "cc", "memory"); \
  105. })
  106. #define __down_op_read(ptr,fail) \
  107. __down_op(ptr, fail)
  108. #define __up_op_read(ptr,wake) \
  109. ({ \
  110. __asm__ __volatile__( \
  111. "@ up_op_read\n" \
  112. "1: ldrex lr, [%0]\n" \
  113. " add lr, lr, %1\n" \
  114. " strex ip, lr, [%0]\n" \
  115. " teq ip, #0\n" \
  116. " bne 1b\n" \
  117. " teq lr, #0\n" \
  118. " moveq ip, %0\n" \
  119. " bleq " #wake \
  120. : \
  121. : "r" (ptr), "I" (1) \
  122. : "ip", "lr", "cc", "memory"); \
  123. })
  124. #else
  125. #define __down_op(ptr,fail) \
  126. ({ \
  127. __asm__ __volatile__( \
  128. "@ down_op\n" \
  129. " mrs ip, cpsr\n" \
  130. " orr lr, ip, #128\n" \
  131. " msr cpsr_c, lr\n" \
  132. " ldr lr, [%0]\n" \
  133. " subs lr, lr, %1\n" \
  134. " str lr, [%0]\n" \
  135. " msr cpsr_c, ip\n" \
  136. " movmi ip, %0\n" \
  137. " blmi " #fail \
  138. : \
  139. : "r" (ptr), "I" (1) \
  140. : "ip", "lr", "cc", "memory"); \
  141. })
  142. #define __down_op_ret(ptr,fail) \
  143. ({ \
  144. unsigned int ret; \
  145. __asm__ __volatile__( \
  146. "@ down_op_ret\n" \
  147. " mrs ip, cpsr\n" \
  148. " orr lr, ip, #128\n" \
  149. " msr cpsr_c, lr\n" \
  150. " ldr lr, [%1]\n" \
  151. " subs lr, lr, %2\n" \
  152. " str lr, [%1]\n" \
  153. " msr cpsr_c, ip\n" \
  154. " movmi ip, %1\n" \
  155. " movpl ip, #0\n" \
  156. " blmi " #fail "\n" \
  157. " mov %0, ip" \
  158. : "=&r" (ret) \
  159. : "r" (ptr), "I" (1) \
  160. : "ip", "lr", "cc", "memory"); \
  161. ret; \
  162. })
  163. #define __up_op(ptr,wake) \
  164. ({ \
  165. __asm__ __volatile__( \
  166. "@ up_op\n" \
  167. " mrs ip, cpsr\n" \
  168. " orr lr, ip, #128\n" \
  169. " msr cpsr_c, lr\n" \
  170. " ldr lr, [%0]\n" \
  171. " adds lr, lr, %1\n" \
  172. " str lr, [%0]\n" \
  173. " msr cpsr_c, ip\n" \
  174. " movle ip, %0\n" \
  175. " blle " #wake \
  176. : \
  177. : "r" (ptr), "I" (1) \
  178. : "ip", "lr", "cc", "memory"); \
  179. })
  180. /*
  181. * The value 0x01000000 supports up to 128 processors and
  182. * lots of processes. BIAS must be chosen such that sub'ing
  183. * BIAS once per CPU will result in the long remaining
  184. * negative.
  185. */
  186. #define RW_LOCK_BIAS 0x01000000
  187. #define RW_LOCK_BIAS_STR "0x01000000"
  188. #define __down_op_write(ptr,fail) \
  189. ({ \
  190. __asm__ __volatile__( \
  191. "@ down_op_write\n" \
  192. " mrs ip, cpsr\n" \
  193. " orr lr, ip, #128\n" \
  194. " msr cpsr_c, lr\n" \
  195. " ldr lr, [%0]\n" \
  196. " subs lr, lr, %1\n" \
  197. " str lr, [%0]\n" \
  198. " msr cpsr_c, ip\n" \
  199. " movne ip, %0\n" \
  200. " blne " #fail \
  201. : \
  202. : "r" (ptr), "I" (RW_LOCK_BIAS) \
  203. : "ip", "lr", "cc", "memory"); \
  204. })
  205. #define __up_op_write(ptr,wake) \
  206. ({ \
  207. __asm__ __volatile__( \
  208. "@ up_op_read\n" \
  209. " mrs ip, cpsr\n" \
  210. " orr lr, ip, #128\n" \
  211. " msr cpsr_c, lr\n" \
  212. " ldr lr, [%0]\n" \
  213. " adds lr, lr, %1\n" \
  214. " str lr, [%0]\n" \
  215. " msr cpsr_c, ip\n" \
  216. " movcs ip, %0\n" \
  217. " blcs " #wake \
  218. : \
  219. : "r" (ptr), "I" (RW_LOCK_BIAS) \
  220. : "ip", "lr", "cc", "memory"); \
  221. })
  222. #define __down_op_read(ptr,fail) \
  223. __down_op(ptr, fail)
  224. #define __up_op_read(ptr,wake) \
  225. ({ \
  226. __asm__ __volatile__( \
  227. "@ up_op_read\n" \
  228. " mrs ip, cpsr\n" \
  229. " orr lr, ip, #128\n" \
  230. " msr cpsr_c, lr\n" \
  231. " ldr lr, [%0]\n" \
  232. " adds lr, lr, %1\n" \
  233. " str lr, [%0]\n" \
  234. " msr cpsr_c, ip\n" \
  235. " moveq ip, %0\n" \
  236. " bleq " #wake \
  237. : \
  238. : "r" (ptr), "I" (1) \
  239. : "ip", "lr", "cc", "memory"); \
  240. })
  241. #endif
  242. #endif