atomic.h

/*
 * Atomic operations that C can't guarantee us. Useful for
 * resource counting etc.
 *
 * But use these as seldom as possible since they are much slower
 * than regular operations.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996, 97, 99, 2000, 03, 04 by Ralf Baechle
 */

/*
 * As a workaround for the ATOMIC_DEC_AND_LOCK / atomic_dec_and_lock mess in
 * <linux/spinlock.h> we have to include <linux/spinlock.h> outside the
 * main big wrapper ...
 */
#include <linux/config.h>
#include <linux/spinlock.h>

#ifndef _ASM_ATOMIC_H
#define _ASM_ATOMIC_H

#include <asm/cpu-features.h>
#include <asm/war.h>

extern spinlock_t atomic_lock;

typedef struct { volatile int counter; } atomic_t;

#define ATOMIC_INIT(i)	{ (i) }

/*
 * atomic_read - read atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically reads the value of @v.
 */
#define atomic_read(v)	((v)->counter)

/*
 * atomic_set - set atomic variable
 * @v: pointer of type atomic_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic_set(v,i)	((v)->counter = (i))
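
/*
 * Illustrative usage sketch, not part of this interface: a hypothetical
 * per-driver counter declared with ATOMIC_INIT and accessed with
 * atomic_read()/atomic_set().  All "example_*" names below are made up.
 */
#if 0
static atomic_t example_users = ATOMIC_INIT(0);	/* hypothetical counter */

static void example_reset_users(void)
{
	atomic_set(&example_users, 0);		/* plain store of a new value */
}

static int example_user_count(void)
{
	return atomic_read(&example_users);	/* plain load of the counter */
}
#endif
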
/*
 * atomic_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic_add(int i, atomic_t * v)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long temp;

		__asm__ __volatile__(
		"1:	ll	%0, %1		# atomic_add\n"
		"	addu	%0, %2\n"
		"	sc	%0, %1\n"
		"	beqzl	%0, 1b\n"
		: "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter));
	} else if (cpu_has_llsc) {
		unsigned long temp;

		__asm__ __volatile__(
		"1:	ll	%0, %1		# atomic_add\n"
		"	addu	%0, %2\n"
		"	sc	%0, %1\n"
		"	beqz	%0, 1b\n"
		: "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter));
	} else {
		unsigned long flags;

		spin_lock_irqsave(&atomic_lock, flags);
		v->counter += i;
		spin_unlock_irqrestore(&atomic_lock, flags);
	}
}

/*
 * atomic_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic_sub(int i, atomic_t * v)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long temp;

		__asm__ __volatile__(
		"1:	ll	%0, %1		# atomic_sub\n"
		"	subu	%0, %2\n"
		"	sc	%0, %1\n"
		"	beqzl	%0, 1b\n"
		: "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter));
	} else if (cpu_has_llsc) {
		unsigned long temp;

		__asm__ __volatile__(
		"1:	ll	%0, %1		# atomic_sub\n"
		"	subu	%0, %2\n"
		"	sc	%0, %1\n"
		"	beqz	%0, 1b\n"
		: "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter));
	} else {
		unsigned long flags;

		spin_lock_irqsave(&atomic_lock, flags);
		v->counter -= i;
		spin_unlock_irqrestore(&atomic_lock, flags);
	}
}
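
/*
 * Illustrative usage sketch with hypothetical "example_*" names: the plain
 * atomic_add()/atomic_sub() operations fit simple statistics counters where
 * no return value and no memory ordering are needed.  On LL/SC-capable CPUs
 * they compile to a lock-free retry loop; otherwise they fall back to
 * atomic_lock.
 */
#if 0
static atomic_t example_rx_packets = ATOMIC_INIT(0);	/* hypothetical stat */

static void example_account_rx(int nr_packets)
{
	atomic_add(nr_packets, &example_rx_packets);
}

static void example_unaccount_rx(int nr_packets)
{
	atomic_sub(nr_packets, &example_rx_packets);
}
#endif
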
/*
 * Same as above, but return the result value
 */
static __inline__ int atomic_add_return(int i, atomic_t * v)
{
	unsigned long result;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long temp;

		__asm__ __volatile__(
		"1:	ll	%1, %2		# atomic_add_return\n"
		"	addu	%0, %1, %3\n"
		"	sc	%0, %2\n"
		"	beqzl	%0, 1b\n"
		"	addu	%0, %1, %3\n"
		"	sync\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long temp;

		__asm__ __volatile__(
		"1:	ll	%1, %2		# atomic_add_return\n"
		"	addu	%0, %1, %3\n"
		"	sc	%0, %2\n"
		"	beqz	%0, 1b\n"
		"	addu	%0, %1, %3\n"
		"	sync\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else {
		unsigned long flags;

		spin_lock_irqsave(&atomic_lock, flags);
		result = v->counter;
		result += i;
		v->counter = result;
		spin_unlock_irqrestore(&atomic_lock, flags);
	}

	return result;
}

static __inline__ int atomic_sub_return(int i, atomic_t * v)
{
	unsigned long result;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long temp;

		__asm__ __volatile__(
		"1:	ll	%1, %2		# atomic_sub_return\n"
		"	subu	%0, %1, %3\n"
		"	sc	%0, %2\n"
		"	beqzl	%0, 1b\n"
		"	subu	%0, %1, %3\n"
		"	sync\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long temp;

		__asm__ __volatile__(
		"1:	ll	%1, %2		# atomic_sub_return\n"
		"	subu	%0, %1, %3\n"
		"	sc	%0, %2\n"
		"	beqz	%0, 1b\n"
		"	subu	%0, %1, %3\n"
		"	sync\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else {
		unsigned long flags;

		spin_lock_irqsave(&atomic_lock, flags);
		result = v->counter;
		result -= i;
		v->counter = result;
		spin_unlock_irqrestore(&atomic_lock, flags);
	}

	return result;
}
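
/*
 * Illustrative usage sketch with a hypothetical "example_*" name: the
 * *_return variants return the new value and are serializing (note the
 * "sync" and the "memory" clobber above), so they can hand out unique
 * sequence numbers.
 */
#if 0
static atomic_t example_seq = ATOMIC_INIT(0);	/* hypothetical sequence source */

static int example_next_seq(void)
{
	/* no two callers can observe the same return value */
	return atomic_add_return(1, &example_seq);
}
#endif
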
/*
 * atomic_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically test @v and subtract @i if the result is greater than or
 * equal to zero.  The function returns the old value of @v minus @i.
 */
static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
{
	int result;	/* signed, so the "result >= 0" check below works */

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long temp;

		__asm__ __volatile__(
		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
		"	subu	%0, %1, %3\n"
		"	bltz	%0, 1f\n"
		"	sc	%0, %2\n"
		"	beqzl	%0, 1b\n"
		"	sync\n"
		"1:\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long temp;

		__asm__ __volatile__(
		"1:	ll	%1, %2		# atomic_sub_if_positive\n"
		"	subu	%0, %1, %3\n"
		"	bltz	%0, 1f\n"
		"	sc	%0, %2\n"
		"	beqz	%0, 1b\n"
		"	sync\n"
		"1:\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else {
		unsigned long flags;

		spin_lock_irqsave(&atomic_lock, flags);
		result = v->counter;
		result -= i;
		if (result >= 0)
			v->counter = result;
		spin_unlock_irqrestore(&atomic_lock, flags);
	}

	return result;
}

#define atomic_dec_return(v) atomic_sub_return(1,(v))
#define atomic_inc_return(v) atomic_add_return(1,(v))

/*
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)

/*
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)

/*
 * atomic_dec_and_test - decrement by 1 and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)

/*
 * atomic_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic_t
 */
#define atomic_dec_if_positive(v)	atomic_sub_if_positive(1, v)

/*
 * atomic_inc - increment atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1.
 */
#define atomic_inc(v) atomic_add(1,(v))

/*
 * atomic_dec - decrement atomic variable
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic_dec(v) atomic_sub(1,(v))

/*
 * atomic_add_negative - add and test if negative
 * @v: pointer of type atomic_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * the result is greater than or equal to zero.
 */
#define atomic_add_negative(i,v) (atomic_add_return(i, (v)) < 0)
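
/*
 * Illustrative usage sketch with hypothetical "example_*" names: the classic
 * reference-count release pattern built on atomic_dec_and_test().  Exactly
 * one caller observes the 1 -> 0 transition and performs the teardown.
 */
#if 0
struct example_object {				/* hypothetical refcounted object */
	atomic_t refcnt;
	void (*release)(struct example_object *);
};

static void example_put(struct example_object *obj)
{
	if (atomic_dec_and_test(&obj->refcnt))
		obj->release(obj);
}
#endif
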
#ifdef CONFIG_MIPS64

typedef struct { volatile __s64 counter; } atomic64_t;

#define ATOMIC64_INIT(i)	{ (i) }

/*
 * atomic64_read - read atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically reads the value of @v.
 */
#define atomic64_read(v)	((v)->counter)

/*
 * atomic64_set - set atomic variable
 * @v: pointer of type atomic64_t
 * @i: required value
 *
 * Atomically sets the value of @v to @i.
 */
#define atomic64_set(v,i)	((v)->counter = (i))

/*
 * atomic64_add - add integer to atomic variable
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v.
 */
static __inline__ void atomic64_add(long i, atomic64_t * v)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long temp;

		__asm__ __volatile__(
		"1:	lld	%0, %1		# atomic64_add\n"
		"	daddu	%0, %2\n"
		"	scd	%0, %1\n"
		"	beqzl	%0, 1b\n"
		: "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter));
	} else if (cpu_has_llsc) {
		unsigned long temp;

		__asm__ __volatile__(
		"1:	lld	%0, %1		# atomic64_add\n"
		"	daddu	%0, %2\n"
		"	scd	%0, %1\n"
		"	beqz	%0, 1b\n"
		: "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter));
	} else {
		unsigned long flags;

		spin_lock_irqsave(&atomic_lock, flags);
		v->counter += i;
		spin_unlock_irqrestore(&atomic_lock, flags);
	}
}

/*
 * atomic64_sub - subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically subtracts @i from @v.
 */
static __inline__ void atomic64_sub(long i, atomic64_t * v)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long temp;

		__asm__ __volatile__(
		"1:	lld	%0, %1		# atomic64_sub\n"
		"	dsubu	%0, %2\n"
		"	scd	%0, %1\n"
		"	beqzl	%0, 1b\n"
		: "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter));
	} else if (cpu_has_llsc) {
		unsigned long temp;

		__asm__ __volatile__(
		"1:	lld	%0, %1		# atomic64_sub\n"
		"	dsubu	%0, %2\n"
		"	scd	%0, %1\n"
		"	beqz	%0, 1b\n"
		: "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter));
	} else {
		unsigned long flags;

		spin_lock_irqsave(&atomic_lock, flags);
		v->counter -= i;
		spin_unlock_irqrestore(&atomic_lock, flags);
	}
}

/*
 * Same as above, but return the result value
 */
static __inline__ long atomic64_add_return(long i, atomic64_t * v)
{
	unsigned long result;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long temp;

		__asm__ __volatile__(
		"1:	lld	%1, %2		# atomic64_add_return\n"
		"	daddu	%0, %1, %3\n"
		"	scd	%0, %2\n"
		"	beqzl	%0, 1b\n"
		"	daddu	%0, %1, %3\n"
		"	sync\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long temp;

		__asm__ __volatile__(
		"1:	lld	%1, %2		# atomic64_add_return\n"
		"	daddu	%0, %1, %3\n"
		"	scd	%0, %2\n"
		"	beqz	%0, 1b\n"
		"	daddu	%0, %1, %3\n"
		"	sync\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else {
		unsigned long flags;

		spin_lock_irqsave(&atomic_lock, flags);
		result = v->counter;
		result += i;
		v->counter = result;
		spin_unlock_irqrestore(&atomic_lock, flags);
	}

	return result;
}

static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
{
	unsigned long result;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long temp;

		__asm__ __volatile__(
		"1:	lld	%1, %2		# atomic64_sub_return\n"
		"	dsubu	%0, %1, %3\n"
		"	scd	%0, %2\n"
		"	beqzl	%0, 1b\n"
		"	dsubu	%0, %1, %3\n"
		"	sync\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long temp;

		__asm__ __volatile__(
		"1:	lld	%1, %2		# atomic64_sub_return\n"
		"	dsubu	%0, %1, %3\n"
		"	scd	%0, %2\n"
		"	beqz	%0, 1b\n"
		"	dsubu	%0, %1, %3\n"
		"	sync\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else {
		unsigned long flags;

		spin_lock_irqsave(&atomic_lock, flags);
		result = v->counter;
		result -= i;
		v->counter = result;
		spin_unlock_irqrestore(&atomic_lock, flags);
	}

	return result;
}

/*
 * atomic64_sub_if_positive - conditionally subtract integer from atomic variable
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically test @v and subtract @i if the result is greater than or
 * equal to zero.  The function returns the old value of @v minus @i.
 */
static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
{
	long result;	/* signed, so the "result >= 0" check below works */

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long temp;

		__asm__ __volatile__(
		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
		"	dsubu	%0, %1, %3\n"
		"	bltz	%0, 1f\n"
		"	scd	%0, %2\n"
		"	beqzl	%0, 1b\n"
		"	sync\n"
		"1:\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else if (cpu_has_llsc) {
		unsigned long temp;

		__asm__ __volatile__(
		"1:	lld	%1, %2		# atomic64_sub_if_positive\n"
		"	dsubu	%0, %1, %3\n"
		"	bltz	%0, 1f\n"
		"	scd	%0, %2\n"
		"	beqz	%0, 1b\n"
		"	sync\n"
		"1:\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");
	} else {
		unsigned long flags;

		spin_lock_irqsave(&atomic_lock, flags);
		result = v->counter;
		result -= i;
		if (result >= 0)
			v->counter = result;
		spin_unlock_irqrestore(&atomic_lock, flags);
	}

	return result;
}

#define atomic64_dec_return(v) atomic64_sub_return(1,(v))
#define atomic64_inc_return(v) atomic64_add_return(1,(v))

/*
 * atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)

/*
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)

/*
 * atomic64_dec_and_test - decrement by 1 and test
 * @v: pointer of type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
#define atomic64_dec_and_test(v) (atomic64_sub_return(1, (v)) == 0)

/*
 * atomic64_dec_if_positive - decrement by 1 if old value positive
 * @v: pointer of type atomic64_t
 */
#define atomic64_dec_if_positive(v)	atomic64_sub_if_positive(1, v)

/*
 * atomic64_inc - increment atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1.
 */
#define atomic64_inc(v) atomic64_add(1,(v))

/*
 * atomic64_dec - decrement atomic variable
 * @v: pointer of type atomic64_t
 *
 * Atomically decrements @v by 1.
 */
#define atomic64_dec(v) atomic64_sub(1,(v))

/*
 * atomic64_add_negative - add and test if negative
 * @v: pointer of type atomic64_t
 * @i: integer value to add
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * the result is greater than or equal to zero.
 */
#define atomic64_add_negative(i,v) (atomic64_add_return(i, (v)) < 0)
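
/*
 * Illustrative usage sketch with a hypothetical "example_*" name: atomic64_t
 * suits 64-bit-wide counters, such as byte totals, that would quickly wrap a
 * 32-bit atomic_t.
 */
#if 0
static atomic64_t example_tx_bytes = ATOMIC64_INIT(0);	/* hypothetical stat */

static void example_account_tx(long bytes)
{
	atomic64_add(bytes, &example_tx_bytes);
}
#endif
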
#endif /* CONFIG_MIPS64 */

/*
 * The atomic*_return operations are serializing; the plain (non-*_return)
 * versions are not.  Callers that need ordering around atomic_inc() or
 * atomic_dec() should use the barrier macros below.
 */
#define smp_mb__before_atomic_dec()	smp_mb()
#define smp_mb__after_atomic_dec()	smp_mb()
#define smp_mb__before_atomic_inc()	smp_mb()
#define smp_mb__after_atomic_inc()	smp_mb()
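
/*
 * Illustrative usage sketch with hypothetical "example_*" names: pairing
 * smp_mb__before_atomic_dec() with a plain atomic_dec() so that an earlier
 * store is visible to other CPUs before the decrement is.
 */
#if 0
static int example_done;
static atomic_t example_pending = ATOMIC_INIT(1);	/* hypothetical flag */

static void example_finish(void)
{
	example_done = 1;		/* publish the result ... */
	smp_mb__before_atomic_dec();	/* ... before the counter is seen to drop */
	atomic_dec(&example_pending);
}
#endif
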
#endif /* _ASM_ATOMIC_H */