/*
 * Hardware spinlock framework
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)	"%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>

#include "hwspinlock_internal.h"
/* radix tree tags */
#define HWSPINLOCK_UNUSED	(0) /* tags an hwspinlock as unused */

/*
 * A radix tree is used to maintain the available hwspinlock instances.
 * The tree associates hwspinlock pointers with their integer key id,
 * and provides an easy-to-use API which makes the hwspinlock core code
 * simple and easy to read.
 *
 * Radix trees are quick on lookups, and reasonably efficient in terms of
 * storage, especially with high density usages such as this framework
 * requires (a continuous range of integer keys, beginning with zero, is
 * used as the IDs of the hwspinlock instances).
 *
 * The radix tree API supports tagging items in the tree, which this
 * framework uses to mark unused hwspinlock instances (see the
 * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
 * tree, looking for an unused hwspinlock instance, is reduced to a
 * single radix tree API call.
 */
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);

/*
 * Synchronization of access to the tree is achieved using this spinlock,
 * as the radix-tree API requires that users provide all synchronization.
 */
static DEFINE_SPINLOCK(hwspinlock_tree_lock);
/**
 * __hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 * @mode: controls whether local interrupts are disabled or not
 * @flags: a pointer where the caller's interrupt state will be saved at (if
 *         requested)
 *
 * This function attempts to lock an hwspinlock, and will immediately
 * fail if the hwspinlock is already taken.
 *
 * Upon a successful return from this function, preemption (and possibly
 * interrupts) is disabled, so the caller must not sleep, and is advised to
 * release the hwspinlock as soon as possible. This is required in order to
 * minimize remote cores polling on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether he wants their previous state to be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_trylock, spin_trylock_irq and
 * spin_trylock_irqsave.
 *
 * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
 * the hwspinlock was already taken.
 * This function will never sleep.
 */
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	int ret;

	BUG_ON(!hwlock);
	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

	/*
	 * This spin_lock{_irq, _irqsave} serves three purposes:
	 *
	 * 1. Disable preemption, in order to minimize the period of time
	 *    in which the hwspinlock is taken. This is important in order
	 *    to minimize the possible polling on the hardware interconnect
	 *    by a remote user of this lock.
	 * 2. Make the hwspinlock SMP-safe (so we can take it from
	 *    additional contexts on the local host).
	 * 3. Ensure that in_atomic/might_sleep checks catch potential
	 *    problems with hwspinlock usage (e.g. scheduler checks like
	 *    'scheduling while atomic' etc.)
	 */
	if (mode == HWLOCK_IRQSTATE)
		ret = spin_trylock_irqsave(&hwlock->lock, *flags);
	else if (mode == HWLOCK_IRQ)
		ret = spin_trylock_irq(&hwlock->lock);
	else
		ret = spin_trylock(&hwlock->lock);

	/* is lock already taken by another context on the local cpu ? */
	if (!ret)
		return -EBUSY;

	/* try to take the hwspinlock device */
	ret = hwlock->ops->trylock(hwlock);

	/* if hwlock is already taken, undo spin_trylock_* and exit */
	if (!ret) {
		if (mode == HWLOCK_IRQSTATE)
			spin_unlock_irqrestore(&hwlock->lock, *flags);
		else if (mode == HWLOCK_IRQ)
			spin_unlock_irq(&hwlock->lock);
		else
			spin_unlock(&hwlock->lock);

		return -EBUSY;
	}

	/*
	 * We can be sure the other core's memory operations
	 * are observable to us only _after_ we successfully take
	 * the hwspinlock, and we must make sure that subsequent memory
	 * operations (both reads and writes) will not be reordered before
	 * we actually took the hwspinlock.
	 *
	 * Note: the implicit memory barrier of the spinlock above is too
	 * early, so we need this additional explicit memory barrier.
	 */
	mb();

	return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);
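
/*
 * Usage sketch (illustrative, not part of the original file): callers are
 * normally expected to go through the static inline wrappers assumed to be
 * declared in <linux/hwspinlock.h> (e.g. hwspin_trylock_irqsave()) rather
 * than calling __hwspin_trylock() directly. With a previously requested
 * 'hwlock', a non-blocking attempt could look like:
 *
 *	unsigned long flags;
 *
 *	if (!hwspin_trylock_irqsave(hwlock, &flags)) {
 *		// briefly touch the shared resource, then release it
 *		hwspin_unlock_irqrestore(hwlock, &flags);
 *	}
 */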

/**
 * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @mode: mode which controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved at (if
 *         requested)
 *
 * This function locks the given @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up after @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled
 * (and possibly local interrupts, too), so the caller must not sleep,
 * and is advised to release the hwspinlock as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether he wants their previous state to be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
					int mode, unsigned long *flags)
{
	int ret;
	unsigned long expire;

	expire = msecs_to_jiffies(to) + jiffies;

	for (;;) {
		/* Try to take the hwspinlock */
		ret = __hwspin_trylock(hwlock, mode, flags);
		if (ret != -EBUSY)
			break;

		/*
		 * The lock is already taken, let's check if the user wants
		 * us to try again
		 */
		if (time_is_before_eq_jiffies(expire))
			return -ETIMEDOUT;

		/*
		 * Allow platform-specific relax handlers to prevent
		 * hogging the interconnect (no sleeping, though)
		 */
		if (hwlock->ops->relax)
			hwlock->ops->relax(hwlock);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);

/**
 * __hwspin_unlock() - unlock a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @mode: controls whether local interrupts need to be restored or not
 * @flags: previous caller's interrupt state to restore (if requested)
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * (possibly) enable interrupts or restore their previous state.
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 *
 * The user decides whether local interrupts should be enabled or not, and
 * if yes, whether he wants their previous state to be restored. It is up
 * to the user to choose the appropriate @mode of operation, exactly the
 * same way users decide between spin_unlock, spin_unlock_irq and
 * spin_unlock_irqrestore.
 *
 * The function will never sleep.
 */
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	BUG_ON(!hwlock);
	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

	/*
	 * We must make sure that memory operations (both reads and writes),
	 * done before unlocking the hwspinlock, will not be reordered
	 * after the lock is released.
	 *
	 * That's the purpose of this explicit memory barrier.
	 *
	 * Note: the memory barrier induced by the spin_unlock below is too
	 * late; the other core is going to access memory soon after it will
	 * take the hwspinlock, and by then we want to be sure our memory
	 * operations are already observable.
	 */
	mb();

	hwlock->ops->unlock(hwlock);

	/* Undo the spin_trylock{_irq, _irqsave} called while locking */
	if (mode == HWLOCK_IRQSTATE)
		spin_unlock_irqrestore(&hwlock->lock, *flags);
	else if (mode == HWLOCK_IRQ)
		spin_unlock_irq(&hwlock->lock);
	else
		spin_unlock(&hwlock->lock);
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);
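
/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * lock/unlock sequence, using the wrappers assumed to be declared in
 * <linux/hwspinlock.h> (hwspin_lock_timeout_irqsave() and
 * hwspin_unlock_irqrestore()), with a hypothetical 10 msecs timeout:
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	ret = hwspin_lock_timeout_irqsave(hwlock, 10, &flags);
 *	if (ret == 0) {
 *		// critical section shared with a remote core: keep it short,
 *		// since preemption and local interrupts are disabled here
 *		hwspin_unlock_irqrestore(hwlock, &flags);
 *	}
 */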

/**
 * hwspin_lock_register() - register a new hw spinlock
 * @hwlock: hwspinlock to register.
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock instance.
 *
 * Can be called from an atomic context (will not sleep) but not from
 * within interrupt context.
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_register(struct hwspinlock *hwlock)
{
	struct hwspinlock *tmp;
	int ret;

	if (!hwlock || !hwlock->ops ||
			!hwlock->ops->trylock || !hwlock->ops->unlock) {
		pr_err("invalid parameters\n");
		return -EINVAL;
	}

	spin_lock_init(&hwlock->lock);

	spin_lock(&hwspinlock_tree_lock);

	ret = radix_tree_insert(&hwspinlock_tree, hwlock->id, hwlock);
	if (ret == -EEXIST)
		pr_err("hwspinlock id %d already exists!\n", hwlock->id);
	if (ret)
		goto out;

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
							HWSPINLOCK_UNUSED);

	/* self-sanity check which should never fail */
	WARN_ON(tmp != hwlock);

out:
	spin_unlock(&hwspinlock_tree_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);
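
/*
 * Registration sketch (illustrative, not part of the original file): a
 * platform-specific driver is expected to fill in a struct hwspinlock
 * (fields as declared in hwspinlock_internal.h) and hand it to
 * hwspin_lock_register(). The ops, helpers and index 'i' below are all
 * hypothetical names:
 *
 *	static int myhwlock_trylock(struct hwspinlock *lock)
 *	{
 *		// return 1 when the hardware lock was taken, 0 otherwise
 *		return my_read_lock_reg(lock) == 0;
 *	}
 *
 *	static const struct hwspinlock_ops myhwlock_ops = {
 *		.trylock	= myhwlock_trylock,
 *		.unlock		= myhwlock_unlock,
 *		.relax		= myhwlock_relax,
 *	};
 *
 *	hwlock->dev = &pdev->dev;
 *	hwlock->ops = &myhwlock_ops;
 *	hwlock->id = i;
 *	ret = hwspin_lock_register(hwlock);
 */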

/**
 * hwspin_lock_unregister() - unregister an hw spinlock
 * @id: index of the specific hwspinlock to unregister
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Can be called from an atomic context (will not sleep) but not from
 * within interrupt context.
 *
 * Returns the address of hwspinlock @id on success, or NULL on failure
 */
struct hwspinlock *hwspin_lock_unregister(unsigned int id)
{
	struct hwspinlock *hwlock = NULL;
	int ret;

	spin_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is not in use (tag is set) */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_err("hwspinlock %d still in use (or not present)\n", id);
		goto out;
	}

	hwlock = radix_tree_delete(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_err("failed to delete hwspinlock %d\n", id);
		goto out;
	}

out:
	spin_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);
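
/*
 * Removal sketch (illustrative, not part of the original file): the same
 * platform driver would typically unregister each lock id 'i' it registered
 * in its remove path, and must be prepared for the call to fail while a
 * lock is still held by a user:
 *
 *	struct hwspinlock *hwlock = hwspin_lock_unregister(i);
 *
 *	if (!hwlock)
 *		return -EBUSY;	// still requested by a user, try again later
 */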

/**
 * __hwspin_lock_request() - tag an hwspinlock as used and power it up
 *
 * This is an internal function that prepares an hwspinlock instance
 * before it is given to the user. The function assumes that
 * hwspinlock_tree_lock is taken.
 *
 * Returns 0 or positive to indicate success, and a negative value to
 * indicate an error (with the appropriate error code)
 */
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
	struct hwspinlock *tmp;
	int ret;

	/* prevent underlying implementation from being removed */
	if (!try_module_get(hwlock->dev->driver->owner)) {
		dev_err(hwlock->dev, "%s: can't get owner\n", __func__);
		return -EINVAL;
	}

	/* notify PM core that power is now needed */
	ret = pm_runtime_get_sync(hwlock->dev);
	if (ret < 0) {
		dev_err(hwlock->dev, "%s: can't power on device\n", __func__);
		/* balance the module reference taken above */
		module_put(hwlock->dev->driver->owner);
		return ret;
	}

	/* mark hwspinlock as used, should not fail */
	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock->id,
							HWSPINLOCK_UNUSED);

	/* self-sanity check that should never fail */
	WARN_ON(tmp != hwlock);

	return ret;
}

/**
 * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
 * @hwlock: a valid hwspinlock instance
 *
 * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
 */
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	return hwlock->id;
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);

/**
 * hwspin_lock_request() - request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Can be called from an atomic context (will not sleep) but not from
 * within interrupt context (simply because there is no use case for
 * that yet).
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request(void)
{
	struct hwspinlock *hwlock;
	int ret;

	spin_lock(&hwspinlock_tree_lock);

	/* look for an unused lock */
	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
						0, 1, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("a free hwspinlock is not available\n");
		hwlock = NULL;
		goto out;
	}

	/* sanity check that should never fail */
	WARN_ON(ret > 1);

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	spin_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);
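
/*
 * Usage sketch (illustrative, not part of the original file): a user that
 * does not care which lock it gets can request one dynamically and then
 * publish its id to the remote core (how the id is published is outside
 * the scope of this framework):
 *
 *	struct hwspinlock *hwlock;
 *	int id;
 *
 *	hwlock = hwspin_lock_request();
 *	if (!hwlock)
 *		return -EBUSY;
 *
 *	id = hwspin_lock_get_id(hwlock);
 *	// ... send 'id' to the remote core, e.g. via shared memory or IPC
 */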

/**
 * hwspin_lock_request_specific() - request for a specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Can be called from an atomic context (will not sleep) but not from
 * within interrupt context (simply because there is no use case for
 * that yet).
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	struct hwspinlock *hwlock;
	int ret;

	spin_lock(&hwspinlock_tree_lock);

	/* make sure this hwspinlock exists */
	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_warn("hwspinlock %u does not exist\n", id);
		goto out;
	}

	/* sanity check (this shouldn't happen) */
	WARN_ON(hwlock->id != id);

	/* make sure this hwspinlock is unused */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("hwspinlock %u is already in use\n", id);
		hwlock = NULL;
		goto out;
	}

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	spin_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);

/**
 * hwspin_lock_free() - free a specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Can be called from an atomic context (will not sleep) but not from
 * within interrupt context (simply because there is no use case for
 * that yet).
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_free(struct hwspinlock *hwlock)
{
	struct hwspinlock *tmp;
	int ret;

	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	spin_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is used */
	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock->id,
							HWSPINLOCK_UNUSED);
	if (ret == 1) {
		dev_err(hwlock->dev, "%s: hwlock is already free\n", __func__);
		dump_stack();
		ret = -EINVAL;
		goto out;
	}

	/* notify the underlying device that power is not needed */
	ret = pm_runtime_put(hwlock->dev);
	if (ret < 0)
		goto out;

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock->id,
							HWSPINLOCK_UNUSED);

	/* sanity check (this shouldn't happen) */
	WARN_ON(tmp != hwlock);

	module_put(hwlock->dev->driver->owner);

out:
	spin_unlock(&hwspinlock_tree_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);
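
/*
 * Lifecycle sketch (illustrative, not part of the original file): board or
 * driver code that reserved a predefined lock id eventually releases it
 * again so the lock can be reused and its device powered down. The id
 * value 3 below is hypothetical:
 *
 *	struct hwspinlock *hwlock = hwspin_lock_request_specific(3);
 *
 *	if (!hwlock)
 *		return -EBUSY;
 *
 *	// ... use the lock via hwspin_lock_timeout_*() / hwspin_unlock_*()
 *
 *	ret = hwspin_lock_free(hwlock);
 */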

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");