blk-ioc.c

/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>      /* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
        BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
        atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);
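
/*
 * RCU callback for freeing an icq.  The cache to free into was recorded
 * in ->__rcu_icq_cache by ioc_destroy_icq() because the queue (and with
 * it the elevator type) may already be gone by the time this runs.
 */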
static void icq_free_icq_rcu(struct rcu_head *head)
{
        struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

        kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/* Exit an icq.  Called with both ioc and q locked. */
static void ioc_exit_icq(struct io_cq *icq)
{
        struct elevator_type *et = icq->q->elevator->type;

        if (icq->flags & ICQ_EXITED)
                return;

        if (et->ops.elevator_exit_icq_fn)
                et->ops.elevator_exit_icq_fn(icq);

        icq->flags |= ICQ_EXITED;
}

/* Release an icq.  Called with both ioc and q locked. */
static void ioc_destroy_icq(struct io_cq *icq)
{
        struct io_context *ioc = icq->ioc;
        struct request_queue *q = icq->q;
        struct elevator_type *et = q->elevator->type;

        lockdep_assert_held(&ioc->lock);
        lockdep_assert_held(q->queue_lock);

        radix_tree_delete(&ioc->icq_tree, icq->q->id);
        hlist_del_init(&icq->ioc_node);
        list_del_init(&icq->q_node);

        /*
         * Both setting lookup hint to and clearing it from @icq are done
         * under queue_lock.  If it's not pointing to @icq now, it never
         * will.  Hint assignment itself can race safely.
         */
        if (rcu_dereference_raw(ioc->icq_hint) == icq)
                rcu_assign_pointer(ioc->icq_hint, NULL);

        ioc_exit_icq(icq);

        /*
         * @icq->q might have gone away by the time RCU callback runs
         * making it impossible to determine icq_cache.  Record it in @icq.
         */
        icq->__rcu_icq_cache = et->icq_cache;
        call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
        struct io_context *ioc = container_of(work, struct io_context,
                                              release_work);
        unsigned long flags;

        /*
         * Exiting icq may call into put_io_context() through elevator
         * which will trigger lockdep warning.  The ioc's are guaranteed to
         * be different, use a different locking subclass here.  Use
         * irqsave variant as there's no spin_lock_irq_nested().
         */
        spin_lock_irqsave_nested(&ioc->lock, flags, 1);

        while (!hlist_empty(&ioc->icq_list)) {
                struct io_cq *icq = hlist_entry(ioc->icq_list.first,
                                                struct io_cq, ioc_node);
                struct request_queue *q = icq->q;

                if (spin_trylock(q->queue_lock)) {
                        ioc_destroy_icq(icq);
                        spin_unlock(q->queue_lock);
                } else {
                        spin_unlock_irqrestore(&ioc->lock, flags);
                        cpu_relax();
                        spin_lock_irqsave_nested(&ioc->lock, flags, 1);
                }
        }

        spin_unlock_irqrestore(&ioc->lock, flags);

        kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
        unsigned long flags;
        bool free_ioc = false;

        if (ioc == NULL)
                return;

        BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

        /*
         * Releasing ioc requires reverse order double locking and we may
         * already be holding a queue_lock.  Do it asynchronously from wq.
         */
        if (atomic_long_dec_and_test(&ioc->refcount)) {
                spin_lock_irqsave(&ioc->lock, flags);
                if (!hlist_empty(&ioc->icq_list))
                        schedule_work(&ioc->release_work);
                else
                        free_ioc = true;
                spin_unlock_irqrestore(&ioc->lock, flags);
        }

        /* nothing to unlink - free the ioc directly instead of leaking it */
        if (free_ioc)
                kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);

/**
 * put_io_context_active - put active reference on ioc
 * @ioc: ioc of interest
 *
 * Undo get_io_context_active().  If active reference reaches zero after
 * put, @ioc can never issue further IOs and ioscheds are notified.
 */
void put_io_context_active(struct io_context *ioc)
{
        struct hlist_node *n;
        unsigned long flags;
        struct io_cq *icq;

        if (!atomic_dec_and_test(&ioc->active_ref)) {
                put_io_context(ioc);
                return;
        }

        /*
         * Need ioc lock to walk icq_list and q lock to exit icq.  Perform
         * reverse double locking.  Read comment in ioc_release_fn() for
         * explanation on the nested locking annotation.
         */
retry:
        spin_lock_irqsave_nested(&ioc->lock, flags, 1);
        hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
                if (icq->flags & ICQ_EXITED)
                        continue;
                if (spin_trylock(icq->q->queue_lock)) {
                        ioc_exit_icq(icq);
                        spin_unlock(icq->q->queue_lock);
                } else {
                        spin_unlock_irqrestore(&ioc->lock, flags);
                        cpu_relax();
                        goto retry;
                }
        }
        spin_unlock_irqrestore(&ioc->lock, flags);

        put_io_context(ioc);
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
        struct io_context *ioc;

        task_lock(task);
        ioc = task->io_context;
        task->io_context = NULL;
        task_unlock(task);

        atomic_dec(&ioc->nr_tasks);
        put_io_context_active(ioc);
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.  Must be called with @q locked.
 */
void ioc_clear_queue(struct request_queue *q)
{
        lockdep_assert_held(q->queue_lock);

        while (!list_empty(&q->icq_list)) {
                struct io_cq *icq = list_entry(q->icq_list.next,
                                               struct io_cq, q_node);
                struct io_context *ioc = icq->ioc;

                spin_lock(&ioc->lock);
                ioc_destroy_icq(icq);
                spin_unlock(&ioc->lock);
        }
}
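
/*
 * Allocate and initialize an io_context for @task and try to install it.
 * If @task already has one, or @task (which isn't %current) is exiting,
 * the freshly allocated ioc is simply freed again.  Returns 0 in either
 * case and -ENOMEM only if allocation fails; callers are expected to
 * re-check task->io_context afterwards (see get_task_io_context()).
 */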
int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
{
        struct io_context *ioc;

        ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
                                    node);
        if (unlikely(!ioc))
                return -ENOMEM;

        /* initialize */
        atomic_long_set(&ioc->refcount, 1);
        atomic_set(&ioc->active_ref, 1);
        spin_lock_init(&ioc->lock);
        INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
        INIT_HLIST_HEAD(&ioc->icq_list);
        INIT_WORK(&ioc->release_work, ioc_release_fn);

        /*
         * Try to install.  ioc shouldn't be installed if someone else
         * already did or @task, which isn't %current, is exiting.  Note
         * that we need to allow ioc creation on exiting %current as exit
         * path may issue IOs from e.g. exit_files().  The exit path is
         * responsible for not issuing IO after exit_io_context().
         */
        task_lock(task);
        if (!task->io_context &&
            (task == current || !(task->flags & PF_EXITING)))
                task->io_context = ioc;
        else
                kmem_cache_free(iocontext_cachep, ioc);
        task_unlock(task);

        return 0;
}

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
                                       gfp_t gfp_flags, int node)
{
        struct io_context *ioc;

        might_sleep_if(gfp_flags & __GFP_WAIT);

        do {
                task_lock(task);
                ioc = task->io_context;
                if (likely(ioc)) {
                        get_io_context(ioc);
                        task_unlock(task);
                        return ioc;
                }
                task_unlock(task);
        } while (!create_task_io_context(task, gfp_flags, node));

        return NULL;
}
EXPORT_SYMBOL(get_task_io_context);
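
/*
 * Illustrative usage sketch: a caller that needs an ioc reference for an
 * arbitrary task would typically pair the lookup with put_io_context(),
 * roughly:
 *
 *      struct io_context *ioc;
 *
 *      ioc = get_task_io_context(task, GFP_KERNEL, NUMA_NO_NODE);
 *      if (ioc) {
 *              ... use ioc ...
 *              put_io_context(ioc);
 *      }
 *
 * For %current, reading current->io_context and calling get_io_context()
 * directly avoids the task_lock() round trip, as noted above.
 */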

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
        struct io_cq *icq;

        lockdep_assert_held(q->queue_lock);

        /*
         * icq's are indexed from @ioc using radix tree and hint pointer,
         * both of which are protected with RCU.  All removals are done
         * holding both q and ioc locks, and we're holding q lock - if we
         * find an icq which points to us, it's guaranteed to be valid.
         */
        rcu_read_lock();
        icq = rcu_dereference(ioc->icq_hint);
        if (icq && icq->q == q)
                goto out;

        icq = radix_tree_lookup(&ioc->icq_tree, q->id);
        if (icq && icq->q == q)
                rcu_assign_pointer(ioc->icq_hint, icq); /* allowed to race */
        else
                icq = NULL;
out:
        rcu_read_unlock();
        return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);
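
/*
 * Typical usage sketch: request submission paths look the icq up first and
 * fall back to allocation only when it is missing, roughly:
 *
 *      spin_lock_irq(q->queue_lock);
 *      icq = ioc_lookup_icq(ioc, q);
 *      spin_unlock_irq(q->queue_lock);
 *      if (!icq)
 *              icq = ioc_create_icq(ioc, q, GFP_NOIO);
 *
 * The exact call sites and gfp masks vary; note that ioc_create_icq()
 * below acquires both q and ioc locks itself, so the caller must not be
 * holding either when calling it.
 */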

/**
 * ioc_create_icq - create and link io_cq
 * @ioc: io_context of interest
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking @ioc and @q exists.  If the icq doesn't exist,
 * it will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
                             gfp_t gfp_mask)
{
        struct elevator_type *et = q->elevator->type;
        struct io_cq *icq;

        /* allocate stuff */
        icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
                                    q->node);
        if (!icq)
                return NULL;

        if (radix_tree_preload(gfp_mask) < 0) {
                kmem_cache_free(et->icq_cache, icq);
                return NULL;
        }

        icq->ioc = ioc;
        icq->q = q;
        INIT_LIST_HEAD(&icq->q_node);
        INIT_HLIST_NODE(&icq->ioc_node);

        /* lock both q and ioc and try to link @icq */
        spin_lock_irq(q->queue_lock);
        spin_lock(&ioc->lock);

        if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
                hlist_add_head(&icq->ioc_node, &ioc->icq_list);
                list_add(&icq->q_node, &q->icq_list);
                if (et->ops.elevator_init_icq_fn)
                        et->ops.elevator_init_icq_fn(icq);
        } else {
                kmem_cache_free(et->icq_cache, icq);
                icq = ioc_lookup_icq(ioc, q);
                if (!icq)
                        printk(KERN_ERR "cfq: icq link failed!\n");
        }

        spin_unlock(&ioc->lock);
        spin_unlock_irq(q->queue_lock);
        radix_tree_preload_end();
        return icq;
}
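
/*
 * Boot-time initialization: create the slab cache used for io_context
 * allocations.  SLAB_PANIC makes cache creation failure fatal at boot.
 */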
static int __init blk_ioc_init(void)
{
        iocontext_cachep = kmem_cache_create("blkdev_ioc",
                        sizeof(struct io_context), 0, SLAB_PANIC, NULL);
        return 0;
}
subsys_initcall(blk_ioc_init);