/*
 * blk-ioc.c - Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
	atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);

/*
 * RCU callback freeing an icq.  The icq_cache pointer was stashed in
 * ->__rcu_icq_cache by ioc_exit_icq() because the queue, and with it
 * the elevator and its icq_cache, may be gone by the time this runs.
 */
static void icq_free_icq_rcu(struct rcu_head *head)
{
	struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

	kmem_cache_free(icq->__rcu_icq_cache, icq);
}

/*
 * Exit and free an icq.  Called with both ioc and q locked.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
	struct io_context *ioc = icq->ioc;
	struct request_queue *q = icq->q;
	struct elevator_type *et = q->elevator->type;

	lockdep_assert_held(&ioc->lock);
	lockdep_assert_held(q->queue_lock);

	radix_tree_delete(&ioc->icq_tree, icq->q->id);
	hlist_del_init(&icq->ioc_node);
	list_del_init(&icq->q_node);

	/*
	 * Both setting the lookup hint to and clearing it from @icq are
	 * done under queue_lock.  If it's not pointing to @icq now, it
	 * never will.  Hint assignment itself can race safely.
	 */
	if (rcu_dereference_raw(ioc->icq_hint) == icq)
		rcu_assign_pointer(ioc->icq_hint, NULL);

	if (et->ops.elevator_exit_icq_fn)
		et->ops.elevator_exit_icq_fn(icq);

	/*
	 * @icq->q might have gone away by the time the RCU callback runs,
	 * making it impossible to determine icq_cache.  Record it in @icq.
	 */
	icq->__rcu_icq_cache = et->icq_cache;
	call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}
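
/*
 * Summary of the locking scheme implemented in this file: an icq is
 * unlinked only with both ioc->lock and queue_lock held, while lookups
 * may hold either lock (plus RCU, see ioc_lookup_icq()).  When both
 * locks are taken together the order is queue_lock first with
 * ioc->lock nested inside, which is why put_io_context() cannot tear
 * icqs down directly while its caller may already hold a queue_lock
 * and punts to a workqueue instead.
 */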

/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
	struct io_context *ioc = container_of(work, struct io_context,
					      release_work);
	struct request_queue *last_q = NULL;

	spin_lock_irq(&ioc->lock);

	while (!hlist_empty(&ioc->icq_list)) {
		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
						struct io_cq, ioc_node);
		struct request_queue *this_q = icq->q;

		if (this_q != last_q) {
			/*
			 * Need to switch to @this_q.  Once we release
			 * @ioc->lock, it can go away along with @icq.
			 * Hold on to it.
			 */
			__blk_get_queue(this_q);

			/*
			 * blk_put_queue() might sleep thanks to kobject
			 * idiocy.  Always release both locks, put and
			 * restart.
			 */
			if (last_q) {
				spin_unlock(last_q->queue_lock);
				spin_unlock_irq(&ioc->lock);
				blk_put_queue(last_q);
			} else {
				spin_unlock_irq(&ioc->lock);
			}

			last_q = this_q;
			spin_lock_irq(this_q->queue_lock);
			spin_lock(&ioc->lock);
			continue;
		}

		ioc_exit_icq(icq);
	}

	if (last_q) {
		spin_unlock(last_q->queue_lock);
		spin_unlock_irq(&ioc->lock);
		blk_put_queue(last_q);
	} else {
		spin_unlock_irq(&ioc->lock);
	}

	kmem_cache_free(iocontext_cachep, ioc);
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put, may be %NULL
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
	unsigned long flags;
	bool free_ioc = false;

	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	/*
	 * Releasing ioc requires reverse order double locking and we may
	 * already be holding a queue_lock.  Do it asynchronously from wq.
	 */
	if (atomic_long_dec_and_test(&ioc->refcount)) {
		spin_lock_irqsave(&ioc->lock, flags);
		if (!hlist_empty(&ioc->icq_list))
			schedule_work(&ioc->release_work);
		else
			free_ioc = true;
		spin_unlock_irqrestore(&ioc->lock, flags);
	}

	/*
	 * With no icqs to unlink there's nothing for the work item to do;
	 * free @ioc directly instead of leaking it.
	 */
	if (free_ioc)
		kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);

/*
 * Called by the exiting task.  Detach the io_context under task_lock(),
 * drop the task count and put the reference the task itself was holding.
 */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	atomic_dec(&ioc->nr_tasks);
	put_io_context(ioc);
}

/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.  Must be called with @q locked.
 */
void ioc_clear_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	while (!list_empty(&q->icq_list)) {
		struct io_cq *icq = list_entry(q->icq_list.next,
					       struct io_cq, q_node);
		struct io_context *ioc = icq->ioc;

		spin_lock(&ioc->lock);
		ioc_exit_icq(icq);
		spin_unlock(&ioc->lock);
	}
}
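
/**
 * create_io_context_slowpath - allocate and install a new io_context
 * @task: task to install the new io_context on
 * @gfp_flags: allocation flags
 * @node: allocation node
 *
 * Allocate, initialize and try to install an io_context on @task.
 * Installation is skipped if someone else already installed one or if
 * @task (when it isn't %current) is exiting, so callers re-check
 * @task->io_context afterwards rather than relying on a return value.
 */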
void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
				int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
				    node);
	if (unlikely(!ioc))
		return;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->icq_list);
	INIT_WORK(&ioc->release_work, ioc_release_fn);

	/*
	 * Try to install.  ioc shouldn't be installed if someone else
	 * already did or @task, which isn't %current, is exiting.  Note
	 * that we need to allow ioc creation on exiting %current as exit
	 * path may issue IOs from e.g. exit_files().  The exit path is
	 * responsible for not issuing IO after exit_io_context().
	 */
	task_lock(task);
	if (!task->io_context &&
	    (task == current || !(task->flags & PF_EXITING)))
		task->io_context = ioc;
	else
		kmem_cache_free(iocontext_cachep, ioc);
	task_unlock(task);
}
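
/*
 * For context: create_io_context(), used below and in ioc_create_icq(),
 * is the fast-path wrapper around the slowpath above and is declared in
 * blk.h, not in this file.  A minimal sketch of what such a wrapper
 * looks like (illustrative, not the authoritative definition):
 *
 *	static inline struct io_context *create_io_context(
 *			struct task_struct *task, gfp_t gfp_mask, int node)
 *	{
 *		if (unlikely(!task->io_context))
 *			create_io_context_slowpath(task, gfp_mask, node);
 *		return task->io_context;
 *	}
 */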

/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
				       gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	might_sleep_if(gfp_flags & __GFP_WAIT);

	do {
		task_lock(task);
		ioc = task->io_context;
		if (likely(ioc)) {
			get_io_context(ioc);
			task_unlock(task);
			return ioc;
		}
		task_unlock(task);
	} while (create_io_context(task, gfp_flags, node));

	return NULL;
}
EXPORT_SYMBOL(get_task_io_context);
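
/*
 * Typical use (hypothetical caller, for illustration only): take a
 * reference for as long as the ioc is needed and pair it with
 * put_io_context() when done:
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(current, GFP_NOIO, NUMA_NO_NODE);
 *	if (ioc) {
 *		... issue IO on behalf of the ioc ...
 *		put_io_context(ioc);
 *	}
 */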

/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up the io_cq associated with the @ioc - @q pair from @ioc.  Must
 * be called with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
	struct io_cq *icq;

	lockdep_assert_held(q->queue_lock);

	/*
	 * icq's are indexed from @ioc using radix tree and hint pointer,
	 * both of which are protected with RCU.  All removals are done
	 * holding both q and ioc locks, and we're holding q lock - if we
	 * find an icq which points to us, it's guaranteed to be valid.
	 */
	rcu_read_lock();
	icq = rcu_dereference(ioc->icq_hint);
	if (icq && icq->q == q)
		goto out;

	icq = radix_tree_lookup(&ioc->icq_tree, q->id);
	if (icq && icq->q == q)
		rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
	else
		icq = NULL;
out:
	rcu_read_unlock();
	return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);

/**
 * ioc_create_icq - create and link io_cq
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure an io_cq linking %current->io_context and @q exists.  If the
 * io_context and/or icq don't exist, they will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring %current's io_context won't go
 * away and @q is alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
{
	struct elevator_type *et = q->elevator->type;
	struct io_context *ioc;
	struct io_cq *icq;

	/* allocate stuff */
	ioc = create_io_context(current, gfp_mask, q->node);
	if (!ioc)
		return NULL;

	icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
				    q->node);
	if (!icq)
		return NULL;

	if (radix_tree_preload(gfp_mask) < 0) {
		kmem_cache_free(et->icq_cache, icq);
		return NULL;
	}

	icq->ioc = ioc;
	icq->q = q;
	INIT_LIST_HEAD(&icq->q_node);
	INIT_HLIST_NODE(&icq->ioc_node);

	/* lock both q and ioc and try to link @icq */
	spin_lock_irq(q->queue_lock);
	spin_lock(&ioc->lock);

	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
		list_add(&icq->q_node, &q->icq_list);
		if (et->ops.elevator_init_icq_fn)
			et->ops.elevator_init_icq_fn(icq);
	} else {
		/* lost the linking race; fall back to the icq that won */
		kmem_cache_free(et->icq_cache, icq);
		icq = ioc_lookup_icq(ioc, q);
		if (!icq)
			printk(KERN_ERR "cfq: icq link failed!\n");
	}

	spin_unlock(&ioc->lock);
	spin_unlock_irq(q->queue_lock);
	radix_tree_preload_end();
	return icq;
}
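
/*
 * Typical call pattern (hypothetical caller, for illustration): look up
 * first and create on a miss.  ioc_create_icq() takes queue_lock itself,
 * so the caller must have dropped it before creating:
 *
 *	spin_lock_irq(q->queue_lock);
 *	icq = ioc_lookup_icq(ioc, q);
 *	spin_unlock_irq(q->queue_lock);
 *	if (!icq)
 *		icq = ioc_create_icq(q, GFP_NOIO);
 */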

/*
 * Set @which changed bit on all icqs linked to @ioc.  Called with
 * @ioc->lock held by the notifiers below.
 */
void ioc_set_changed(struct io_context *ioc, int which)
{
	struct io_cq *icq;
	struct hlist_node *n;

	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
		set_bit(which, &icq->changed);
}

/**
 * ioc_ioprio_changed - notify ioprio change
 * @ioc: io_context of interest
 * @ioprio: new ioprio
 *
 * @ioc's ioprio has changed to @ioprio.  Set %ICQ_IOPRIO_CHANGED for all
 * icq's.  iosched is responsible for checking the bit and applying it on
 * request issue path.
 */
void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	ioc->ioprio = ioprio;
	ioc_set_changed(ioc, ICQ_IOPRIO_CHANGED);
	spin_unlock_irqrestore(&ioc->lock, flags);
}

/**
 * ioc_cgroup_changed - notify cgroup change
 * @ioc: io_context of interest
 *
 * @ioc's cgroup has changed.  Set %ICQ_CGROUP_CHANGED for all icq's.
 * iosched is responsible for checking the bit and applying it on request
 * issue path.
 */
void ioc_cgroup_changed(struct io_context *ioc)
{
	unsigned long flags;

	spin_lock_irqsave(&ioc->lock, flags);
	ioc_set_changed(ioc, ICQ_CGROUP_CHANGED);
	spin_unlock_irqrestore(&ioc->lock, flags);
}
EXPORT_SYMBOL(ioc_cgroup_changed);

static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
					     sizeof(struct io_context), 0,
					     SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);