blk-ioc.c

/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;
/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
        BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
        atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);
static void icq_free_icq_rcu(struct rcu_head *head)
{
        struct io_cq *icq = container_of(head, struct io_cq, __rcu_head);

        kmem_cache_free(icq->__rcu_icq_cache, icq);
}
/*
 * Exit and free an icq.  Called with both ioc and q locked.
 */
static void ioc_exit_icq(struct io_cq *icq)
{
        struct io_context *ioc = icq->ioc;
        struct request_queue *q = icq->q;
        struct elevator_type *et = q->elevator->type;

        lockdep_assert_held(&ioc->lock);
        lockdep_assert_held(q->queue_lock);

        radix_tree_delete(&ioc->icq_tree, icq->q->id);
        hlist_del_init(&icq->ioc_node);
        list_del_init(&icq->q_node);

        /*
         * Both setting lookup hint to and clearing it from @icq are done
         * under queue_lock.  If it's not pointing to @icq now, it never
         * will.  Hint assignment itself can race safely.
         */
        if (rcu_dereference_raw(ioc->icq_hint) == icq)
                rcu_assign_pointer(ioc->icq_hint, NULL);

        if (et->ops.elevator_exit_icq_fn)
                et->ops.elevator_exit_icq_fn(icq);

        /*
         * @icq->q might have gone away by the time RCU callback runs
         * making it impossible to determine icq_cache.  Record it in @icq.
         */
        icq->__rcu_icq_cache = et->icq_cache;
        call_rcu(&icq->__rcu_head, icq_free_icq_rcu);
}
/*
 * Slow path for ioc release in put_io_context().  Performs double-lock
 * dancing to unlink all icq's and then frees ioc.
 */
static void ioc_release_fn(struct work_struct *work)
{
        struct io_context *ioc = container_of(work, struct io_context,
                                               release_work);
        struct request_queue *last_q = NULL;
        unsigned long flags;

        /*
         * Exiting icq may call into put_io_context() through elevator
         * which will trigger lockdep warning.  The ioc's are guaranteed to
         * be different, use a different locking subclass here.  Use
         * irqsave variant as there's no spin_lock_irq_nested().
         */
        spin_lock_irqsave_nested(&ioc->lock, flags, 1);

        while (!hlist_empty(&ioc->icq_list)) {
                struct io_cq *icq = hlist_entry(ioc->icq_list.first,
                                                struct io_cq, ioc_node);
                struct request_queue *this_q = icq->q;

                if (this_q != last_q) {
                        /*
                         * Need to switch to @this_q.  Once we release
                         * @ioc->lock, it can go away along with @cic.
                         * Hold on to it.
                         */
                        __blk_get_queue(this_q);

                        /*
                         * blk_put_queue() might sleep thanks to kobject
                         * idiocy.  Always release both locks, put and
                         * restart.
                         */
                        if (last_q) {
                                spin_unlock(last_q->queue_lock);
                                spin_unlock_irqrestore(&ioc->lock, flags);
                                blk_put_queue(last_q);
                        } else {
                                spin_unlock_irqrestore(&ioc->lock, flags);
                        }

                        last_q = this_q;
                        spin_lock_irqsave(this_q->queue_lock, flags);
                        spin_lock_nested(&ioc->lock, 1);
                        continue;
                }
                ioc_exit_icq(icq);
        }

        if (last_q) {
                spin_unlock(last_q->queue_lock);
                spin_unlock_irqrestore(&ioc->lock, flags);
                blk_put_queue(last_q);
        } else {
                spin_unlock_irqrestore(&ioc->lock, flags);
        }

        kmem_cache_free(iocontext_cachep, ioc);
}
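/*
 * Locking note: queue_lock is the outer lock and ioc->lock nests inside it,
 * as seen in ioc_clear_queue() and ioc_create_icq().  A final put may happen
 * while a queue_lock is already held, so put_io_context() below cannot take
 * the locks in that order itself and instead defers icq teardown to
 * ioc_release_fn() via the workqueue.
 */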
/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
        unsigned long flags;

        if (ioc == NULL)
                return;

        BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

        /*
         * Releasing ioc requires reverse order double locking and we may
         * already be holding a queue_lock.  Do it asynchronously from wq.
         */
        if (atomic_long_dec_and_test(&ioc->refcount)) {
                spin_lock_irqsave(&ioc->lock, flags);
                if (!hlist_empty(&ioc->icq_list))
                        schedule_work(&ioc->release_work);
                spin_unlock_irqrestore(&ioc->lock, flags);
        }
}
EXPORT_SYMBOL(put_io_context);
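/*
 * Reference pairing sketch (illustrative only, not used in this file): code
 * that stashes an io_context pointer takes a reference and later drops it:
 *
 *	get_io_context(ioc);
 *	...
 *	put_io_context(ioc);
 *
 * The final put only schedules the actual icq teardown, so callers must not
 * expect icq's to be gone synchronously.
 */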
/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
        struct io_context *ioc;

        task_lock(task);
        ioc = task->io_context;
        task->io_context = NULL;
        task_unlock(task);

        atomic_dec(&ioc->nr_tasks);
        put_io_context(ioc);
}
/**
 * ioc_clear_queue - break any ioc association with the specified queue
 * @q: request_queue being cleared
 *
 * Walk @q->icq_list and exit all io_cq's.  Must be called with @q locked.
 */
void ioc_clear_queue(struct request_queue *q)
{
        lockdep_assert_held(q->queue_lock);

        while (!list_empty(&q->icq_list)) {
                struct io_cq *icq = list_entry(q->icq_list.next,
                                               struct io_cq, q_node);
                struct io_context *ioc = icq->ioc;

                spin_lock(&ioc->lock);
                ioc_exit_icq(icq);
                spin_unlock(&ioc->lock);
        }
}
void create_io_context_slowpath(struct task_struct *task, gfp_t gfp_flags,
                                int node)
{
        struct io_context *ioc;

        ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
                                    node);
        if (unlikely(!ioc))
                return;

        /* initialize */
        atomic_long_set(&ioc->refcount, 1);
        atomic_set(&ioc->nr_tasks, 1);
        spin_lock_init(&ioc->lock);
        INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC | __GFP_HIGH);
        INIT_HLIST_HEAD(&ioc->icq_list);
        INIT_WORK(&ioc->release_work, ioc_release_fn);

        /*
         * Try to install.  ioc shouldn't be installed if someone else
         * already did or @task, which isn't %current, is exiting.  Note
         * that we need to allow ioc creation on exiting %current as exit
         * path may issue IOs from e.g. exit_files().  The exit path is
         * responsible for not issuing IO after exit_io_context().
         */
        task_lock(task);
        if (!task->io_context &&
            (task == current || !(task->flags & PF_EXITING)))
                task->io_context = ioc;
        else
                kmem_cache_free(iocontext_cachep, ioc);
        task_unlock(task);
}
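/*
 * Note: the fast path lives in the create_io_context() inline (in blk.h in
 * this tree), which returns @task->io_context directly when it already
 * exists and only calls the slowpath above to allocate and install one.
 */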
/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task.  If it doesn't exist, it is created with
 * @gfp_flags and @node.  The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * %current->io_context + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
                                       gfp_t gfp_flags, int node)
{
        struct io_context *ioc;

        might_sleep_if(gfp_flags & __GFP_WAIT);

        do {
                task_lock(task);
                ioc = task->io_context;
                if (likely(ioc)) {
                        get_io_context(ioc);
                        task_unlock(task);
                        return ioc;
                }
                task_unlock(task);
        } while (create_io_context(task, gfp_flags, node));

        return NULL;
}
EXPORT_SYMBOL(get_task_io_context);
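/*
 * Usage sketch (illustrative only): get_task_io_context() works for any
 * task, while for %current the kerneldoc above suggests skipping task_lock():
 *
 *	struct io_context *ioc = current->io_context;
 *
 *	if (ioc)
 *		get_io_context(ioc);
 *	else
 *		ioc = get_task_io_context(current, GFP_NOIO, NUMA_NO_NODE);
 *
 * GFP_NOIO and NUMA_NO_NODE are placeholder choices; callers pick the flags
 * and node appropriate to their context, and pair the reference with
 * put_io_context() when done.
 */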
/**
 * ioc_lookup_icq - lookup io_cq from ioc
 * @ioc: the associated io_context
 * @q: the associated request_queue
 *
 * Look up io_cq associated with @ioc - @q pair from @ioc.  Must be called
 * with @q->queue_lock held.
 */
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q)
{
        struct io_cq *icq;

        lockdep_assert_held(q->queue_lock);

        /*
         * icq's are indexed from @ioc using radix tree and hint pointer,
         * both of which are protected with RCU.  All removals are done
         * holding both q and ioc locks, and we're holding q lock - if we
         * find an icq which points to us, it's guaranteed to be valid.
         */
        rcu_read_lock();
        icq = rcu_dereference(ioc->icq_hint);
        if (icq && icq->q == q)
                goto out;

        icq = radix_tree_lookup(&ioc->icq_tree, q->id);
        if (icq && icq->q == q)
                rcu_assign_pointer(ioc->icq_hint, icq);	/* allowed to race */
        else
                icq = NULL;
out:
        rcu_read_unlock();
        return icq;
}
EXPORT_SYMBOL(ioc_lookup_icq);
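/*
 * Lookup/create pattern sketch (illustrative only): an elevator needing the
 * icq for %current on @q typically tries the lookup under queue_lock and
 * falls back to creation on a miss:
 *
 *	spin_lock_irq(q->queue_lock);
 *	icq = ioc_lookup_icq(ioc, q);
 *	spin_unlock_irq(q->queue_lock);
 *	if (!icq)
 *		icq = ioc_create_icq(q, gfp_mask);
 *
 * ioc_create_icq() below takes queue_lock itself, so it must be called with
 * the lock released.
 */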
/**
 * ioc_create_icq - create and link io_cq
 * @q: request_queue of interest
 * @gfp_mask: allocation mask
 *
 * Make sure io_cq linking %current->io_context and @q exists.  If the
 * io_context and/or icq don't exist, they will be created using @gfp_mask.
 *
 * The caller is responsible for ensuring @ioc won't go away and @q is
 * alive and will stay alive until this function returns.
 */
struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
{
        struct elevator_type *et = q->elevator->type;
        struct io_context *ioc;
        struct io_cq *icq;

        /* allocate stuff */
        ioc = create_io_context(current, gfp_mask, q->node);
        if (!ioc)
                return NULL;

        icq = kmem_cache_alloc_node(et->icq_cache, gfp_mask | __GFP_ZERO,
                                    q->node);
        if (!icq)
                return NULL;

        if (radix_tree_preload(gfp_mask) < 0) {
                kmem_cache_free(et->icq_cache, icq);
                return NULL;
        }

        icq->ioc = ioc;
        icq->q = q;
        INIT_LIST_HEAD(&icq->q_node);
        INIT_HLIST_NODE(&icq->ioc_node);

        /* lock both q and ioc and try to link @icq */
        spin_lock_irq(q->queue_lock);
        spin_lock(&ioc->lock);

        if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
                hlist_add_head(&icq->ioc_node, &ioc->icq_list);
                list_add(&icq->q_node, &q->icq_list);
                if (et->ops.elevator_init_icq_fn)
                        et->ops.elevator_init_icq_fn(icq);
        } else {
                kmem_cache_free(et->icq_cache, icq);
                icq = ioc_lookup_icq(ioc, q);
                if (!icq)
                        printk(KERN_ERR "cfq: icq link failed!\n");
        }

        spin_unlock(&ioc->lock);
        spin_unlock_irq(q->queue_lock);
        radix_tree_preload_end();
        return icq;
}
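/*
 * Design note: radix_tree_preload() is done before taking the locks so the
 * insert under queue_lock/ioc->lock cannot fail for lack of memory, and
 * holding both locks makes the new icq appear on the ioc and queue lists
 * together, consistent with the double locking ioc_exit_icq() relies on.
 * If another icq for the same queue was linked in the meantime,
 * radix_tree_insert() returns -EEXIST and the existing icq is returned via
 * ioc_lookup_icq() instead.
 */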
void ioc_set_changed(struct io_context *ioc, int which)
{
        struct io_cq *icq;
        struct hlist_node *n;

        hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
                set_bit(which, &icq->changed);
}
/**
 * ioc_ioprio_changed - notify ioprio change
 * @ioc: io_context of interest
 * @ioprio: new ioprio
 *
 * @ioc's ioprio has changed to @ioprio.  Set %ICQ_IOPRIO_CHANGED for all
 * icq's.  iosched is responsible for checking the bit and applying it on
 * request issue path.
 */
void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
{
        unsigned long flags;

        spin_lock_irqsave(&ioc->lock, flags);
        ioc->ioprio = ioprio;
        ioc_set_changed(ioc, ICQ_IOPRIO_CHANGED);
        spin_unlock_irqrestore(&ioc->lock, flags);
}
/**
 * ioc_cgroup_changed - notify cgroup change
 * @ioc: io_context of interest
 *
 * @ioc's cgroup has changed.  Set %ICQ_CGROUP_CHANGED for all icq's.
 * iosched is responsible for checking the bit and applying it on request
 * issue path.
 */
void ioc_cgroup_changed(struct io_context *ioc)
{
        unsigned long flags;

        spin_lock_irqsave(&ioc->lock, flags);
        ioc_set_changed(ioc, ICQ_CGROUP_CHANGED);
        spin_unlock_irqrestore(&ioc->lock, flags);
}
EXPORT_SYMBOL(ioc_cgroup_changed);
static int __init blk_ioc_init(void)
{
        iocontext_cachep = kmem_cache_create("blkdev_ioc",
                        sizeof(struct io_context), 0, SLAB_PANIC, NULL);
        return 0;
}
subsys_initcall(blk_ioc_init);