blk-ioc.c

/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

/**
 * get_io_context - increment reference count to io_context
 * @ioc: io_context to get
 *
 * Increment reference count to @ioc.
 */
void get_io_context(struct io_context *ioc)
{
        BUG_ON(atomic_long_read(&ioc->refcount) <= 0);
        atomic_long_inc(&ioc->refcount);
}
EXPORT_SYMBOL(get_io_context);
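
/*
 * cfq_dtor() and cfq_exit() below hand control back to the io scheduler
 * through the dtor/exit callbacks stored in each cfq_io_context.  Only the
 * first entry on cic_list is dereferenced here; the callback itself is
 * expected to walk and tear down every cic attached to @ioc.  The
 * rcu_read_lock() taken around these calls presumably keeps that list walk
 * safe against cics being freed concurrently, since CFQ releases them via
 * RCU.
 */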
static void cfq_dtor(struct io_context *ioc)
{
        if (!hlist_empty(&ioc->cic_list)) {
                struct cfq_io_context *cic;

                cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
                                  cic_list);
                cic->dtor(ioc);
        }
}

/**
 * put_io_context - put a reference of io_context
 * @ioc: io_context to put
 *
 * Decrement reference count of @ioc and release it if the count reaches
 * zero.
 */
void put_io_context(struct io_context *ioc)
{
        if (ioc == NULL)
                return;

        BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

        if (!atomic_long_dec_and_test(&ioc->refcount))
                return;

        rcu_read_lock();
        cfq_dtor(ioc);
        rcu_read_unlock();

        kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);
static void cfq_exit(struct io_context *ioc)
{
        rcu_read_lock();

        if (!hlist_empty(&ioc->cic_list)) {
                struct cfq_io_context *cic;

                cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
                                  cic_list);
                cic->exit(ioc);
        }
        rcu_read_unlock();
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
        struct io_context *ioc;

        /* PF_EXITING prevents new io_context from being attached to @task */
        WARN_ON_ONCE(!(current->flags & PF_EXITING));

        task_lock(task);
        ioc = task->io_context;
        task->io_context = NULL;
        task_unlock(task);

        if (atomic_dec_and_test(&ioc->nr_tasks))
                cfq_exit(ioc);

        put_io_context(ioc);
}
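
/*
 * Lifetime model: @refcount keeps the io_context structure itself alive,
 * while @nr_tasks counts the tasks currently attached to it (the fork path,
 * outside this file, shares an io_context and bumps nr_tasks when CLONE_IO
 * is used).  cfq_exit() therefore runs only when the last attached task
 * exits, even though other reference holders may keep the structure around
 * until the final put_io_context().
 */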
static struct io_context *create_task_io_context(struct task_struct *task,
                                                 gfp_t gfp_flags, int node,
                                                 bool take_ref)
{
        struct io_context *ioc;

        ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
                                    node);
        if (unlikely(!ioc))
                return NULL;

        /* initialize */
        atomic_long_set(&ioc->refcount, 1);
        atomic_set(&ioc->nr_tasks, 1);
        spin_lock_init(&ioc->lock);
        INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
        INIT_HLIST_HEAD(&ioc->cic_list);

        /* try to install, somebody might already have beaten us to it */
        task_lock(task);

        if (!task->io_context && !(task->flags & PF_EXITING)) {
                task->io_context = ioc;
        } else {
                kmem_cache_free(iocontext_cachep, ioc);
                ioc = task->io_context;
        }

        if (ioc && take_ref)
                get_io_context(ioc);

        task_unlock(task);
        return ioc;
}
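
/*
 * The allocation above happens before task_lock() is taken so that
 * @gfp_flags may include sleeping flags; the cost is the race handled in
 * the install step: if someone else attached an io_context to @task in the
 * meantime (or @task started exiting), the freshly allocated copy is simply
 * freed and the winner's io_context (or NULL) is returned instead.
 */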
/**
 * current_io_context - get io_context of %current
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of %current. If it doesn't exist, it is created with
 * @gfp_flags and @node. The returned io_context does NOT have its
 * reference count incremented. Because io_context is exited only on task
 * exit, %current can be sure that the returned io_context is valid and
 * alive as long as it is executing.
 */
struct io_context *current_io_context(gfp_t gfp_flags, int node)
{
        might_sleep_if(gfp_flags & __GFP_WAIT);

        if (current->io_context)
                return current->io_context;

        return create_task_io_context(current, gfp_flags, node, false);
}
EXPORT_SYMBOL(current_io_context);
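
/*
 * Illustrative sketch of the pattern recommended in get_task_io_context()
 * below for %current (the surrounding caller and the GFP/node choice are
 * hypothetical, not taken from this file): look the context up without
 * task_lock() and pin it only if a reference is actually needed,
 *
 *	struct io_context *ioc;
 *
 *	ioc = current_io_context(GFP_NOIO, NUMA_NO_NODE);
 *	if (ioc)
 *		get_io_context(ioc);
 *
 * and drop that reference later with put_io_context().
 */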
/**
 * get_task_io_context - get io_context of a task
 * @task: task of interest
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of @task. If it doesn't exist, it is created with
 * @gfp_flags and @node. The returned io_context has its reference count
 * incremented.
 *
 * This function always goes through task_lock() and it's better to use
 * current_io_context() + get_io_context() for %current.
 */
struct io_context *get_task_io_context(struct task_struct *task,
                                       gfp_t gfp_flags, int node)
{
        struct io_context *ioc;

        might_sleep_if(gfp_flags & __GFP_WAIT);

        task_lock(task);
        ioc = task->io_context;
        if (likely(ioc)) {
                get_io_context(ioc);
                task_unlock(task);
                return ioc;
        }
        task_unlock(task);

        return create_task_io_context(task, gfp_flags, node, true);
}
EXPORT_SYMBOL(get_task_io_context);
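
/*
 * Illustrative sketch for a task other than %current (the caller shown is
 * hypothetical): the reference returned by get_task_io_context() must be
 * balanced with put_io_context() once the caller is done with it.
 *
 *	struct io_context *ioc;
 *
 *	ioc = get_task_io_context(task, GFP_NOIO, NUMA_NO_NODE);
 *	if (ioc) {
 *		... use the context ...
 *		put_io_context(ioc);
 *	}
 */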
void ioc_set_changed(struct io_context *ioc, int which)
{
        struct cfq_io_context *cic;
        struct hlist_node *n;

        hlist_for_each_entry(cic, n, &ioc->cic_list, cic_list)
                set_bit(which, &cic->changed);
}
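
/*
 * ioc_set_changed() walks cic_list without taking any lock of its own; both
 * callers below hold ioc->lock with interrupts disabled around the call,
 * which is presumably what any other caller is expected to do as well.
 */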
/**
 * ioc_ioprio_changed - notify ioprio change
 * @ioc: io_context of interest
 * @ioprio: new ioprio
 *
 * @ioc's ioprio has changed to @ioprio. Set %CIC_IOPRIO_CHANGED for all
 * cic's. iosched is responsible for checking the bit and applying it on
 * request issue path.
 */
void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
{
        unsigned long flags;

        spin_lock_irqsave(&ioc->lock, flags);
        ioc->ioprio = ioprio;
        ioc_set_changed(ioc, CIC_IOPRIO_CHANGED);
        spin_unlock_irqrestore(&ioc->lock, flags);
}

/**
 * ioc_cgroup_changed - notify cgroup change
 * @ioc: io_context of interest
 *
 * @ioc's cgroup has changed. Set %CIC_CGROUP_CHANGED for all cic's.
 * iosched is responsible for checking the bit and applying it on request
 * issue path.
 */
void ioc_cgroup_changed(struct io_context *ioc)
{
        unsigned long flags;

        spin_lock_irqsave(&ioc->lock, flags);
        ioc_set_changed(ioc, CIC_CGROUP_CHANGED);
        spin_unlock_irqrestore(&ioc->lock, flags);
}
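
/*
 * Slab cache setup. SLAB_PANIC makes kmem_cache_create() panic instead of
 * returning NULL on failure, so the allocation paths above can rely on
 * iocontext_cachep being valid once this initcall has run.
 */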
static int __init blk_ioc_init(void)
{
        iocontext_cachep = kmem_cache_create("blkdev_ioc",
                        sizeof(struct io_context), 0, SLAB_PANIC, NULL);
        return 0;
}
subsys_initcall(blk_ioc_init);