blk-ioc.c

/*
 * Functions related to io context handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
#include <linux/slab.h>

#include "blk.h"

/*
 * For io context allocations
 */
static struct kmem_cache *iocontext_cachep;

static void cfq_dtor(struct io_context *ioc)
{
	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
								cic_list);
		cic->dtor(ioc);
	}
}

/**
 * put_io_context - put a reference to an io_context
 * @ioc: io_context to put
 *
 * Decrement the reference count of @ioc and release it if the count
 * reaches zero.
 */
void put_io_context(struct io_context *ioc)
{
	if (ioc == NULL)
		return;

	BUG_ON(atomic_long_read(&ioc->refcount) <= 0);

	if (!atomic_long_dec_and_test(&ioc->refcount))
		return;

	rcu_read_lock();
	cfq_dtor(ioc);
	rcu_read_unlock();

	kmem_cache_free(iocontext_cachep, ioc);
}
EXPORT_SYMBOL(put_io_context);
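
/*
 * Illustrative note (editorial, not in the original source):
 * put_io_context() accepts NULL, so callers can unconditionally drop a
 * reference they may or may not hold. The variable below is hypothetical;
 * get_io_context() is defined later in this file:
 *
 *	struct io_context *ioc = get_io_context(GFP_KERNEL, -1);
 *	...
 *	put_io_context(ioc);	safe even when ioc is NULL
 */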

static void cfq_exit(struct io_context *ioc)
{
	rcu_read_lock();

	if (!hlist_empty(&ioc->cic_list)) {
		struct cfq_io_context *cic;

		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
								cic_list);
		cic->exit(ioc);
	}
	rcu_read_unlock();
}

/* Called by the exiting task */
void exit_io_context(struct task_struct *task)
{
	struct io_context *ioc;

	task_lock(task);
	ioc = task->io_context;
	task->io_context = NULL;
	task_unlock(task);

	if (atomic_dec_and_test(&ioc->nr_tasks))
		cfq_exit(ioc);

	put_io_context(ioc);
}
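
/*
 * Sketch of the expected call site (an assumption about this kernel
 * generation, not shown in this file): the task exit path does roughly
 *
 *	if (tsk->io_context)
 *		exit_io_context(tsk);
 *
 * nr_tasks counts tasks sharing the ioc (e.g. clones created with
 * CLONE_IO), so only the last exiting sharer runs the cfq exit hook.
 */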

struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ioc;

	ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
								node);
	if (unlikely(!ioc))
		return NULL;

	/* initialize */
	atomic_long_set(&ioc->refcount, 1);
	atomic_set(&ioc->nr_tasks, 1);
	spin_lock_init(&ioc->lock);
	INIT_RADIX_TREE(&ioc->radix_root, GFP_ATOMIC | __GFP_HIGH);
	INIT_HLIST_HEAD(&ioc->cic_list);

	return ioc;
}
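
/*
 * Note (editorial, hedged): the radix tree is initialized with
 * GFP_ATOMIC | __GFP_HIGH on the assumption that insertions happen in
 * contexts that must not sleep, e.g. under ioc->lock. A minimal direct
 * use, though most callers go through current_io_context() below:
 *
 *	struct io_context *ioc = alloc_io_context(GFP_KERNEL, -1);
 *	if (ioc)
 *		put_io_context(ioc);	drops the initial reference
 */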

/**
 * current_io_context - get io_context of %current
 * @gfp_flags: allocation flags, used if allocation is necessary
 * @node: allocation node, used if allocation is necessary
 *
 * Return io_context of %current. If it doesn't exist, it is created with
 * @gfp_flags and @node. The returned io_context does NOT have its
 * reference count incremented. Because io_context is exited only on task
 * exit, %current can be sure that the returned io_context is valid and
 * alive as long as it is executing.
 */
struct io_context *current_io_context(gfp_t gfp_flags, int node)
{
	struct task_struct *tsk = current;
	struct io_context *ret;

	ret = tsk->io_context;
	if (likely(ret))
		return ret;

	ret = alloc_io_context(gfp_flags, node);
	if (ret) {
		/* make sure set_task_ioprio() sees the settings above */
		smp_wmb();
		tsk->io_context = ret;
	}

	return ret;
}
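
/*
 * Illustrative sketch (hypothetical caller, not from this file): since
 * no reference is taken, the result must only be used while %current is
 * executing, typically from the I/O submission path:
 *
 *	struct io_context *ioc = current_io_context(GFP_ATOMIC, q->node);
 *	if (ioc)
 *		...use ioc; no put_io_context() needed...
 *
 * "q" stands for a request_queue and is an assumption for the example.
 */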

/*
 * If the current task has no IO context then create one and initialise it.
 * If it does have a context, take a ref on it.
 *
 * This is always called in the context of the task which submitted the I/O.
 */
struct io_context *get_io_context(gfp_t gfp_flags, int node)
{
	struct io_context *ioc = NULL;

	/*
	 * Check for unlikely race with exiting task. ioc ref count is
	 * zero when ioc is being detached.
	 */
	do {
		ioc = current_io_context(gfp_flags, node);
		if (unlikely(!ioc))
			break;
	} while (!atomic_long_inc_not_zero(&ioc->refcount));

	return ioc;
}
EXPORT_SYMBOL(get_io_context);
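
/*
 * Usage sketch (illustrative, not from this file): a caller that needs
 * the context to outlive its immediate use takes a counted reference
 * and balances it with put_io_context():
 *
 *	struct io_context *ioc = get_io_context(GFP_KERNEL, -1);
 *	if (ioc) {
 *		...stash ioc or use it across sleeps...
 *		put_io_context(ioc);
 *	}
 *
 * The inc_not_zero loop above retries if it races with the final put of
 * an exiting task's context, so a non-NULL return is a live reference.
 */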

static int __init blk_ioc_init(void)
{
	iocontext_cachep = kmem_cache_create("blkdev_ioc",
			sizeof(struct io_context), 0, SLAB_PANIC, NULL);
	return 0;
}
subsys_initcall(blk_ioc_init);