blk-mq-tag.c

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu_ida.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

/*
 * Per tagged queue (tag address space) map
 */
struct blk_mq_tags {
        unsigned int nr_tags;
        unsigned int nr_reserved_tags;
        unsigned int nr_batch_move;
        unsigned int nr_max_cache;

        struct percpu_ida free_tags;
        struct percpu_ida reserved_tags;
};
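
/*
 * Block until at least one tag is available: allocate a tag with
 * __GFP_WAIT, which makes percpu_ida_alloc() sleep until an id frees
 * up, then immediately release it again.
 */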
void blk_mq_wait_for_tags(struct blk_mq_tags *tags)
{
        int tag = blk_mq_get_tag(tags, __GFP_WAIT, false);
        blk_mq_put_tag(tags, tag);
}
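
/*
 * A NULL tag map is treated as "has free tags", presumably so that
 * callers which run before the map is set up never stall.
 */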
bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
        return !tags ||
                percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids) != 0;
}
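
/*
 * Normal tags sit above the reserved range in the queue's tag space,
 * so an id allocated from free_tags is offset by nr_reserved_tags
 * before being handed out.
 */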
static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp)
{
        int tag;

        tag = percpu_ida_alloc(&tags->free_tags, gfp);
        if (tag < 0)
                return BLK_MQ_TAG_FAIL;
        return tag + tags->nr_reserved_tags;
}

static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags,
                                              gfp_t gfp)
{
        int tag;

        if (unlikely(!tags->nr_reserved_tags)) {
                WARN_ON_ONCE(1);
                return BLK_MQ_TAG_FAIL;
        }

        tag = percpu_ida_alloc(&tags->reserved_tags, gfp);
        if (tag < 0)
                return BLK_MQ_TAG_FAIL;
        return tag;
}
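
/*
 * Allocate a tag for a request. Reserved tags come from their own
 * small pool, so special requests can still make progress when the
 * normal pool is exhausted.
 */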
unsigned int blk_mq_get_tag(struct blk_mq_tags *tags, gfp_t gfp, bool reserved)
{
        if (!reserved)
                return __blk_mq_get_tag(tags, gfp);

        return __blk_mq_get_reserved_tag(tags, gfp);
}
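
/*
 * Return a tag to its pool. A normal tag has the reserved offset
 * subtracted before it goes back into free_tags; a reserved tag is
 * freed with its raw id.
 */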
static void __blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
        BUG_ON(tag >= tags->nr_tags);

        percpu_ida_free(&tags->free_tags, tag - tags->nr_reserved_tags);
}

static void __blk_mq_put_reserved_tag(struct blk_mq_tags *tags,
                                      unsigned int tag)
{
        BUG_ON(tag >= tags->nr_reserved_tags);

        percpu_ida_free(&tags->reserved_tags, tag);
}

void blk_mq_put_tag(struct blk_mq_tags *tags, unsigned int tag)
{
        if (tag >= tags->nr_reserved_tags)
                __blk_mq_put_tag(tags, tag);
        else
                __blk_mq_put_reserved_tag(tags, tag);
}
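
/*
 * Build a bitmap with one bit set for every currently *free* tag and
 * pass it to fn; clear bits therefore correspond to tags that are in
 * flight. The map is allocated with GFP_ATOMIC, presumably because
 * callers may be in atomic (e.g. timer) context.
 */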
static int __blk_mq_tag_iter(unsigned id, void *data)
{
        unsigned long *tag_map = data;

        __set_bit(id, tag_map);
        return 0;
}

void blk_mq_tag_busy_iter(struct blk_mq_tags *tags,
                          void (*fn)(void *, unsigned long *), void *data)
{
        unsigned long *tag_map;
        size_t map_size;

        map_size = ALIGN(tags->nr_tags, BITS_PER_LONG) / BITS_PER_LONG;
        tag_map = kzalloc(map_size * sizeof(unsigned long), GFP_ATOMIC);
        if (!tag_map)
                return;

        percpu_ida_for_each_free(&tags->free_tags, __blk_mq_tag_iter, tag_map);
        if (tags->nr_reserved_tags)
                percpu_ida_for_each_free(&tags->reserved_tags, __blk_mq_tag_iter,
                        tag_map);

        fn(data, tag_map);
        kfree(tag_map);
}
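
/*
 * Set up the two percpu_ida pools. The per-cpu cache for normal tags
 * is sized at nr_tags / num_possible_cpus(), clamped to
 * [BLK_MQ_TAG_CACHE_MIN, BLK_MQ_TAG_CACHE_MAX], with batch moves of
 * half the cache size between the per-cpu caches and the global pool.
 */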
struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
                                     unsigned int reserved_tags, int node)
{
        unsigned int nr_tags, nr_cache;
        struct blk_mq_tags *tags;
        int ret;

        if (total_tags > BLK_MQ_TAG_MAX) {
                pr_err("blk-mq: tag depth too large\n");
                return NULL;
        }

        tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
        if (!tags)
                return NULL;

        nr_tags = total_tags - reserved_tags;
        nr_cache = nr_tags / num_possible_cpus();

        if (nr_cache < BLK_MQ_TAG_CACHE_MIN)
                nr_cache = BLK_MQ_TAG_CACHE_MIN;
        else if (nr_cache > BLK_MQ_TAG_CACHE_MAX)
                nr_cache = BLK_MQ_TAG_CACHE_MAX;

        tags->nr_tags = total_tags;
        tags->nr_reserved_tags = reserved_tags;
        tags->nr_max_cache = nr_cache;
        tags->nr_batch_move = max(1u, nr_cache / 2);

        ret = __percpu_ida_init(&tags->free_tags, tags->nr_tags -
                                tags->nr_reserved_tags,
                                tags->nr_max_cache,
                                tags->nr_batch_move);
        if (ret)
                goto err_free_tags;

        if (reserved_tags) {
                /*
                 * With max_cache and batch set to 1, the allocator falls
                 * back to no caching. It's fine if reserved tag
                 * allocation is slow.
                 */
                ret = __percpu_ida_init(&tags->reserved_tags, reserved_tags,
                                        1, 1);
                if (ret)
                        goto err_reserved_tags;
        }

        return tags;

err_reserved_tags:
        percpu_ida_destroy(&tags->free_tags);
err_free_tags:
        kfree(tags);
        return NULL;
}
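
/*
 * Tear down both percpu_ida pools and free the tag map itself.
 */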
void blk_mq_free_tags(struct blk_mq_tags *tags)
{
        percpu_ida_destroy(&tags->free_tags);
        percpu_ida_destroy(&tags->reserved_tags);
        kfree(tags);
}
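
/*
 * Dump the tag map configuration and the current global and per-cpu
 * free counts for sysfs.
 */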
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
        char *orig_page = page;
        int cpu;

        if (!tags)
                return 0;

        page += sprintf(page, "nr_tags=%u, reserved_tags=%u, batch_move=%u,"
                        " max_cache=%u\n", tags->nr_tags, tags->nr_reserved_tags,
                        tags->nr_batch_move, tags->nr_max_cache);

        page += sprintf(page, "nr_free=%u, nr_reserved=%u\n",
                        percpu_ida_free_tags(&tags->free_tags, nr_cpu_ids),
                        percpu_ida_free_tags(&tags->reserved_tags, nr_cpu_ids));

        for_each_possible_cpu(cpu) {
                page += sprintf(page, " cpu%02u: nr_free=%u\n", cpu,
                                percpu_ida_free_tags(&tags->free_tags, cpu));
        }

        return page - orig_page;
}
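
/*
 * Typical lifecycle, as an illustrative sketch only (the depths below
 * are made-up values; the real caller is the blk-mq core):
 *
 *      struct blk_mq_tags *tags = blk_mq_init_tags(128, 1, NUMA_NO_NODE);
 *      unsigned int tag = blk_mq_get_tag(tags, GFP_KERNEL, false);
 *      if (tag != BLK_MQ_TAG_FAIL) {
 *              ... use tag to index the queue's request map ...
 *              blk_mq_put_tag(tags, tag);
 *      }
 *      blk_mq_free_tags(tags);
 */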