/*
 * blk-mq-sysfs.c - sysfs interface for the multiqueue block layer (blk-mq)
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/blk-mq.h>

#include "blk-mq.h"
#include "blk-mq-tag.h"
/*
 * No-op release.  The ctx/hctx kobjects are embedded in structures whose
 * lifetime is managed elsewhere by the blk-mq core, so there is nothing
 * to free here; an empty release callback satisfies the kobject API.
 */
static void blk_mq_sysfs_release(struct kobject *kobj)
{
}
/*
 * A sysfs attribute for a software queue (blk_mq_ctx): pairs the raw
 * attribute with typed show/store callbacks that receive the ctx
 * directly instead of a kobject.
 */
struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};
/*
 * A sysfs attribute for a hardware queue (blk_mq_hw_ctx): same shape as
 * blk_mq_ctx_sysfs_entry but with hctx-typed callbacks.
 */
struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};
  27. static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
  28. char *page)
  29. {
  30. struct blk_mq_ctx_sysfs_entry *entry;
  31. struct blk_mq_ctx *ctx;
  32. struct request_queue *q;
  33. ssize_t res;
  34. entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
  35. ctx = container_of(kobj, struct blk_mq_ctx, kobj);
  36. q = ctx->queue;
  37. if (!entry->show)
  38. return -EIO;
  39. res = -ENOENT;
  40. mutex_lock(&q->sysfs_lock);
  41. if (!blk_queue_dying(q))
  42. res = entry->show(ctx, page);
  43. mutex_unlock(&q->sysfs_lock);
  44. return res;
  45. }
  46. static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
  47. const char *page, size_t length)
  48. {
  49. struct blk_mq_ctx_sysfs_entry *entry;
  50. struct blk_mq_ctx *ctx;
  51. struct request_queue *q;
  52. ssize_t res;
  53. entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
  54. ctx = container_of(kobj, struct blk_mq_ctx, kobj);
  55. q = ctx->queue;
  56. if (!entry->store)
  57. return -EIO;
  58. res = -ENOENT;
  59. mutex_lock(&q->sysfs_lock);
  60. if (!blk_queue_dying(q))
  61. res = entry->store(ctx, page, length);
  62. mutex_unlock(&q->sysfs_lock);
  63. return res;
  64. }
  65. static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
  66. struct attribute *attr, char *page)
  67. {
  68. struct blk_mq_hw_ctx_sysfs_entry *entry;
  69. struct blk_mq_hw_ctx *hctx;
  70. struct request_queue *q;
  71. ssize_t res;
  72. entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
  73. hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
  74. q = hctx->queue;
  75. if (!entry->show)
  76. return -EIO;
  77. res = -ENOENT;
  78. mutex_lock(&q->sysfs_lock);
  79. if (!blk_queue_dying(q))
  80. res = entry->show(hctx, page);
  81. mutex_unlock(&q->sysfs_lock);
  82. return res;
  83. }
  84. static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
  85. struct attribute *attr, const char *page,
  86. size_t length)
  87. {
  88. struct blk_mq_hw_ctx_sysfs_entry *entry;
  89. struct blk_mq_hw_ctx *hctx;
  90. struct request_queue *q;
  91. ssize_t res;
  92. entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
  93. hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
  94. q = hctx->queue;
  95. if (!entry->store)
  96. return -EIO;
  97. res = -ENOENT;
  98. mutex_lock(&q->sysfs_lock);
  99. if (!blk_queue_dying(q))
  100. res = entry->store(hctx, page, length);
  101. mutex_unlock(&q->sysfs_lock);
  102. return res;
  103. }
  104. static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page)
  105. {
  106. return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
  107. ctx->rq_dispatched[0]);
  108. }
  109. static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
  110. {
  111. return sprintf(page, "%lu\n", ctx->rq_merged);
  112. }
  113. static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
  114. {
  115. return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
  116. ctx->rq_completed[0]);
  117. }
  118. static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
  119. {
  120. char *start_page = page;
  121. struct request *rq;
  122. page += sprintf(page, "%s:\n", msg);
  123. list_for_each_entry(rq, list, queuelist)
  124. page += sprintf(page, "\t%p\n", rq);
  125. return page - start_page;
  126. }
  127. static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
  128. {
  129. ssize_t ret;
  130. spin_lock(&ctx->lock);
  131. ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
  132. spin_unlock(&ctx->lock);
  133. return ret;
  134. }
  135. static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
  136. char *page)
  137. {
  138. return sprintf(page, "%lu\n", hctx->queued);
  139. }
  140. static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page)
  141. {
  142. return sprintf(page, "%lu\n", hctx->run);
  143. }
  144. static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
  145. char *page)
  146. {
  147. char *start_page = page;
  148. int i;
  149. page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);
  150. for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER; i++) {
  151. unsigned long d = 1U << (i - 1);
  152. page += sprintf(page, "%8lu\t%lu\n", d, hctx->dispatched[i]);
  153. }
  154. return page - start_page;
  155. }
  156. static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
  157. char *page)
  158. {
  159. ssize_t ret;
  160. spin_lock(&hctx->lock);
  161. ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
  162. spin_unlock(&hctx->lock);
  163. return ret;
  164. }
  165. static ssize_t blk_mq_hw_sysfs_ipi_show(struct blk_mq_hw_ctx *hctx, char *page)
  166. {
  167. ssize_t ret;
  168. spin_lock(&hctx->lock);
  169. ret = sprintf(page, "%u\n", !!(hctx->flags & BLK_MQ_F_SHOULD_IPI));
  170. spin_unlock(&hctx->lock);
  171. return ret;
  172. }
  173. static ssize_t blk_mq_hw_sysfs_ipi_store(struct blk_mq_hw_ctx *hctx,
  174. const char *page, size_t len)
  175. {
  176. struct blk_mq_ctx *ctx;
  177. unsigned long ret;
  178. unsigned int i;
  179. if (kstrtoul(page, 10, &ret)) {
  180. pr_err("blk-mq-sysfs: invalid input '%s'\n", page);
  181. return -EINVAL;
  182. }
  183. spin_lock(&hctx->lock);
  184. if (ret)
  185. hctx->flags |= BLK_MQ_F_SHOULD_IPI;
  186. else
  187. hctx->flags &= ~BLK_MQ_F_SHOULD_IPI;
  188. spin_unlock(&hctx->lock);
  189. hctx_for_each_ctx(hctx, ctx, i)
  190. ctx->ipi_redirect = !!ret;
  191. return len;
  192. }
/* Delegate formatting of this hctx's tag map to the tag code. */
static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	return blk_mq_tag_sysfs_show(hctx->tags, page);
}
/* Read-only per-ctx attributes exposed under .../mq/<n>/cpu<c>/ */
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_sysfs_dispatched_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
	.attr = {.name = "merged", .mode = S_IRUGO },
	.show = blk_mq_sysfs_merged_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
	.attr = {.name = "completed", .mode = S_IRUGO },
	.show = blk_mq_sysfs_completed_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
	.attr = {.name = "rq_list", .mode = S_IRUGO },
	.show = blk_mq_sysfs_rq_list_show,
};

/* Default attribute set installed by blk_mq_ctx_ktype. */
static struct attribute *default_ctx_attrs[] = {
	&blk_mq_sysfs_dispatched.attr,
	&blk_mq_sysfs_merged.attr,
	&blk_mq_sysfs_completed.attr,
	&blk_mq_sysfs_rq_list.attr,
	NULL,
};
/* Per-hctx attributes exposed under .../mq/<n>/; all read-only except
 * ipi_redirect, which is also writable by root. */
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = {
	.attr = {.name = "queued", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_queued_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = {
	.attr = {.name = "run", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_run_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
	.attr = {.name = "dispatched", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_dispatched_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
	.attr = {.name = "pending", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_rq_list_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_ipi = {
	.attr = {.name = "ipi_redirect", .mode = S_IRUGO | S_IWUSR},
	.show = blk_mq_hw_sysfs_ipi_show,
	.store = blk_mq_hw_sysfs_ipi_store,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
	.attr = {.name = "tags", .mode = S_IRUGO },
	.show = blk_mq_hw_sysfs_tags_show,
};

/* Default attribute set installed by blk_mq_hw_ktype. */
static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_queued.attr,
	&blk_mq_hw_sysfs_run.attr,
	&blk_mq_hw_sysfs_dispatched.attr,
	&blk_mq_hw_sysfs_pending.attr,
	&blk_mq_hw_sysfs_ipi.attr,
	&blk_mq_hw_sysfs_tags.attr,
	NULL,
};
/* sysfs op tables routing kernfs callbacks to the typed dispatchers. */
static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show	= blk_mq_sysfs_show,
	.store	= blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show	= blk_mq_hw_sysfs_show,
	.store	= blk_mq_hw_sysfs_store,
};

/* ktype for the parent "mq" directory: no default attributes. */
static struct kobj_type blk_mq_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.release	= blk_mq_sysfs_release,
};

/* ktype for each software-queue (cpu<N>) directory. */
static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops	= &blk_mq_sysfs_ops,
	.default_attrs	= default_ctx_attrs,
	.release	= blk_mq_sysfs_release,
};

/* ktype for each hardware-queue directory. */
static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops	= &blk_mq_hw_sysfs_ops,
	.default_attrs	= default_hw_ctx_attrs,
	.release	= blk_mq_sysfs_release,
};
  276. void blk_mq_unregister_disk(struct gendisk *disk)
  277. {
  278. struct request_queue *q = disk->queue;
  279. kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
  280. kobject_del(&q->mq_kobj);
  281. kobject_put(&disk_to_dev(disk)->kobj);
  282. }
  283. int blk_mq_register_disk(struct gendisk *disk)
  284. {
  285. struct device *dev = disk_to_dev(disk);
  286. struct request_queue *q = disk->queue;
  287. struct blk_mq_hw_ctx *hctx;
  288. struct blk_mq_ctx *ctx;
  289. int ret, i, j;
  290. kobject_init(&q->mq_kobj, &blk_mq_ktype);
  291. ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
  292. if (ret < 0)
  293. return ret;
  294. kobject_uevent(&q->mq_kobj, KOBJ_ADD);
  295. queue_for_each_hw_ctx(q, hctx, i) {
  296. kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
  297. ret = kobject_add(&hctx->kobj, &q->mq_kobj, "%u", i);
  298. if (ret)
  299. break;
  300. if (!hctx->nr_ctx)
  301. continue;
  302. hctx_for_each_ctx(hctx, ctx, j) {
  303. kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
  304. ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
  305. if (ret)
  306. break;
  307. }
  308. }
  309. if (ret) {
  310. blk_mq_unregister_disk(disk);
  311. return ret;
  312. }
  313. return 0;
  314. }