blk-sysfs.c

/*
 * Functions related to sysfs handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>

#include "blk.h"
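
/*
 * Each file under /sys/block/<disk>/queue/ is described by one of these
 * entries: the embedded attribute carries the file name and mode, while
 * the show()/store() hooks implement reads and writes against the owning
 * request_queue.
 */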

struct queue_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct request_queue *, char *);
        ssize_t (*store)(struct request_queue *, const char *, size_t);
};
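
/*
 * Helpers shared by every attribute that exposes a single decimal value.
 * Note that queue_var_store() always consumes the whole write: input with
 * no leading digits is parsed by simple_strtoul() as 0 rather than being
 * rejected.
 */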

static ssize_t
queue_var_show(unsigned int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static ssize_t
queue_var_store(unsigned long *var, const char *page, size_t count)
{
        char *p = (char *) page;

        *var = simple_strtoul(p, &p, 10);
        return count;
}

static ssize_t queue_requests_show(struct request_queue *q, char *page)
{
        return queue_var_show(q->nr_requests, page);
}
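
/*
 * Resizing nr_requests changes the congestion and "queue full" thresholds,
 * so both are re-evaluated under queue_lock; waiters are woken in case the
 * new limit un-fills the queue.
 */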

static ssize_t
queue_requests_store(struct request_queue *q, const char *page, size_t count)
{
        struct request_list *rl = &q->rq;
        unsigned long nr;
        int ret = queue_var_store(&nr, page, count);

        if (nr < BLKDEV_MIN_RQ)
                nr = BLKDEV_MIN_RQ;

        spin_lock_irq(q->queue_lock);
        q->nr_requests = nr;
        blk_queue_congestion_threshold(q);

        if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
                blk_set_queue_congested(q, BLK_RW_SYNC);
        else if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
                blk_clear_queue_congested(q, BLK_RW_SYNC);

        if (rl->count[BLK_RW_ASYNC] >= queue_congestion_on_threshold(q))
                blk_set_queue_congested(q, BLK_RW_ASYNC);
        else if (rl->count[BLK_RW_ASYNC] < queue_congestion_off_threshold(q))
                blk_clear_queue_congested(q, BLK_RW_ASYNC);

        if (rl->count[BLK_RW_SYNC] >= q->nr_requests) {
                blk_set_queue_full(q, BLK_RW_SYNC);
        } else if (rl->count[BLK_RW_SYNC]+1 <= q->nr_requests) {
                blk_clear_queue_full(q, BLK_RW_SYNC);
                wake_up(&rl->wait[BLK_RW_SYNC]);
        }

        if (rl->count[BLK_RW_ASYNC] >= q->nr_requests) {
                blk_set_queue_full(q, BLK_RW_ASYNC);
        } else if (rl->count[BLK_RW_ASYNC]+1 <= q->nr_requests) {
                blk_clear_queue_full(q, BLK_RW_ASYNC);
                wake_up(&rl->wait[BLK_RW_ASYNC]);
        }

        spin_unlock_irq(q->queue_lock);
        return ret;
}
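
/*
 * read_ahead_kb: ra_pages is kept in page-cache pages, so shifting by
 * (PAGE_CACHE_SHIFT - 10) converts between pages and kilobytes.
 */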

static ssize_t queue_ra_show(struct request_queue *q, char *page)
{
        int ra_kb = q->backing_dev_info.ra_pages << (PAGE_CACHE_SHIFT - 10);

        return queue_var_show(ra_kb, page);
}

static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long ra_kb;
        ssize_t ret = queue_var_store(&ra_kb, page, count);

        q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

        return ret;
}

static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
{
        int max_sectors_kb = queue_max_sectors(q) >> 1;

        return queue_var_show(max_sectors_kb, page);
}

static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_logical_block_size(q), page);
}

static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_physical_block_size(q), page);
}

static ssize_t queue_io_min_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_min(q), page);
}

static ssize_t queue_io_opt_show(struct request_queue *q, char *page)
{
        return queue_var_show(queue_io_opt(q), page);
}
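
/*
 * max_sectors is stored in 512-byte sectors, hence the shift by one to
 * convert to and from kilobytes. Writes outside [one page, max_hw_sectors_kb]
 * are rejected with -EINVAL.
 */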

static ssize_t
queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
{
        unsigned long max_sectors_kb,
                max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1,
                page_kb = 1 << (PAGE_CACHE_SHIFT - 10);
        ssize_t ret = queue_var_store(&max_sectors_kb, page, count);

        if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
                return -EINVAL;

        spin_lock_irq(q->queue_lock);
        blk_queue_max_sectors(q, max_sectors_kb << 1);
        spin_unlock_irq(q->queue_lock);

        return ret;
}

static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page)
{
        int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1;

        return queue_var_show(max_hw_sectors_kb, page);
}
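
/*
 * The attribute is exposed as "rotational", the inverse of the internal
 * QUEUE_FLAG_NONROT flag; that is why a non-zero write *clears* the flag
 * and the show path negates it.
 */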

static ssize_t queue_nonrot_show(struct request_queue *q, char *page)
{
        return queue_var_show(!blk_queue_nonrot(q), page);
}

static ssize_t queue_nonrot_store(struct request_queue *q, const char *page,
                                  size_t count)
{
        unsigned long nm;
        ssize_t ret = queue_var_store(&nm, page, count);

        spin_lock_irq(q->queue_lock);
        if (nm)
                queue_flag_clear(QUEUE_FLAG_NONROT, q);
        else
                queue_flag_set(QUEUE_FLAG_NONROT, q);
        spin_unlock_irq(q->queue_lock);

        return ret;
}

static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
{
        return queue_var_show(blk_queue_nomerges(q), page);
}

static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
                                    size_t count)
{
        unsigned long nm;
        ssize_t ret = queue_var_store(&nm, page, count);

        spin_lock_irq(q->queue_lock);
        if (nm)
                queue_flag_set(QUEUE_FLAG_NOMERGES, q);
        else
                queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
        spin_unlock_irq(q->queue_lock);

        return ret;
}
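
/*
 * rq_affinity steers request completions back to the CPU that submitted
 * them. It depends on the generic SMP IPI helpers; when those are not
 * configured, the store path is compiled out and writes fail with -EINVAL.
 */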

static ssize_t queue_rq_affinity_show(struct request_queue *q, char *page)
{
        unsigned int set = test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags);

        return queue_var_show(set != 0, page);
}

static ssize_t
queue_rq_affinity_store(struct request_queue *q, const char *page, size_t count)
{
        ssize_t ret = -EINVAL;
#if defined(CONFIG_USE_GENERIC_SMP_HELPERS)
        unsigned long val;

        ret = queue_var_store(&val, page, count);
        spin_lock_irq(q->queue_lock);
        if (val)
                queue_flag_set(QUEUE_FLAG_SAME_COMP, q);
        else
                queue_flag_clear(QUEUE_FLAG_SAME_COMP, q);
        spin_unlock_irq(q->queue_lock);
#endif
        return ret;
}

static ssize_t queue_iostats_show(struct request_queue *q, char *page)
{
        return queue_var_show(blk_queue_io_stat(q), page);
}

static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
                                   size_t count)
{
        unsigned long stats;
        ssize_t ret = queue_var_store(&stats, page, count);

        spin_lock_irq(q->queue_lock);
        if (stats)
                queue_flag_set(QUEUE_FLAG_IO_STAT, q);
        else
                queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
        spin_unlock_irq(q->queue_lock);

        return ret;
}
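
/*
 * Attribute table tying sysfs file names and permissions to the handlers
 * above. S_IRUGO makes a file world-readable; S_IWUSR additionally lets
 * its owner (root, for sysfs) write to it.
 */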

static struct queue_sysfs_entry queue_requests_entry = {
        .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR },
        .show = queue_requests_show,
        .store = queue_requests_store,
};

static struct queue_sysfs_entry queue_ra_entry = {
        .attr = {.name = "read_ahead_kb", .mode = S_IRUGO | S_IWUSR },
        .show = queue_ra_show,
        .store = queue_ra_store,
};

static struct queue_sysfs_entry queue_max_sectors_entry = {
        .attr = {.name = "max_sectors_kb", .mode = S_IRUGO | S_IWUSR },
        .show = queue_max_sectors_show,
        .store = queue_max_sectors_store,
};

static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
        .attr = {.name = "max_hw_sectors_kb", .mode = S_IRUGO },
        .show = queue_max_hw_sectors_show,
};

static struct queue_sysfs_entry queue_iosched_entry = {
        .attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
        .show = elv_iosched_show,
        .store = elv_iosched_store,
};

static struct queue_sysfs_entry queue_hw_sector_size_entry = {
        .attr = {.name = "hw_sector_size", .mode = S_IRUGO },
        .show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_logical_block_size_entry = {
        .attr = {.name = "logical_block_size", .mode = S_IRUGO },
        .show = queue_logical_block_size_show,
};

static struct queue_sysfs_entry queue_physical_block_size_entry = {
        .attr = {.name = "physical_block_size", .mode = S_IRUGO },
        .show = queue_physical_block_size_show,
};

static struct queue_sysfs_entry queue_io_min_entry = {
        .attr = {.name = "minimum_io_size", .mode = S_IRUGO },
        .show = queue_io_min_show,
};

static struct queue_sysfs_entry queue_io_opt_entry = {
        .attr = {.name = "optimal_io_size", .mode = S_IRUGO },
        .show = queue_io_opt_show,
};

static struct queue_sysfs_entry queue_nonrot_entry = {
        .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR },
        .show = queue_nonrot_show,
        .store = queue_nonrot_store,
};

static struct queue_sysfs_entry queue_nomerges_entry = {
        .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR },
        .show = queue_nomerges_show,
        .store = queue_nomerges_store,
};

static struct queue_sysfs_entry queue_rq_affinity_entry = {
        .attr = {.name = "rq_affinity", .mode = S_IRUGO | S_IWUSR },
        .show = queue_rq_affinity_show,
        .store = queue_rq_affinity_store,
};

static struct queue_sysfs_entry queue_iostats_entry = {
        .attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR },
        .show = queue_iostats_show,
        .store = queue_iostats_store,
};

static struct attribute *default_attrs[] = {
        &queue_requests_entry.attr,
        &queue_ra_entry.attr,
        &queue_max_hw_sectors_entry.attr,
        &queue_max_sectors_entry.attr,
        &queue_iosched_entry.attr,
        &queue_hw_sector_size_entry.attr,
        &queue_logical_block_size_entry.attr,
        &queue_physical_block_size_entry.attr,
        &queue_io_min_entry.attr,
        &queue_io_opt_entry.attr,
        &queue_nonrot_entry.attr,
        &queue_nomerges_entry.attr,
        &queue_rq_affinity_entry.attr,
        &queue_iostats_entry.attr,
        NULL,
};
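
/*
 * Illustrative shell usage (device name and value shown are examples only;
 * the actual figures depend on the device and driver):
 *
 *   # cat /sys/block/sda/queue/max_sectors_kb
 *   512
 *   # echo 128 > /sys/block/sda/queue/max_sectors_kb
 */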

#define to_queue(atr) container_of((atr), struct queue_sysfs_entry, attr)
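
/*
 * show()/store() dispatchers for the kobject layer. Both serialize on
 * q->sysfs_lock and bail out with -ENOENT once QUEUE_FLAG_DEAD is set,
 * so attribute access cannot race with queue teardown.
 */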

static ssize_t
queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
        ssize_t res;

        if (!entry->show)
                return -EIO;
        mutex_lock(&q->sysfs_lock);
        if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->show(q, page);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

static ssize_t
queue_attr_store(struct kobject *kobj, struct attribute *attr,
                 const char *page, size_t length)
{
        struct queue_sysfs_entry *entry = to_queue(attr);
        struct request_queue *q;
        ssize_t res;

        if (!entry->store)
                return -EIO;

        q = container_of(kobj, struct request_queue, kobj);
        mutex_lock(&q->sysfs_lock);
        if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
                mutex_unlock(&q->sysfs_lock);
                return -ENOENT;
        }
        res = entry->store(q, page, length);
        mutex_unlock(&q->sysfs_lock);
        return res;
}

/**
 * blk_release_queue - release a &struct request_queue when it is no longer needed
 * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
 *     blk_release_queue is the counterpart to blk_init_queue() or
 *     blk_queue_make_request(). It is invoked once the last reference to a
 *     request queue is dropped; typically when a block device is being
 *     de-registered. Its primary task is to free the &struct request
 *     structures that were allocated to the queue and the queue itself.
 *
 * Caveat:
 *     Hopefully the low level driver will have finished any
 *     outstanding requests first...
 **/
static void blk_release_queue(struct kobject *kobj)
{
        struct request_queue *q =
                container_of(kobj, struct request_queue, kobj);
        struct request_list *rl = &q->rq;

        blk_sync_queue(q);

        if (rl->rq_pool)
                mempool_destroy(rl->rq_pool);

        if (q->queue_tags)
                __blk_queue_free_tags(q);

        blk_trace_shutdown(q);

        bdi_destroy(&q->backing_dev_info);
        kmem_cache_free(blk_requestq_cachep, q);
}

static struct sysfs_ops queue_sysfs_ops = {
        .show = queue_attr_show,
        .store = queue_attr_store,
};

struct kobj_type blk_queue_ktype = {
        .sysfs_ops = &queue_sysfs_ops,
        .default_attrs = default_attrs,
        .release = blk_release_queue,
};
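
/*
 * Registration adds the "queue" kobject below the disk's device kobject
 * (hence the /sys/block/<disk>/queue/ directory) and takes an extra
 * reference on the parent via kobject_get(); the matching kobject_put()
 * is in blk_unregister_queue().
 */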

int blk_register_queue(struct gendisk *disk)
{
        int ret;
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return -ENXIO;

        ret = kobject_add(&q->kobj, kobject_get(&disk_to_dev(disk)->kobj),
                          "%s", "queue");
        if (ret < 0)
                return ret;

        kobject_uevent(&q->kobj, KOBJ_ADD);

        if (!q->request_fn)
                return 0;

        ret = elv_register_queue(q);
        if (ret) {
                kobject_uevent(&q->kobj, KOBJ_REMOVE);
                kobject_del(&q->kobj);
                return ret;
        }

        return 0;
}
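
/*
 * Note the asymmetry with registration: the teardown below only runs for
 * queues with a ->request_fn, i.e. request-based drivers, so for
 * make_request-based queues the kobject added in blk_register_queue() is
 * never deleted by this path.
 */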

void blk_unregister_queue(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        if (WARN_ON(!q))
                return;

        if (q->request_fn) {
                elv_unregister_queue(q);

                kobject_uevent(&q->kobj, KOBJ_REMOVE);
                kobject_del(&q->kobj);
                kobject_put(&disk_to_dev(disk)->kobj);
        }
}