elevator.c

/*
 * Block device elevator/IO-scheduler.
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-cgroup.h"
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
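
/*
 * Requests are hashed by their end sector (start + length), so looking up a
 * bio's start sector in elv_rqhash_find() yields a request that the bio
 * could be appended to, i.e. a back-merge candidate; see elv_merge() below.
 */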

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_allow_merge_fn)
		return e->type->ops.elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return false;

	if (!elv_iosched_allow_merge(rq, bio))
		return false;

	return true;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}
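
/*
 * Look up an elevator type by name and take a reference on its module.  If
 * it is not registered yet, try to autoload it via the "<name>-iosched"
 * module alias and look again.
 */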
static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e) {
		spin_unlock(&elv_list_lock);
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}

static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);
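
/*
 * Booting with "elevator=<name>" (e.g. "elevator=deadline") records the
 * requested scheduler in chosen_elevator; elevator_init() falls back to it
 * when a queue is set up without an explicit scheduler name.
 */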

static struct kobj_type elv_ktype;

static struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;

	eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);
	hash_init(eq->hash);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	int err;

	if (unlikely(q->elevator))
		return 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name);
		if (!e)
			return -EINVAL;
	}

	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
							"Using noop.\n");
			e = elevator_get("noop");
		}
	}

	q->elevator = elevator_alloc(q, e);
	if (!q->elevator)
		return -ENOMEM;

	err = e->ops.elevator_init_fn(q);
	if (err) {
		kobject_put(&q->elevator->kobj);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->type->ops.elevator_exit_fn)
		e->type->ops.elevator_exit_fn(e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
	hash_del(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hash_add(e->hash, &rq->hash, rq_hash_key(rq));
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_node *entry, *next;
	struct request *rq;

	hash_for_each_possible_safe(e->hash, rq, entry, next, hash, offset) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
	/*
	 * Walk the dispatch list from the tail and insert rq after the first
	 * request it may not pass: one of a different data direction or
	 * DISCARD type, a barrier/started request, one on the far side of
	 * the end_sector boundary, or one whose start sector does not exceed
	 * rq's (so ascending sector order is kept within the boundary).
	 */
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if ((rq->cmd_flags & REQ_DISCARD) !=
		    (pos->cmd_flags & REQ_DISCARD))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);
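
/*
 * Try to find a request that bio can be merged into.  Checks, in order, the
 * one-hit last_merge cache, the back-merge hash, and finally the
 * scheduler's own elevator_merge_fn.  On success *req points at the chosen
 * request and the return value is ELEVATOR_FRONT_MERGE or
 * ELEVATOR_BACK_MERGE; otherwise ELEVATOR_NO_MERGE is returned.
 */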
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * Levels of merges:
	 * 	nomerges:  No merges at all attempted
	 * 	noxmerges: Only simple one-hit cache try
	 * 	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
		ret = blk_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->type->ops.elevator_merge_fn)
		return e->type->ops.elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
static bool elv_attempt_insert_merge(struct request_queue *q,
				     struct request *rq)
{
	struct request *__rq;
	bool ret;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	ret = false;
	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	while (1) {
		__rq = elv_rqhash_find(q, blk_rq_pos(rq));
		if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
			break;

		/* The merged request could be merged with others, try again */
		ret = true;
		rq = __rq;
	}

	return ret;
}

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_merged_fn)
		e->type->ops.elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			struct request *next)
{
	struct elevator_queue *e = q->elevator;
	const int next_sorted = next->cmd_flags & REQ_SORTED;

	if (next_sorted && e->type->ops.elevator_merge_req_fn)
		e->type->ops.elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);

	if (next_sorted) {
		elv_rqhash_del(q, next);
		q->nr_sorted--;
	}

	q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_bio_merged_fn)
		e->type->ops.elevator_bio_merged_fn(q, rq, bio);
}

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (rq->cmd_flags & REQ_SORTED)
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}
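
/*
 * Force the scheduler to dispatch everything it is still holding onto the
 * dispatch list.  If nr_sorted is still non-zero afterwards, the scheduler
 * failed to honour forced dispatch, which is reported (at most ten times).
 */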
void elv_drain_elevator(struct request_queue *q)
{
	static int printed;

	lockdep_assert_held(q->queue_lock);

	while (q->elevator->type->ops.elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted && printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->type->elevator_name, q->nr_sorted);
	}
}
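
/*
 * Queue a request at the given insertion point.  Called with the queue lock
 * held (elv_add_request() below is the locking wrapper).  FRONT and REQUEUE
 * go straight to the head of the dispatch list, BACK drains the scheduler
 * and appends to the tail, SORT/SORT_MERGE hand the request to the
 * scheduler (after an optional insertion merge), and FLUSH routes it to the
 * flush machinery.
 */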
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	trace_block_rq_insert(q, rq);

	rq->q = q;

	if (rq->cmd_flags & REQ_SOFTBARRIER) {
		/* barriers are scheduling boundary, update end_sector */
		if (rq->cmd_type == REQ_TYPE_FS) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    (where == ELEVATOR_INSERT_SORT ||
		     where == ELEVATOR_INSERT_SORT_MERGE))
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
	case ELEVATOR_INSERT_REQUEUE:
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT_MERGE:
		/*
		 * If we succeed in merging this request with one in the
		 * queue already, we are done - rq has now been freed,
		 * so no need to do anything further.
		 */
		if (elv_attempt_insert_merge(q, rq))
			break;
		/* otherwise fall through and sort it into the scheduler */
	case ELEVATOR_INSERT_SORT:
		BUG_ON(rq->cmd_type != REQ_TYPE_FS);
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->type->ops.elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_FLUSH:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		blk_insert_flush(rq);
		break;
	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_latter_req_fn)
		return e->type->ops.elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_former_req_fn)
		return e->type->ops.elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq,
		    struct bio *bio, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_set_req_fn)
		return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask);
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_put_req_fn)
		e->type->ops.elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_may_queue_fn)
		return e->type->ops.elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}

void elv_abort_queue(struct request_queue *q)
{
	struct request *rq;

	blk_abort_flushes(q);

	while (!list_empty(&q->queue_head)) {
		rq = list_entry_rq(q->queue_head.next);
		rq->cmd_flags |= REQ_QUIET;
		trace_block_rq_abort(q, rq);
		/*
		 * Mark this request as started so we don't trigger
		 * any debug logic in the end I/O path.
		 */
		blk_start_request(rq);
		__blk_end_request_all(rq, -EIO);
	}
}
EXPORT_SYMBOL(elv_abort_queue);

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->cmd_flags & REQ_SORTED) &&
		    e->type->ops.elevator_completed_req_fn)
			e->type->ops.elevator_completed_req_fn(q, rq);
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
		e->registered = 1;
	}
	return error;
}
EXPORT_SYMBOL(elv_register_queue);

void elv_unregister_queue(struct request_queue *q)
{
	if (q) {
		struct elevator_queue *e = q->elevator;

		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
		e->registered = 0;
	}
}
EXPORT_SYMBOL(elv_unregister_queue);
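
/*
 * A scheduler module registers itself by filling in a struct elevator_type
 * and calling elv_register() from its init path, along these lines
 * (illustrative sketch only; "example" and its callbacks are hypothetical):
 *
 *	static struct elevator_type iosched_example = {
 *		.ops = {
 *			.elevator_merge_fn	= example_merge,
 *			.elevator_dispatch_fn	= example_dispatch,
 *			.elevator_add_req_fn	= example_add_request,
 *			.elevator_init_fn	= example_init_queue,
 *			.elevator_exit_fn	= example_exit_queue,
 *		},
 *		.elevator_name	= "example",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return elv_register(&iosched_example);
 *	}
 *
 * elv_unregister() undoes this from the module exit path.
 */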
int elv_register(struct elevator_type *e)
{
	char *def = "";

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (elevator_find(e->elevator_name)) {
		spin_unlock(&elv_list_lock);
		if (e->icq_cache)
			kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	/* print pretty message */
	if (!strcmp(e->elevator_name, chosen_elevator) ||
	    (!*chosen_elevator &&
	     !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
		def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one. this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old = q->elevator;
	bool registered = old->registered;
	int err;

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data.
	 * Block layer doesn't call into a quiesced elevator - all requests
	 * are directly put on the dispatch list without elevator data
	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
	 * merge happens either.
	 */
	blk_queue_bypass_start(q);

	/* unregister and clear all auxiliary data of the old elevator */
	if (registered)
		elv_unregister_queue(q);

	spin_lock_irq(q->queue_lock);
	ioc_clear_queue(q);
	spin_unlock_irq(q->queue_lock);

	/* allocate, init and register new elevator */
	err = -ENOMEM;
	q->elevator = elevator_alloc(q, new_e);
	if (!q->elevator)
		goto fail_init;

	err = new_e->ops.elevator_init_fn(q);
	if (err) {
		kobject_put(&q->elevator->kobj);
		goto fail_init;
	}

	if (registered) {
		err = elv_register_queue(q);
		if (err)
			goto fail_register;
	}

	/* done, kill the old one and finish */
	elevator_exit(old);
	blk_queue_bypass_end(q);

	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

	return 0;

fail_register:
	elevator_exit(q->elevator);
fail_init:
	/* switch failed, restore and re-register old elevator */
	q->elevator = old;
	elv_register_queue(q);
	blk_queue_bypass_end(q);

	return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
int elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	if (!q->elevator)
		return -ENXIO;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(strstrip(elevator_name));
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->type->elevator_name)) {
		elevator_put(e);
		return 0;
	}

	return elevator_switch(q, e);
}
EXPORT_SYMBOL(elevator_change);
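
/*
 * elv_iosched_store()/elv_iosched_show() back the per-queue "scheduler"
 * sysfs attribute, so a switch can be requested from user space with e.g.
 * "echo deadline > /sys/block/<dev>/queue/scheduler"; reading the file
 * lists the registered schedulers with the active one in brackets.
 */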
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!q->elevator)
		return count;

	ret = elevator_change(q, name);
	if (!ret)
		return count;

	printk(KERN_ERR "elevator: switch to %s failed\n", name);
	return ret;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv;
	struct elevator_type *__e;
	int len = 0;

	if (!q->elevator || !blk_queue_stackable(q))
		return sprintf(name, "none\n");

	elv = e->type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(len+name, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);