/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons; partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "blk.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	\
		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
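
/*
 * Note that rq_hash_key() is the first sector *after* the request, so
 * hashing requests on it lets elv_merge() find a back-merge candidate
 * directly from a bio's starting sector.
 */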

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_allow_merge_fn)
		return e->ops->elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * Don't merge file system requests and discard requests
	 */
	if (bio_discard(bio) != bio_discard(rq->bio))
		return 0;

	/*
	 * different data direction or already started, don't merge
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * must be same device and not a special request
	 */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return 0;

	/*
	 * only merge integrity protected bio into ditto rq
	 */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return 0;

	/*
	 * Don't merge if failfast settings don't match.
	 *
	 * FIXME: The negation in front of each condition is necessary
	 * because bio and request flags use different bit positions
	 * and the accessors return those bits directly.  This
	 * ugliness will soon go away.
	 */
	if (!bio_failfast_dev(bio) != !blk_failfast_dev(rq) ||
	    !bio_failfast_transport(bio) != !blk_failfast_transport(rq) ||
	    !bio_failfast_driver(bio) != !blk_failfast_driver(rq))
		return 0;

	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/*
	 * we can merge and sequence is ok, check if it's possible
	 */
	if (elv_rq_merge_ok(__rq, bio)) {
		if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;
		else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;
	}

	return ret;
}
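
/*
 * Look up a registered elevator type by name; returns NULL if none is
 * registered.  Callers must hold elv_list_lock.
 */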
static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}
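
/*
 * Look up an elevator type and take a reference on the module that
 * implements it, attempting to load "<name>-iosched" on demand if it
 * is not yet registered.  Returns NULL if the type is unavailable.
 */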
static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e) {
		char elv[ELV_NAME_MAX + strlen("-iosched")];

		spin_unlock(&elv_list_lock);

		if (!strcmp(name, "anticipatory"))
			sprintf(elv, "as-iosched");
		else
			sprintf(elv, "%s-iosched", name);

		request_module("%s", elv);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}

static void *elevator_init_queue(struct request_queue *q,
				 struct elevator_queue *eq)
{
	return eq->ops->elevator_init_fn(q);
}

static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
			   void *data)
{
	q->elevator = eq;
	eq->elevator_data = data;
}

static char chosen_elevator[16];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	if (!strcmp(str, "as"))
		strcpy(chosen_elevator, "anticipatory");
	else
		strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);

static struct kobj_type elv_ktype;
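
/*
 * Allocate and initialize an elevator_queue on the queue's home NUMA
 * node, including the back-merge hash table.  On failure the reference
 * taken on the elevator type is dropped and NULL is returned.
 */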
static struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;
	int i;

	eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->ops = &e->ops;
	eq->elevator_type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);

	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
					GFP_KERNEL, q->node);
	if (!eq->hash)
		goto err;

	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->elevator_type);
	kfree(e->hash);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	int ret = 0;
	void *data;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name);
		if (!e)
			return -EINVAL;
	}

	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
				"Using noop.\n");
			e = elevator_get("noop");
		}
	}

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	data = elevator_init_queue(q, eq);
	if (!data) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}

	elevator_attach(q, eq, data);
	return ret;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->ops->elevator_exit_fn)
		e->ops->elevator_exit_fn(e);
	e->ops = NULL;
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);
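
/*
 * Merge hash helpers: mergeable requests are kept hashed on
 * rq_hash_key() (their end sector) while they sit in the scheduler,
 * so a back-merge candidate can be found without scanning the queue.
 */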
static inline void __elv_rqhash_del(struct request *rq)
{
	hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) > blk_rq_pos(__rq))
			p = &(*p)->rb_right;
		else
			return __rq;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
	return NULL;
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue rather than appended.
 * To be used by specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (blk_discard_rq(rq) != blk_discard_rq(pos))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);
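
/*
 * Decide whether bio can be merged with an existing request: first try
 * the one-hit last_merge cache, then a hash lookup for a back merge,
 * and finally ask the io scheduler (typically a front-merge lookup in
 * its sector-sorted RB tree).
 */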
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge) {
		ret = elv_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->ops->elevator_merge_fn)
		return e->ops->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			     struct request *next)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);
	elv_rqhash_del(q, next);

	q->nr_sorted--;
	q->last_merge = rq;
}

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (blk_sorted_rq(rq))
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}
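
/*
 * Force the io scheduler to dispatch everything it is holding back,
 * e.g. when draining the queue for an elevator switch or a barrier;
 * complains if the scheduler still keeps requests hidden afterwards.
 */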
void elv_drain_elevator(struct request_queue *q)
{
	static int printed;
	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted == 0)
		return;
	if (printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
	}
}

/*
 * Call with queue lock held, interrupts disabled
 */
void elv_quiesce_start(struct request_queue *q)
{
	if (!q->elevator)
		return;

	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);

	/*
	 * make sure we don't have any requests in flight
	 */
	elv_drain_elevator(q);
	while (q->rq.elvpriv) {
		__blk_run_queue(q);
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
		elv_drain_elevator(q);
	}
}

void elv_quiesce_end(struct request_queue *q)
{
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
}
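
/*
 * Queue rq according to the insertion point requested in 'where':
 * FRONT/BACK go straight onto the dispatch list, SORT hands the
 * request to the io scheduler, and REQUEUE front-inserts (or, during
 * an ordered flush, inserts in ordseq order).  May unplug the queue.
 */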
void elv_insert(struct request_queue *q, struct request *rq, int where)
{
	struct list_head *pos;
	unsigned ordseq;
	int unplug_it = 1;

	trace_block_rq_insert(q, rq);

	rq->q = q;

	switch (where) {
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;

		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in a hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT:
		BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->ops->elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_REQUEUE:
		/*
		 * If ordered flush isn't in progress, we do front
		 * insertion; otherwise, requests should be requeued
		 * in ordseq order.
		 */
		rq->cmd_flags |= REQ_SOFTBARRIER;

		/*
		 * Most requeues happen because of a busy condition,
		 * don't force unplug of the queue for that case.
		 */
		unplug_it = 0;

		if (q->ordseq == 0) {
			list_add(&rq->queuelist, &q->queue_head);
			break;
		}

		ordseq = blk_ordered_req_seq(rq);

		list_for_each(pos, &q->queue_head) {
			struct request *pos_rq = list_entry_rq(pos);
			if (ordseq <= blk_ordered_req_seq(pos_rq))
				break;
		}

		list_add_tail(&rq->queuelist, pos);
		break;

	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}

	if (unplug_it && blk_queue_plugged(q)) {
		int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
				- queue_in_flight(q);

		if (nrq >= q->unplug_thresh)
			__generic_unplug_device(q);
	}
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where,
		       int plug)
{
	if (q->ordcolor)
		rq->cmd_flags |= REQ_ORDERED_COLOR;

	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
		/*
		 * toggle ordered color
		 */
		if (blk_barrier_rq(rq))
			q->ordcolor ^= 1;

		/*
		 * barriers implicitly indicate back insertion
		 */
		if (where == ELEVATOR_INSERT_SORT)
			where = ELEVATOR_INSERT_BACK;

		/*
		 * this request is scheduling boundary, update
		 * end_sector
		 */
		if (blk_fs_request(rq) || blk_discard_rq(rq)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    where == ELEVATOR_INSERT_SORT)
		where = ELEVATOR_INSERT_BACK;

	if (plug)
		blk_plug_device(q);

	elv_insert(q, rq, where);
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where,
		     int plug)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where, plug);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

int elv_queue_empty(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	if (!list_empty(&q->queue_head))
		return 0;

	if (e->ops->elevator_queue_empty_fn)
		return e->ops->elevator_queue_empty_fn(q);

	return 1;
}
EXPORT_SYMBOL(elv_queue_empty);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_latter_req_fn)
		return e->ops->elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_former_req_fn)
		return e->ops->elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_set_req_fn)
		return e->ops->elevator_set_req_fn(q, rq, gfp_mask);

	rq->elevator_private = NULL;
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_put_req_fn)
		e->ops->elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_may_queue_fn)
		return e->ops->elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}

void elv_abort_queue(struct request_queue *q)
{
	struct request *rq;

	while (!list_empty(&q->queue_head)) {
		rq = list_entry_rq(q->queue_head.next);
		rq->cmd_flags |= REQ_QUIET;
		trace_block_rq_abort(q, rq);
		/*
		 * Mark this request as started so we don't trigger
		 * any debug logic in the end I/O path.
		 */
		blk_start_request(rq);
		__blk_end_request_all(rq, -EIO);
	}
}
EXPORT_SYMBOL(elv_abort_queue);

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}

	/*
	 * Check if the queue is waiting for fs requests to be
	 * drained for flush sequence.
	 */
	if (unlikely(q->ordseq)) {
		struct request *next = NULL;

		if (!list_empty(&q->queue_head))
			next = list_entry_rq(q->queue_head.next);

		if (!queue_in_flight(q) &&
		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
		    (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
			__blk_run_queue(q);
		}
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};
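
/*
 * Register the elevator's "iosched" kobject under the queue's kobject
 * and create the scheduler-specific sysfs attributes beneath it.
 */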
int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
	}
	return error;
}

static void __elv_unregister_queue(struct elevator_queue *e)
{
	kobject_uevent(&e->kobj, KOBJ_REMOVE);
	kobject_del(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
	if (q)
		__elv_unregister_queue(q->elevator);
}

void elv_register(struct elevator_type *e)
{
	char *def = "";

	spin_lock(&elv_list_lock);
	BUG_ON(elevator_find(e->elevator_name));
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	struct task_struct *g, *p;

	/*
	 * Iterate over every thread in the system and trim each
	 * io context, dropping state this scheduler cached there.
	 */
	if (e->ops.trim) {
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (p->io_context)
				e->ops.trim(p->io_context);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * Switch to new_e io scheduler.  Be careful not to introduce deadlocks:
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one.  This way we have a chance of going back to
 * the old one if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old_elevator, *e;
	void *data;

	/*
	 * Allocate new elevator
	 */
	e = elevator_alloc(q, new_e);
	if (!e)
		return 0;

	data = elevator_init_queue(q, e);
	if (!data) {
		kobject_put(&e->kobj);
		return 0;
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data
	 */
	spin_lock_irq(q->queue_lock);
	elv_quiesce_start(q);

	/*
	 * Remember old elevator.
	 */
	old_elevator = q->elevator;

	/*
	 * attach and start new elevator
	 */
	elevator_attach(q, e, data);

	spin_unlock_irq(q->queue_lock);

	__elv_unregister_queue(old_elevator);

	if (elv_register_queue(q))
		goto fail_register;

	/*
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
	spin_lock_irq(q->queue_lock);
	elv_quiesce_end(q);
	spin_unlock_irq(q->queue_lock);

	blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);

	return 1;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	q->elevator = old_elevator;
	elv_register_queue(q);

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);

	return 0;
}
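
/*
 * sysfs store/show handlers backing /sys/block/<dev>/queue/scheduler:
 * writing a scheduler name triggers an elevator switch, reading lists
 * the registered schedulers with the active one in brackets.
 */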
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	if (!q->elevator)
		return count;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	strstrip(elevator_name);

	e = elevator_get(elevator_name);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		elevator_put(e);
		return count;
	}

	if (!elevator_switch(q, e))
		printk(KERN_ERR "elevator: switch to %s failed\n",
							elevator_name);
	return count;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv;
	struct elevator_type *__e;
	int len = 0;

	if (!q->elevator)
		return sprintf(name, "none\n");

	elv = e->elevator_type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(len+name, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);