elevator.c

/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "blk.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	\
		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
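
/*
 * A request is hashed by the sector immediately following its last sector
 * (rq_hash_key), so looking up bio->bi_sector in the hash returns a request
 * that the bio could be back-merged into.
 */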

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_allow_merge_fn)
		return e->ops->elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * Don't merge file system requests and discard requests
	 */
	if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
		return 0;

	/*
	 * Don't merge discard requests and secure discard requests
	 */
	if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
		return 0;

	/*
	 * different data direction or already started, don't merge
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * must be same device and not a special request
	 */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return 0;

	/*
	 * only merge integrity protected bio into ditto rq
	 */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return 0;

	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);
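
/*
 * Classify how bio could merge with __rq: ELEVATOR_BACK_MERGE when the bio
 * starts where __rq ends, ELEVATOR_FRONT_MERGE when the bio ends where __rq
 * starts, otherwise ELEVATOR_NO_MERGE.
 */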
int elv_try_merge(struct request *__rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/*
	 * we can merge and sequence is ok, check if it's possible
	 */
	if (elv_rq_merge_ok(__rq, bio)) {
		if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;
		else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;
	}

	return ret;
}

static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e) {
		char elv[ELV_NAME_MAX + strlen("-iosched")];

		spin_unlock(&elv_list_lock);

		snprintf(elv, sizeof(elv), "%s-iosched", name);

		request_module("%s", elv);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}

static void *elevator_init_queue(struct request_queue *q,
				 struct elevator_queue *eq)
{
	return eq->ops->elevator_init_fn(q);
}

static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
			    void *data)
{
	q->elevator = eq;
	eq->elevator_data = data;
}

static char chosen_elevator[16];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);
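
/*
 * Example: booting with "elevator=deadline" on the kernel command line
 * records "deadline" in chosen_elevator, and elevator_init() below prefers
 * it over CONFIG_DEFAULT_IOSCHED for queues that don't request a scheduler
 * by name.
 */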

static struct kobj_type elv_ktype;

static struct elevator_queue *elevator_alloc(struct request_queue *q,
					     struct elevator_type *e)
{
	struct elevator_queue *eq;
	int i;

	eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->ops = &e->ops;
	eq->elevator_type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);

	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
					GFP_KERNEL, q->node);
	if (!eq->hash)
		goto err;

	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->elevator_type);
	kfree(e->hash);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	void *data;

	if (unlikely(q->elevator))
		return 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name);
		if (!e)
			return -EINVAL;
	}

	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
							"Using noop.\n");
			e = elevator_get("noop");
		}
	}

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	data = elevator_init_queue(q, eq);
	if (!data) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}

	elevator_attach(q, eq, data);
	return 0;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->ops->elevator_exit_fn)
		e->ops->elevator_exit_fn(e);
	e->ops = NULL;
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static inline void __elv_rqhash_del(struct request *rq)
{
	hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) > blk_rq_pos(__rq))
			p = &(*p)->rb_right;
		else
			return __rq;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
	return NULL;
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
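
/*
 * The rb helpers above keep requests sorted by start sector (blk_rq_pos);
 * elv_rb_add() returns the existing request on a key collision instead of
 * inserting, and elevators such as deadline use these helpers for their
 * per-direction sort lists.
 */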

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	BUG_ON(rq->cmd_flags & REQ_ON_PLUG);

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if ((rq->cmd_flags & REQ_DISCARD) !=
		    (pos->cmd_flags & REQ_DISCARD))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);
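
/*
 * Decide whether and how bio can be merged into an already queued request:
 * try the one-hit last_merge cache first, then a hash lookup for a back
 * merge, and finally the scheduler's own merge_fn (which can also find
 * front merges).  On success *req points at the chosen request and the
 * merge type is returned.
 */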
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * Levels of merges:
	 * 	nomerges:  No merges at all attempted
	 * 	noxmerges: Only simple one-hit cache try
	 * 	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge) {
		ret = elv_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->ops->elevator_merge_fn)
		return e->ops->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			struct request *next)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);
	elv_rqhash_del(q, next);

	q->nr_sorted--;
	q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_bio_merged_fn)
		e->ops->elevator_bio_merged_fn(q, rq, bio);
}

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (rq->cmd_flags & REQ_SORTED)
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
	static int printed;
	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted == 0)
		return;
	if (printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
	}
}

/*
 * Call with queue lock held, interrupts disabled
 */
void elv_quiesce_start(struct request_queue *q)
{
	if (!q->elevator)
		return;

	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);

	/*
	 * make sure we don't have any requests in flight
	 */
	elv_drain_elevator(q);
	while (q->rq.elvpriv) {
		__blk_run_queue(q);
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
		elv_drain_elevator(q);
	}
}

void elv_quiesce_end(struct request_queue *q)
{
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
}
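
/*
 * Insert rq into the dispatch path at the position described by 'where'.
 * The caller is expected to hold the queue lock; SORT inserts hand the
 * request to the io scheduler, the other cases go straight onto
 * q->queue_head.
 */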
void elv_insert(struct request_queue *q, struct request *rq, int where)
{
	int unplug_it = 1;

	trace_block_rq_insert(q, rq);

	rq->q = q;

	switch (where) {
	case ELEVATOR_INSERT_REQUEUE:
		/*
		 * Most requeues happen because of a busy condition,
		 * don't force unplug of the queue for that case.
		 * Clear unplug_it and fall through.
		 */
		unplug_it = 0;

	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in a hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT:
		BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
		       !(rq->cmd_flags & REQ_DISCARD));
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->ops->elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_FLUSH:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		blk_insert_flush(rq);
		break;
	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}

	if (unplug_it && blk_queue_plugged(q)) {
		int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC]
				- queue_in_flight(q);

		if (nrq >= q->unplug_thresh)
			__generic_unplug_device(q);
	}
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where,
		       int plug)
{
	BUG_ON(rq->cmd_flags & REQ_ON_PLUG);

	if (rq->cmd_flags & REQ_SOFTBARRIER) {
		/* barriers are scheduling boundary, update end_sector */
		if (rq->cmd_type == REQ_TYPE_FS ||
		    (rq->cmd_flags & REQ_DISCARD)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    where == ELEVATOR_INSERT_SORT)
		where = ELEVATOR_INSERT_BACK;

	if (plug)
		blk_plug_device(q);

	elv_insert(q, rq, where);
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where,
		     int plug)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where, plug);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

int elv_queue_empty(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;

	if (!list_empty(&q->queue_head))
		return 0;

	if (e->ops->elevator_queue_empty_fn)
		return e->ops->elevator_queue_empty_fn(q);

	return 1;
}
EXPORT_SYMBOL(elv_queue_empty);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_latter_req_fn)
		return e->ops->elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_former_req_fn)
		return e->ops->elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_set_req_fn)
		return e->ops->elevator_set_req_fn(q, rq, gfp_mask);

	rq->elevator_private[0] = NULL;
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_put_req_fn)
		e->ops->elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	struct elevator_queue *e = q->elevator;

	if (e->ops->elevator_may_queue_fn)
		return e->ops->elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}

void elv_abort_queue(struct request_queue *q)
{
	struct request *rq;

	blk_abort_flushes(q);

	while (!list_empty(&q->queue_head)) {
		rq = list_entry_rq(q->queue_head.next);
		rq->cmd_flags |= REQ_QUIET;
		trace_block_rq_abort(q, rq);
		/*
		 * Mark this request as started so we don't trigger
		 * any debug logic in the end I/O path.
		 */
		blk_start_request(rq);
		__blk_end_request_all(rq, -EIO);
	}
}
EXPORT_SYMBOL(elv_abort_queue);

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->cmd_flags & REQ_SORTED) &&
		    e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
		e->registered = 1;
	}
	return error;
}
EXPORT_SYMBOL(elv_register_queue);

static void __elv_unregister_queue(struct elevator_queue *e)
{
	kobject_uevent(&e->kobj, KOBJ_REMOVE);
	kobject_del(&e->kobj);
	e->registered = 0;
}

void elv_unregister_queue(struct request_queue *q)
{
	if (q)
		__elv_unregister_queue(q->elevator);
}
EXPORT_SYMBOL(elv_unregister_queue);

void elv_register(struct elevator_type *e)
{
	char *def = "";

	spin_lock(&elv_list_lock);
	BUG_ON(elevator_find(e->elevator_name));
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
				def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	struct task_struct *g, *p;

	/*
	 * Iterate every thread in the process to remove the io contexts.
	 */
	if (e->ops.trim) {
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (p->io_context)
				e->ops.trim(p->io_context);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one. this way we have a chance of going back to the
 * old one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old_elevator, *e;
	void *data;
	int err;

	/*
	 * Allocate new elevator
	 */
	e = elevator_alloc(q, new_e);
	if (!e)
		return -ENOMEM;

	data = elevator_init_queue(q, e);
	if (!data) {
		kobject_put(&e->kobj);
		return -ENOMEM;
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data
	 */
	spin_lock_irq(q->queue_lock);
	elv_quiesce_start(q);

	/*
	 * Remember old elevator.
	 */
	old_elevator = q->elevator;

	/*
	 * attach and start new elevator
	 */
	elevator_attach(q, e, data);

	spin_unlock_irq(q->queue_lock);

	if (old_elevator->registered) {
		__elv_unregister_queue(old_elevator);

		err = elv_register_queue(q);
		if (err)
			goto fail_register;
	}

	/*
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
	spin_lock_irq(q->queue_lock);
	elv_quiesce_end(q);
	spin_unlock_irq(q->queue_lock);

	blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);

	return 0;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	q->elevator = old_elevator;
	elv_register_queue(q);

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);

	return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
int elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	if (!q->elevator)
		return -ENXIO;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(strstrip(elevator_name));
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		elevator_put(e);
		return 0;
	}

	return elevator_switch(q, e);
}
EXPORT_SYMBOL(elevator_change);
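
/*
 * elv_iosched_store()/elv_iosched_show() back the per-queue sysfs
 * "scheduler" attribute: writing a scheduler name switches the queue via
 * elevator_change(), and reading lists the registered schedulers with the
 * active one in brackets, e.g. "noop deadline [cfq]".
 */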
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!q->elevator)
		return count;

	ret = elevator_change(q, name);
	if (!ret)
		return count;

	printk(KERN_ERR "elevator: switch to %s failed\n", name);
	return ret;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv;
	struct elevator_type *__e;
	int len = 0;

	if (!q->elevator || !blk_queue_stackable(q))
		return sprintf(name, "none\n");

	elv = e->elevator_type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(len+name, "\n");
	return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);