/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 *  when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/blktrace_api.h>
#include <trace/block.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include "blk.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

DEFINE_TRACE(block_rq_abort);
/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	\
		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))

DEFINE_TRACE(block_rq_insert);
DEFINE_TRACE(block_rq_issue);
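
/*
 * Note: a request hashes on its *end* sector (rq_hash_key() above), so a
 * bio whose bi_sector equals that key is a back-merge candidate.  Worked
 * example (illustrative numbers only): a request at sector 96 with
 * nr_sectors == 8 hashes under key 104; a new bio starting at sector 104
 * finds it via elv_rqhash_find(q, 104) and can be appended to it.
 */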
/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	elevator_t *e = q->elevator;

	if (e->ops->elevator_allow_merge_fn)
		return e->ops->elevator_allow_merge_fn(q, rq, bio);

	return 1;
}
/*
 * can we safely merge with this request?
 */
int elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!rq_mergeable(rq))
		return 0;

	/*
	 * Don't merge file system requests and discard requests
	 */
	if (bio_discard(bio) != bio_discard(rq->bio))
		return 0;

	/*
	 * different data direction or already started, don't merge
	 */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return 0;

	/*
	 * must be same device and not a special request
	 */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || rq->special)
		return 0;

	/*
	 * only merge integrity protected bio into ditto rq
	 */
	if (bio_integrity(bio) != blk_integrity_rq(rq))
		return 0;

	if (!elv_iosched_allow_merge(rq, bio))
		return 0;

	return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);
static inline int elv_try_merge(struct request *__rq, struct bio *bio)
{
	int ret = ELEVATOR_NO_MERGE;

	/*
	 * we can merge and sequence is ok, check if it's possible
	 */
	if (elv_rq_merge_ok(__rq, bio)) {
		if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
			ret = ELEVATOR_BACK_MERGE;
		else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
			ret = ELEVATOR_FRONT_MERGE;
	}

	return ret;
}
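
/*
 * The classification above is plain sector arithmetic (illustrative
 * numbers): a request covering sectors [64, 72) back-merges a bio that
 * starts at sector 72 (64 + 8 == 72) and front-merges an 8-sector bio
 * that starts at sector 56 (64 - 8 == 56).
 */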
static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e) {
		char elv[ELV_NAME_MAX + strlen("-iosched")];

		spin_unlock(&elv_list_lock);

		if (!strcmp(name, "anticipatory"))
			sprintf(elv, "as-iosched");
		else
			sprintf(elv, "%s-iosched", name);

		request_module("%s", elv);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}
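
/*
 * Modular schedulers follow the "<name>-iosched" module naming
 * convention, so (assuming a modular build) elevator_get("deadline")
 * falls back to request_module("deadline-iosched") when the scheduler
 * isn't registered yet; "anticipatory" is special-cased above to the
 * shorter "as-iosched" module name.
 */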
static void *elevator_init_queue(struct request_queue *q,
				 struct elevator_queue *eq)
{
	return eq->ops->elevator_init_fn(q);
}

static void elevator_attach(struct request_queue *q, struct elevator_queue *eq,
			    void *data)
{
	q->elevator = eq;
	eq->elevator_data = data;
}

static char chosen_elevator[16];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	if (!strcmp(str, "as"))
		strcpy(chosen_elevator, "anticipatory");
	else
		strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);
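
/*
 * Example: booting with "elevator=deadline" makes deadline the default
 * scheduler for all queues, while the legacy shorthand "elevator=as"
 * still selects "anticipatory" via the compatibility check above.
 */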
static struct kobj_type elv_ktype;

static elevator_t *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	elevator_t *eq;
	int i;

	eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->ops = &e->ops;
	eq->elevator_type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);

	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
					GFP_KERNEL, q->node);
	if (!eq->hash)
		goto err;

	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}

static void elevator_release(struct kobject *kobj)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);

	elevator_put(e->elevator_type);
	kfree(e->hash);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	struct elevator_queue *eq;
	int ret = 0;
	void *data;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name);
		if (!e)
			return -EINVAL;
	}

	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
				"Using noop.\n");
			e = elevator_get("noop");
		}
	}

	eq = elevator_alloc(q, e);
	if (!eq)
		return -ENOMEM;

	data = elevator_init_queue(q, eq);
	if (!data) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}

	elevator_attach(q, eq, data);
	return ret;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(elevator_t *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->ops->elevator_exit_fn)
		e->ops->elevator_exit_fn(e);
	e->ops = NULL;
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

static void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_activate_req_fn)
		e->ops->elevator_activate_req_fn(q, rq);
}

static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_deactivate_req_fn)
		e->ops->elevator_deactivate_req_fn(q, rq);
}

static inline void __elv_rqhash_del(struct request *rq)
{
	hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	elevator_t *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
struct request *elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (rq->sector < __rq->sector)
			p = &(*p)->rb_left;
		else if (rq->sector > __rq->sector)
			p = &(*p)->rb_right;
		else
			return __rq;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
	return NULL;
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < rq->sector)
			n = n->rb_left;
		else if (sector > rq->sector)
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
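
/*
 * Usage sketch (hypothetical scheduler, not part of this file): an io
 * scheduler typically keeps its pending requests in one of these trees
 * so it can serve them in sector order; see deadline-iosched.c or
 * cfq-iosched.c for real users.
 *
 *	static void foo_add_request(struct request_queue *q,
 *				    struct request *rq)
 *	{
 *		struct foo_data *fd = q->elevator->elevator_data;
 *
 *		elv_rb_add(&fd->sort_list, rq);
 *	}
 *
 *	static void foo_remove_request(struct request_queue *q,
 *				       struct request *rq)
 *	{
 *		struct foo_data *fd = q->elevator->elevator_data;
 *
 *		elv_rb_del(&fd->sort_list, rq);
 *	}
 *
 * "foo_data" and "sort_list" are made-up names for illustration.
 */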
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if (blk_discard_rq(rq) != blk_discard_rq(pos))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (rq->sector >= boundary) {
			if (pos->sector < boundary)
				continue;
		} else {
			if (pos->sector >= boundary)
				break;
		}
		if (rq->sector >= pos->sector)
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);
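
/*
 * The boundary handling above yields a one-way elevator: requests below
 * q->end_sector sort *behind* all requests at or above it, so the head
 * keeps sweeping forward before wrapping around.  Illustrative example:
 * with a boundary of 100, pending sectors 120 and 130 dispatch before
 * 10 and 20, i.e. in the order 120, 130, 10, 20.
 */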
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	elevator_t *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge) {
		ret = elv_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->ops->elevator_merge_fn)
		return e->ops->elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}
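
/*
 * Caller-side sketch (simplified; the real caller is __make_request()
 * in blk-core.c): the return value says how bio relates to *req, and
 * the caller performs the data merge before notifying the elevator:
 *
 *	switch (elv_merge(q, &req, bio)) {
 *	case ELEVATOR_BACK_MERGE:
 *		... append bio to req, then ...
 *		elv_merged_request(q, req, ELEVATOR_BACK_MERGE);
 *		break;
 *	case ELEVATOR_FRONT_MERGE:
 *		... prepend bio to req, then ...
 *		elv_merged_request(q, req, ELEVATOR_FRONT_MERGE);
 *		break;
 *	default:
 *		... no merge possible, allocate a fresh request ...
 *	}
 */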
void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merged_fn)
		e->ops->elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			struct request *next)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_merge_req_fn)
		e->ops->elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);
	elv_rqhash_del(q, next);

	q->nr_sorted--;
	q->last_merge = rq;
}

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq))
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
}

static void elv_drain_elevator(struct request_queue *q)
{
	static int printed;

	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted == 0)
		return;
	if (printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->elevator_type->elevator_name, q->nr_sorted);
	}
}
void elv_insert(struct request_queue *q, struct request *rq, int where)
{
	struct list_head *pos;
	unsigned ordseq;
	int unplug_it = 1;

	trace_block_rq_insert(q, rq);

	rq->q = q;

	switch (where) {
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;

		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		blk_remove_plug(q);
		blk_start_queueing(q);
		break;

	case ELEVATOR_INSERT_SORT:
		BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq));
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->ops->elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_REQUEUE:
		/*
		 * If ordered flush isn't in progress, we do front
		 * insertion; otherwise, requests should be requeued
		 * in ordseq order.
		 */
		rq->cmd_flags |= REQ_SOFTBARRIER;

		/*
		 * Most requeues happen because of a busy condition,
		 * don't force unplug of the queue for that case.
		 */
		unplug_it = 0;

		if (q->ordseq == 0) {
			list_add(&rq->queuelist, &q->queue_head);
			break;
		}

		ordseq = blk_ordered_req_seq(rq);

		list_for_each(pos, &q->queue_head) {
			struct request *pos_rq = list_entry_rq(pos);
			if (ordseq <= blk_ordered_req_seq(pos_rq))
				break;
		}

		list_add_tail(&rq->queuelist, pos);
		break;

	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}

	if (unplug_it && blk_queue_plugged(q)) {
		int nrq = q->rq.count[READ] + q->rq.count[WRITE]
			- q->in_flight;

		if (nrq >= q->unplug_thresh)
			__generic_unplug_device(q);
	}
}
void __elv_add_request(struct request_queue *q, struct request *rq, int where,
		       int plug)
{
	if (q->ordcolor)
		rq->cmd_flags |= REQ_ORDERED_COLOR;

	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
		/*
		 * toggle ordered color
		 */
		if (blk_barrier_rq(rq))
			q->ordcolor ^= 1;

		/*
		 * barriers implicitly indicate back insertion
		 */
		if (where == ELEVATOR_INSERT_SORT)
			where = ELEVATOR_INSERT_BACK;

		/*
		 * this request is scheduling boundary, update
		 * end_sector
		 */
		if (blk_fs_request(rq) || blk_discard_rq(rq)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    where == ELEVATOR_INSERT_SORT)
		where = ELEVATOR_INSERT_BACK;

	if (plug)
		blk_plug_device(q);

	elv_insert(q, rq, where);
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where,
		     int plug)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where, plug);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);
static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		while (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			if (blk_do_ordered(q, &rq))
				return rq;
		}

		if (!q->elevator->ops->elevator_dispatch_fn(q, 0))
			return NULL;
	}
}

struct request *elv_next_request(struct request_queue *q)
{
	struct request *rq;
	int ret;

	while ((rq = __elv_next_request(q)) != NULL) {
		/*
		 * Kill the empty barrier place holder, the driver must
		 * not ever see it.
		 */
		if (blk_empty_barrier(rq)) {
			__blk_end_request(rq, 0, blk_rq_bytes(rq));
			continue;
		}
		if (!(rq->cmd_flags & REQ_STARTED)) {
			/*
			 * This is the first time the device driver
			 * sees this request (possibly after
			 * requeueing).  Notify IO scheduler.
			 */
			if (blk_sorted_rq(rq))
				elv_activate_rq(q, rq);

			/*
			 * just mark as started even if we don't start
			 * it, a request that has been delayed should
			 * not be passed by new incoming requests
			 */
			rq->cmd_flags |= REQ_STARTED;
			trace_block_rq_issue(q, rq);
		}

		if (!q->boundary_rq || q->boundary_rq == rq) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = NULL;
		}

		if (rq->cmd_flags & REQ_DONTPREP)
			break;

		if (q->dma_drain_size && rq->data_len) {
			/*
			 * make sure space for the drain appears.  we
			 * know we can do this because max_hw_segments
			 * has been adjusted to be one fewer than the
			 * device can handle
			 */
			rq->nr_phys_segments++;
		}

		if (!q->prep_rq_fn)
			break;

		ret = q->prep_rq_fn(q, rq);
		if (ret == BLKPREP_OK) {
			break;
		} else if (ret == BLKPREP_DEFER) {
			/*
			 * the request may have been (partially) prepped.
			 * we need to keep this request in the front to
			 * avoid resource deadlock.  REQ_STARTED will
			 * prevent other fs requests from passing this one.
			 */
			if (q->dma_drain_size && rq->data_len &&
			    !(rq->cmd_flags & REQ_DONTPREP)) {
				/*
				 * remove the space for the drain we added
				 * so that we don't add it again
				 */
				--rq->nr_phys_segments;
			}

			rq = NULL;
			break;
		} else if (ret == BLKPREP_KILL) {
			rq->cmd_flags |= REQ_QUIET;
			__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
		} else {
			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
			break;
		}
	}

	return rq;
}
EXPORT_SYMBOL(elv_next_request);
void elv_dequeue_request(struct request_queue *q, struct request *rq)
{
	BUG_ON(list_empty(&rq->queuelist));
	BUG_ON(ELV_ON_HASH(rq));

	list_del_init(&rq->queuelist);

	/*
	 * the time frame between a request being removed from the lists
	 * and when it is freed is accounted as io that is in progress at
	 * the driver side.
	 */
	if (blk_account_rq(rq))
		q->in_flight++;
}

int elv_queue_empty(struct request_queue *q)
{
	elevator_t *e = q->elevator;

	if (!list_empty(&q->queue_head))
		return 0;

	if (e->ops->elevator_queue_empty_fn)
		return e->ops->elevator_queue_empty_fn(q);

	return 1;
}
EXPORT_SYMBOL(elv_queue_empty);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_latter_req_fn)
		return e->ops->elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_former_req_fn)
		return e->ops->elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_set_req_fn)
		return e->ops->elevator_set_req_fn(q, rq, gfp_mask);

	rq->elevator_private = NULL;
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_put_req_fn)
		e->ops->elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	elevator_t *e = q->elevator;

	if (e->ops->elevator_may_queue_fn)
		return e->ops->elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}

void elv_abort_queue(struct request_queue *q)
{
	struct request *rq;

	while (!list_empty(&q->queue_head)) {
		rq = list_entry_rq(q->queue_head.next);
		rq->cmd_flags |= REQ_QUIET;
		trace_block_rq_abort(q, rq);
		__blk_end_request(rq, -EIO, blk_rq_bytes(rq));
	}
}
EXPORT_SYMBOL(elv_abort_queue);
void elv_completed_request(struct request_queue *q, struct request *rq)
{
	elevator_t *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight--;
		if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
			e->ops->elevator_completed_req_fn(q, rq);
	}

	/*
	 * Check if the queue is waiting for fs requests to be
	 * drained for flush sequence.
	 */
	if (unlikely(q->ordseq)) {
		struct request *first_rq = list_entry_rq(q->queue_head.next);
		if (q->in_flight == 0 &&
		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
		    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
			blk_start_queueing(q);
		}
	}
}
#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct elv_fs_entry *entry = to_elv(attr);
	ssize_t error;

	if (!entry->show)
		return -EIO;

	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	elevator_t *e = container_of(kobj, elevator_t, kobj);
	struct elv_fs_entry *entry = to_elv(attr);
	ssize_t error;

	if (!entry->store)
		return -EIO;

	mutex_lock(&e->sysfs_lock);
	error = e->ops ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};
int elv_register_queue(struct request_queue *q)
{
	elevator_t *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
	}
	return error;
}

static void __elv_unregister_queue(elevator_t *e)
{
	kobject_uevent(&e->kobj, KOBJ_REMOVE);
	kobject_del(&e->kobj);
}

void elv_unregister_queue(struct request_queue *q)
{
	if (q)
		__elv_unregister_queue(q->elevator);
}

void elv_register(struct elevator_type *e)
{
	char *def = "";

	spin_lock(&elv_list_lock);
	BUG_ON(elevator_find(e->elevator_name));
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	if (!strcmp(e->elevator_name, chosen_elevator) ||
			(!*chosen_elevator &&
			 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
		def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
}
EXPORT_SYMBOL_GPL(elv_register);
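
/*
 * Registration sketch (hypothetical "foo" scheduler; the names are
 * made up, see noop-iosched.c for a real minimal example):
 *
 *	static struct elevator_type iosched_foo = {
 *		.ops = {
 *			.elevator_dispatch_fn	= foo_dispatch,
 *			.elevator_add_req_fn	= foo_add_request,
 *			.elevator_init_fn	= foo_init_queue,
 *			.elevator_exit_fn	= foo_exit_queue,
 *		},
 *		.elevator_name	= "foo",
 *		.elevator_owner	= THIS_MODULE,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		elv_register(&iosched_foo);
 *		return 0;
 *	}
 *	module_init(foo_init);
 */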
void elv_unregister(struct elevator_type *e)
{
	struct task_struct *g, *p;

	/*
	 * Iterate every thread in the process to remove the io contexts.
	 */
	if (e->ops.trim) {
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (p->io_context)
				e->ops.trim(p->io_context);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);
}
EXPORT_SYMBOL_GPL(elv_unregister);
/*
 * switch to new_e io scheduler. be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one. this way we have a chance of going back to the
 * old one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	elevator_t *old_elevator, *e;
	void *data;

	/*
	 * Allocate new elevator
	 */
	e = elevator_alloc(q, new_e);
	if (!e)
		return 0;

	data = elevator_init_queue(q, e);
	if (!data) {
		kobject_put(&e->kobj);
		return 0;
	}

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data
	 */
	spin_lock_irq(q->queue_lock);

	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);

	elv_drain_elevator(q);

	while (q->rq.elvpriv) {
		blk_start_queueing(q);
		spin_unlock_irq(q->queue_lock);
		msleep(10);
		spin_lock_irq(q->queue_lock);
		elv_drain_elevator(q);
	}

	/*
	 * Remember old elevator.
	 */
	old_elevator = q->elevator;

	/*
	 * attach and start new elevator
	 */
	elevator_attach(q, e, data);

	spin_unlock_irq(q->queue_lock);

	__elv_unregister_queue(old_elevator);

	if (elv_register_queue(q))
		goto fail_register;

	/*
	 * finally exit old elevator and turn off BYPASS.
	 */
	elevator_exit(old_elevator);
	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);

	blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);

	return 1;

fail_register:
	/*
	 * switch failed, exit the new io scheduler and reattach the old
	 * one again (along with re-adding the sysfs dir)
	 */
	elevator_exit(e);
	q->elevator = old_elevator;
	elv_register_queue(q);

	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);

	return 0;
}
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	strstrip(elevator_name);

	e = elevator_get(elevator_name);
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->elevator_type->elevator_name)) {
		elevator_put(e);
		return count;
	}

	if (!elevator_switch(q, e))
		printk(KERN_ERR "elevator: switch to %s failed\n",
							elevator_name);
	return count;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	elevator_t *e = q->elevator;
	struct elevator_type *elv = e->elevator_type;
	struct elevator_type *__e;
	int len = 0;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(len+name, "\n");
	return len;
}
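
/*
 * These two back the per-queue "scheduler" sysfs attribute, e.g.
 * (device name is illustrative):
 *
 *	# cat /sys/block/sda/queue/scheduler
 *	noop anticipatory deadline [cfq]
 *	# echo deadline > /sys/block/sda/queue/scheduler
 */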
struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);