elevator.c

/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-cgroup.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)     ((sec) >> 3)
#define ELV_HASH_FN(sec)        \
                (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES        (1 << elv_hash_shift)
#define rq_hash_key(rq)         (blk_rq_pos(rq) + blk_rq_sectors(rq))
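
/*
 * Requests are hashed by the sector immediately following them
 * (rq_hash_key), grouped into 8-sector blocks by ELV_HASH_BLOCK and
 * spread over 1 << elv_hash_shift buckets.  This lets elv_merge() find
 * a back-merge candidate for a bio in O(1): a request whose end sector
 * equals the bio's start sector.
 */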

/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
        struct request_queue *q = rq->q;
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_allow_merge_fn)
                return e->type->ops.elevator_allow_merge_fn(q, rq, bio);

        return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
        if (!blk_rq_merge_ok(rq, bio))
                return 0;

        if (!elv_iosched_allow_merge(rq, bio))
                return 0;

        return 1;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static struct elevator_type *elevator_find(const char *name)
{
        struct elevator_type *e;

        list_for_each_entry(e, &elv_list, list) {
                if (!strcmp(e->elevator_name, name))
                        return e;
        }

        return NULL;
}

static void elevator_put(struct elevator_type *e)
{
        module_put(e->elevator_owner);
}
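
/*
 * Look up an elevator type by name, loading the "<name>-iosched" module
 * if it is not registered yet.  On success a reference on the owning
 * module is held; elevator_put() drops it again.
 */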
static struct elevator_type *elevator_get(const char *name)
{
        struct elevator_type *e;

        spin_lock(&elv_list_lock);

        e = elevator_find(name);
        if (!e) {
                spin_unlock(&elv_list_lock);
                request_module("%s-iosched", name);
                spin_lock(&elv_list_lock);
                e = elevator_find(name);
        }

        if (e && !try_module_get(e->elevator_owner))
                e = NULL;

        spin_unlock(&elv_list_lock);

        return e;
}

static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
        /*
         * Be backwards-compatible with previous kernels, so users
         * won't get the wrong elevator.
         */
        strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
        return 1;
}

__setup("elevator=", elevator_setup);

/* called during boot to load the elevator chosen by the elevator param */
void __init load_default_elevator_module(void)
{
        struct elevator_type *e;

        if (!chosen_elevator[0])
                return;

        spin_lock(&elv_list_lock);
        e = elevator_find(chosen_elevator);
        spin_unlock(&elv_list_lock);

        if (!e)
                request_module("%s-iosched", chosen_elevator);
}

static struct kobj_type elv_ktype;

static struct elevator_queue *elevator_alloc(struct request_queue *q,
                                             struct elevator_type *e)
{
        struct elevator_queue *eq;
        int i;

        eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
        if (unlikely(!eq))
                goto err;

        eq->type = e;
        kobject_init(&eq->kobj, &elv_ktype);
        mutex_init(&eq->sysfs_lock);

        eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
                                GFP_KERNEL, q->node);
        if (!eq->hash)
                goto err;

        for (i = 0; i < ELV_HASH_ENTRIES; i++)
                INIT_HLIST_HEAD(&eq->hash[i]);

        return eq;
err:
        kfree(eq);
        elevator_put(e);
        return NULL;
}

static void elevator_release(struct kobject *kobj)
{
        struct elevator_queue *e;

        e = container_of(kobj, struct elevator_queue, kobj);
        elevator_put(e->type);
        kfree(e->hash);
        kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
        struct elevator_type *e = NULL;
        int err;

        if (unlikely(q->elevator))
                return 0;

        INIT_LIST_HEAD(&q->queue_head);
        q->last_merge = NULL;
        q->end_sector = 0;
        q->boundary_rq = NULL;

        if (name) {
                e = elevator_get(name);
                if (!e)
                        return -EINVAL;
        }

        if (!e && *chosen_elevator) {
                e = elevator_get(chosen_elevator);
                if (!e)
                        printk(KERN_ERR "I/O scheduler %s not found\n",
                               chosen_elevator);
        }

        if (!e) {
                e = elevator_get(CONFIG_DEFAULT_IOSCHED);
                if (!e) {
                        printk(KERN_ERR
                               "Default I/O scheduler not found. "
                               "Using noop.\n");
                        e = elevator_get("noop");
                }
        }

        q->elevator = elevator_alloc(q, e);
        if (!q->elevator)
                return -ENOMEM;

        err = e->ops.elevator_init_fn(q);
        if (err) {
                kobject_put(&q->elevator->kobj);
                return err;
        }

        return 0;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
        mutex_lock(&e->sysfs_lock);
        if (e->type->ops.elevator_exit_fn)
                e->type->ops.elevator_exit_fn(e);
        mutex_unlock(&e->sysfs_lock);

        kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);
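
/*
 * Request hash maintenance.  A request is added when it becomes a merge
 * candidate, repositioned after a back merge grows it (its hash key is
 * its end sector), and dropped lazily by elv_rqhash_find() once it is
 * no longer mergeable.
 */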
static inline void __elv_rqhash_del(struct request *rq)
{
        hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
        if (ELV_ON_HASH(rq))
                __elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        BUG_ON(ELV_ON_HASH(rq));
        hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
        __elv_rqhash_del(rq);
        elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
        struct elevator_queue *e = q->elevator;
        struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
        struct hlist_node *entry, *next;
        struct request *rq;

        hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
                BUG_ON(!ELV_ON_HASH(rq));

                if (unlikely(!rq_mergeable(rq))) {
                        __elv_rqhash_del(rq);
                        continue;
                }

                if (rq_hash_key(rq) == offset)
                        return rq;
        }

        return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct request *__rq;

        while (*p) {
                parent = *p;
                __rq = rb_entry(parent, struct request, rb_node);

                if (blk_rq_pos(rq) < blk_rq_pos(__rq))
                        p = &(*p)->rb_left;
                else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
                        p = &(*p)->rb_right;
        }

        rb_link_node(&rq->rb_node, parent, p);
        rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
        BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
        rb_erase(&rq->rb_node, root);
        RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
        struct rb_node *n = root->rb_node;
        struct request *rq;

        while (n) {
                rq = rb_entry(n, struct request, rb_node);

                if (sector < blk_rq_pos(rq))
                        n = n->rb_left;
                else if (sector > blk_rq_pos(rq))
                        n = n->rb_right;
                else
                        return rq;
        }

        return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue rather than simply
 * appended.  To be used by specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
        sector_t boundary;
        struct list_head *entry;
        int stop_flags;

        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        boundary = q->end_sector;
        stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
        list_for_each_prev(entry, &q->queue_head) {
                struct request *pos = list_entry_rq(entry);

                if ((rq->cmd_flags & REQ_DISCARD) !=
                    (pos->cmd_flags & REQ_DISCARD))
                        break;
                if (rq_data_dir(rq) != rq_data_dir(pos))
                        break;
                if (pos->cmd_flags & stop_flags)
                        break;
                /*
                 * Sector order is treated as circular around the boundary:
                 * requests at or beyond q->end_sector sort before those
                 * that have wrapped below it.
                 */
                if (blk_rq_pos(rq) >= boundary) {
                        if (blk_rq_pos(pos) < boundary)
                                continue;
                } else {
                        if (blk_rq_pos(pos) >= boundary)
                                break;
                }
                if (blk_rq_pos(rq) >= blk_rq_pos(pos))
                        break;
        }

        list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue. To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
        if (q->last_merge == rq)
                q->last_merge = NULL;

        elv_rqhash_del(q, rq);

        q->nr_sorted--;

        q->end_sector = rq_end_sector(rq);
        q->boundary_rq = rq;
        list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);
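
/*
 * Decide whether bio can be merged into an already queued request.
 * Checks, in order: the nomerges flag, the one-hit last_merge cache,
 * a hashed back-merge candidate, and finally the elevator's own
 * merge_fn.  On success *req points at the chosen request and the
 * return value says whether to merge at the front or the back.
 */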
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
        struct elevator_queue *e = q->elevator;
        struct request *__rq;
        int ret;

        /*
         * Levels of merges:
         *      nomerges:  No merges at all attempted
         *      noxmerges: Only simple one-hit cache try
         *      merges:    All merge tries attempted
         */
        if (blk_queue_nomerges(q))
                return ELEVATOR_NO_MERGE;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
                ret = blk_try_merge(q->last_merge, bio);
                if (ret != ELEVATOR_NO_MERGE) {
                        *req = q->last_merge;
                        return ret;
                }
        }

        if (blk_queue_noxmerges(q))
                return ELEVATOR_NO_MERGE;

        /*
         * See if our hash lookup can find a potential backmerge.
         */
        __rq = elv_rqhash_find(q, bio->bi_sector);
        if (__rq && elv_rq_merge_ok(__rq, bio)) {
                *req = __rq;
                return ELEVATOR_BACK_MERGE;
        }

        if (e->type->ops.elevator_merge_fn)
                return e->type->ops.elevator_merge_fn(q, req, bio);

        return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
static bool elv_attempt_insert_merge(struct request_queue *q,
                                     struct request *rq)
{
        struct request *__rq;
        bool ret;

        if (blk_queue_nomerges(q))
                return false;

        /*
         * First try one-hit cache.
         */
        if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
                return true;

        if (blk_queue_noxmerges(q))
                return false;

        ret = false;
        /*
         * See if our hash lookup can find a potential backmerge.
         */
        while (1) {
                __rq = elv_rqhash_find(q, blk_rq_pos(rq));
                if (!__rq || !blk_attempt_req_merge(q, __rq, rq))
                        break;

                /* The merged request could be merged with others, try again */
                ret = true;
                rq = __rq;
        }

        return ret;
}
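
/*
 * Called after bio has been merged into rq.  A back merge moves the
 * request's end sector, so reposition it in the merge hash; either way
 * rq becomes the new one-hit merge cache.
 */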
void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_merged_fn)
                e->type->ops.elevator_merged_fn(q, rq, type);

        if (type == ELEVATOR_BACK_MERGE)
                elv_rqhash_reposition(q, rq);

        q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
                        struct request *next)
{
        struct elevator_queue *e = q->elevator;
        const int next_sorted = next->cmd_flags & REQ_SORTED;

        if (next_sorted && e->type->ops.elevator_merge_req_fn)
                e->type->ops.elevator_merge_req_fn(q, rq, next);

        elv_rqhash_reposition(q, rq);

        if (next_sorted) {
                elv_rqhash_del(q, next);
                q->nr_sorted--;
        }

        q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
                    struct bio *bio)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_bio_merged_fn)
                e->type->ops.elevator_bio_merged_fn(q, rq, bio);
}
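
/*
 * Put a previously dispatched request back on the queue.  The request
 * has already been accounted as in flight, so that is undone and
 * REQ_STARTED cleared before it is reinserted at the front.
 */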
void elv_requeue_request(struct request_queue *q, struct request *rq)
{
        /*
         * it already went through dequeue, we need to decrement the
         * in_flight count again
         */
        if (blk_account_rq(rq)) {
                q->in_flight[rq_is_sync(rq)]--;
                if (rq->cmd_flags & REQ_SORTED)
                        elv_deactivate_rq(q, rq);
        }

        rq->cmd_flags &= ~REQ_STARTED;

        __elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

void elv_drain_elevator(struct request_queue *q)
{
        static int printed;

        lockdep_assert_held(q->queue_lock);

        while (q->elevator->type->ops.elevator_dispatch_fn(q, 1))
                ;
        if (q->nr_sorted && printed++ < 10) {
                printk(KERN_ERR "%s: forced dispatching is broken "
                       "(nr_sorted=%u), please report this\n",
                       q->elevator->type->elevator_name, q->nr_sorted);
        }
}
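
/*
 * Add rq to the queue at the given insertion point.  REQUEUE/FRONT go
 * straight to the head of the dispatch list, BACK drains the elevator
 * and appends, SORT_MERGE first tries to merge rq away and otherwise
 * sorts it into the elevator like SORT, and FLUSH hands the request to
 * the flush machinery.  Caller must hold the queue lock.
 */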
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
        trace_block_rq_insert(q, rq);

        rq->q = q;

        if (rq->cmd_flags & REQ_SOFTBARRIER) {
                /* barriers are scheduling boundary, update end_sector */
                if (rq->cmd_type == REQ_TYPE_FS) {
                        q->end_sector = rq_end_sector(rq);
                        q->boundary_rq = rq;
                }
        } else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
                   (where == ELEVATOR_INSERT_SORT ||
                    where == ELEVATOR_INSERT_SORT_MERGE))
                where = ELEVATOR_INSERT_BACK;

        switch (where) {
        case ELEVATOR_INSERT_REQUEUE:
        case ELEVATOR_INSERT_FRONT:
                rq->cmd_flags |= REQ_SOFTBARRIER;
                list_add(&rq->queuelist, &q->queue_head);
                break;

        case ELEVATOR_INSERT_BACK:
                rq->cmd_flags |= REQ_SOFTBARRIER;
                elv_drain_elevator(q);
                list_add_tail(&rq->queuelist, &q->queue_head);
                /*
                 * We kick the queue here for the following reasons.
                 * - The elevator might have returned NULL previously
                 *   to delay requests and returned them now.  As the
                 *   queue wasn't empty before this request, ll_rw_blk
                 *   won't run the queue on return, resulting in hang.
                 * - Usually, back inserted requests won't be merged
                 *   with anything.  There's no point in delaying queue
                 *   processing.
                 */
                __blk_run_queue(q);
                break;

        case ELEVATOR_INSERT_SORT_MERGE:
                /*
                 * If we succeed in merging this request with one in the
                 * queue already, we are done - rq has now been freed,
                 * so no need to do anything further.
                 */
                if (elv_attempt_insert_merge(q, rq))
                        break;
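                /* merging failed, fall through and sort the request in */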
        case ELEVATOR_INSERT_SORT:
                BUG_ON(rq->cmd_type != REQ_TYPE_FS);
                rq->cmd_flags |= REQ_SORTED;
                q->nr_sorted++;
                if (rq_mergeable(rq)) {
                        elv_rqhash_add(q, rq);
                        if (!q->last_merge)
                                q->last_merge = rq;
                }

                /*
                 * Some ioscheds (cfq) run q->request_fn directly, so
                 * rq cannot be accessed after calling
                 * elevator_add_req_fn.
                 */
                q->elevator->type->ops.elevator_add_req_fn(q, rq);
                break;

        case ELEVATOR_INSERT_FLUSH:
                rq->cmd_flags |= REQ_SOFTBARRIER;
                blk_insert_flush(rq);
                break;

        default:
                printk(KERN_ERR "%s: bad insertion point %d\n",
                       __func__, where);
                BUG();
        }
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        __elv_add_request(q, rq, where);
        spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_latter_req_fn)
                return e->type->ops.elevator_latter_req_fn(q, rq);
        return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_former_req_fn)
                return e->type->ops.elevator_former_req_fn(q, rq);
        return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq,
                    struct bio *bio, gfp_t gfp_mask)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_set_req_fn)
                return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask);
        return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_put_req_fn)
                e->type->ops.elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
        struct elevator_queue *e = q->elevator;

        if (e->type->ops.elevator_may_queue_fn)
                return e->type->ops.elevator_may_queue_fn(q, rw);

        return ELV_MQUEUE_MAY;
}
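
/*
 * Fail every request still sitting on the dispatch list: abort pending
 * flushes, then complete each queued request with -EIO.  REQ_QUIET
 * suppresses the usual error messages and blk_start_request() keeps the
 * end-I/O debug checks happy.
 */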
void elv_abort_queue(struct request_queue *q)
{
        struct request *rq;

        blk_abort_flushes(q);

        while (!list_empty(&q->queue_head)) {
                rq = list_entry_rq(q->queue_head.next);
                rq->cmd_flags |= REQ_QUIET;
                trace_block_rq_abort(q, rq);
                /*
                 * Mark this request as started so we don't trigger
                 * any debug logic in the end I/O path.
                 */
                blk_start_request(rq);
                __blk_end_request_all(rq, -EIO);
        }
}
EXPORT_SYMBOL(elv_abort_queue);

void elv_completed_request(struct request_queue *q, struct request *rq)
{
        struct elevator_queue *e = q->elevator;

        /*
         * request is released from the driver, io must be done
         */
        if (blk_account_rq(rq)) {
                q->in_flight[rq_is_sync(rq)]--;
                if ((rq->cmd_flags & REQ_SORTED) &&
                    e->type->ops.elevator_completed_req_fn)
                        e->type->ops.elevator_completed_req_fn(q, rq);
        }
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->show)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->show(e, page) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
               const char *page, size_t length)
{
        struct elv_fs_entry *entry = to_elv(attr);
        struct elevator_queue *e;
        ssize_t error;

        if (!entry->store)
                return -EIO;

        e = container_of(kobj, struct elevator_queue, kobj);
        mutex_lock(&e->sysfs_lock);
        error = e->type ? entry->store(e, page, length) : -ENOENT;
        mutex_unlock(&e->sysfs_lock);
        return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
        .show   = elv_attr_show,
        .store  = elv_attr_store,
};

static struct kobj_type elv_ktype = {
        .sysfs_ops      = &elv_sysfs_ops,
        .release        = elevator_release,
};
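
/*
 * Expose the active elevator under <queue>/iosched in sysfs and create
 * a file for each attribute the elevator type declares.
 */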
int elv_register_queue(struct request_queue *q)
{
        struct elevator_queue *e = q->elevator;
        int error;

        error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
        if (!error) {
                struct elv_fs_entry *attr = e->type->elevator_attrs;
                if (attr) {
                        while (attr->attr.name) {
                                if (sysfs_create_file(&e->kobj, &attr->attr))
                                        break;
                                attr++;
                        }
                }
                kobject_uevent(&e->kobj, KOBJ_ADD);
                e->registered = 1;
        }
        return error;
}
EXPORT_SYMBOL(elv_register_queue);

void elv_unregister_queue(struct request_queue *q)
{
        if (q) {
                struct elevator_queue *e = q->elevator;

                kobject_uevent(&e->kobj, KOBJ_REMOVE);
                kobject_del(&e->kobj);
                e->registered = 0;
        }
}
EXPORT_SYMBOL(elv_unregister_queue);

int elv_register(struct elevator_type *e)
{
        char *def = "";

        /* create icq_cache if requested */
        if (e->icq_size) {
                if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
                    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
                        return -EINVAL;

                snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
                         "%s_io_cq", e->elevator_name);
                e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
                                                 e->icq_align, 0, NULL);
                if (!e->icq_cache)
                        return -ENOMEM;
        }

        /* register, don't allow duplicate names */
        spin_lock(&elv_list_lock);
        if (elevator_find(e->elevator_name)) {
                spin_unlock(&elv_list_lock);
                if (e->icq_cache)
                        kmem_cache_destroy(e->icq_cache);
                return -EBUSY;
        }
        list_add_tail(&e->list, &elv_list);
        spin_unlock(&elv_list_lock);

        /* print pretty message */
        if (!strcmp(e->elevator_name, chosen_elevator) ||
            (!*chosen_elevator &&
             !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
                def = " (default)";

        printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
               def);
        return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
        /* unregister */
        spin_lock(&elv_list_lock);
        list_del_init(&e->list);
        spin_unlock(&elv_list_lock);

        /*
         * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
         * sure all RCU operations are complete before proceeding.
         */
        if (e->icq_cache) {
                rcu_barrier();
                kmem_cache_destroy(e->icq_cache);
                e->icq_cache = NULL;
        }
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * Switch to the new_e io scheduler.  Be careful not to introduce
 * deadlocks: we don't free the old io scheduler before we have
 * allocated what we need for the new one.  This way we have a chance
 * of going back to the old one if the new one fails init for some
 * reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
        struct elevator_queue *old = q->elevator;
        bool registered = old->registered;
        int err;

        /*
         * Turn on BYPASS and drain all requests w/ elevator private data.
         * Block layer doesn't call into a quiesced elevator - all requests
         * are directly put on the dispatch list without elevator data
         * using INSERT_BACK.  All requests have SOFTBARRIER set and no
         * merge happens either.
         */
        blk_queue_bypass_start(q);

        /* unregister and clear all auxiliary data of the old elevator */
        if (registered)
                elv_unregister_queue(q);

        spin_lock_irq(q->queue_lock);
        ioc_clear_queue(q);
        spin_unlock_irq(q->queue_lock);

        /* allocate, init and register new elevator */
        err = -ENOMEM;
        q->elevator = elevator_alloc(q, new_e);
        if (!q->elevator)
                goto fail_init;

        err = new_e->ops.elevator_init_fn(q);
        if (err) {
                kobject_put(&q->elevator->kobj);
                goto fail_init;
        }

        if (registered) {
                err = elv_register_queue(q);
                if (err)
                        goto fail_register;
        }

        /* done, kill the old one and finish */
        elevator_exit(old);
        blk_queue_bypass_end(q);

        blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

        return 0;

fail_register:
        elevator_exit(q->elevator);
fail_init:
        /* switch failed, restore and re-register old elevator */
        q->elevator = old;
        elv_register_queue(q);
        blk_queue_bypass_end(q);

        return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
int elevator_change(struct request_queue *q, const char *name)
{
        char elevator_name[ELV_NAME_MAX];
        struct elevator_type *e;

        if (!q->elevator)
                return -ENXIO;

        strlcpy(elevator_name, name, sizeof(elevator_name));
        e = elevator_get(strstrip(elevator_name));
        if (!e) {
                printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
                return -EINVAL;
        }

        if (!strcmp(elevator_name, q->elevator->type->elevator_name)) {
                elevator_put(e);
                return 0;
        }

        return elevator_switch(q, e);
}
EXPORT_SYMBOL(elevator_change);

ssize_t elv_iosched_store(struct request_queue *q, const char *name,
                          size_t count)
{
        int ret;

        if (!q->elevator)
                return count;

        ret = elevator_change(q, name);
        if (!ret)
                return count;

        printk(KERN_ERR "elevator: switch to %s failed\n", name);
        return ret;
}
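
/*
 * Show the schedulers available for this queue, with the active one in
 * brackets, e.g. "noop deadline [cfq]".
 */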
ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
        struct elevator_queue *e = q->elevator;
        struct elevator_type *elv;
        struct elevator_type *__e;
        int len = 0;

        if (!q->elevator || !blk_queue_stackable(q))
                return sprintf(name, "none\n");

        elv = e->type;

        spin_lock(&elv_list_lock);
        list_for_each_entry(__e, &elv_list, list) {
                if (!strcmp(elv->elevator_name, __e->elevator_name))
                        len += sprintf(name+len, "[%s] ", elv->elevator_name);
                else
                        len += sprintf(name+len, "%s ", __e->elevator_name);
        }
        spin_unlock(&elv_list_lock);

        len += sprintf(len+name, "\n");
        return len;
}

struct request *elv_rb_former_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbprev = rb_prev(&rq->rb_node);

        if (rbprev)
                return rb_entry_rq(rbprev);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
                                      struct request *rq)
{
        struct rb_node *rbnext = rb_next(&rq->rb_node);

        if (rbnext)
                return rb_entry_rq(rbnext);

        return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);