elevator.c

/*
 *  Block device elevator/IO-scheduler.
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - lose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "blk.h"
#include "blk-cgroup.h"

static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	\
		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))

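/*
 * Requests are hashed by their end sector (rq_hash_key), so a bio whose
 * first sector matches an existing request's end sector can be found
 * quickly as a back-merge candidate.
 */
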
/*
 * Query io scheduler to see if the current process issuing bio may be
 * merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_allow_merge_fn)
		return e->type->ops.elevator_allow_merge_fn(q, rq, bio);

	return 1;
}

/*
 * can we safely merge with this request?
 */
bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return false;

	if (!elv_iosched_allow_merge(rq, bio))
		return false;

	return true;
}
EXPORT_SYMBOL(elv_rq_merge_ok);

static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

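/*
 * Look up an elevator type by name and take a module reference on it.
 * If the type is not registered yet, drop elv_list_lock (request_module()
 * may sleep), try to load "<name>-iosched", and search again.
 */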
static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e) {
		spin_unlock(&elv_list_lock);
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}

static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);

static struct kobj_type elv_ktype;

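/*
 * Allocate a zeroed elevator_queue on the request queue's NUMA node and
 * initialize its merge hash.  Takes over the caller's reference on the
 * elevator type; the reference is dropped again on allocation failure.
 */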
static struct elevator_queue *elevator_alloc(struct request_queue *q,
					     struct elevator_type *e)
{
	struct elevator_queue *eq;
	int i;

	eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);

	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
				GFP_KERNEL, q->node);
	if (!eq->hash)
		goto err;

	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}

static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e->hash);
	kfree(e);
}

int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	int err;

	if (unlikely(q->elevator))
		return 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name);
		if (!e)
			return -EINVAL;
	}

	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. " \
				"Using noop.\n");
			e = elevator_get("noop");
		}
	}

	q->elevator = elevator_alloc(q, e);
	if (!q->elevator)
		return -ENOMEM;

	err = e->ops.elevator_init_fn(q);
	if (err) {
		kobject_put(&q->elevator->kobj);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(elevator_init);

void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->type->ops.elevator_exit_fn)
		e->type->ops.elevator_exit_fn(e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);

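/*
 * Merge hash helpers.  A request sits on the hash only while it is
 * mergeable and held by the scheduler; repositioning is needed after a
 * back merge, which changes the request's end sector and thus its key.
 */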
static inline void __elv_rqhash_del(struct request *rq)
{
	hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}

/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
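/*
 * Requests with equal sector keys are linked into the right subtree, so
 * an in-order walk visits equal keys in insertion order.
 */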
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);

/*
 * Insert rq into the dispatch queue of q.  The queue lock must be held
 * on entry.  rq is sorted into the dispatch queue rather than appended.
 * To be used by specific elevators.
 */
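/*
 * The list is scanned backwards from the tail; the scan stops at the
 * first request that rq may not pass: a request with a different data
 * direction or discard type, a started or barrier request, or the point
 * where rq fits the one-way elevator order relative to q->end_sector.
 */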
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if ((rq->cmd_flags & REQ_DISCARD) !=
		    (pos->cmd_flags & REQ_DISCARD))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);

/*
 * Insert rq into the dispatch queue of q.  The queue lock must be held
 * on entry.  rq is added to the back of the dispatch queue.  To be used
 * by specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);

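/*
 * Ask whether bio can be merged into an existing request.  Returns
 * ELEVATOR_BACK_MERGE or ELEVATOR_FRONT_MERGE with *req set to the
 * candidate request, or ELEVATOR_NO_MERGE.
 */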
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * Levels of merges:
	 *	nomerges:  No merges at all attempted
	 *	noxmerges: Only simple one-hit cache try
	 *	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
		ret = blk_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->type->ops.elevator_merge_fn)
		return e->type->ops.elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}

/*
 * Attempt to do an insertion back merge.  Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise.
 */
static bool elv_attempt_insert_merge(struct request_queue *q,
				     struct request *rq)
{
	struct request *__rq;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, blk_rq_pos(rq));
	if (__rq && blk_attempt_req_merge(q, __rq, rq))
		return true;

	return false;
}

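/*
 * Called after bio has been merged into rq.  A back merge moves the
 * request's end sector, so its position in the merge hash must be
 * updated; rq also becomes the new one-hit merge cache.
 */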
void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_merged_fn)
		e->type->ops.elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			struct request *next)
{
	struct elevator_queue *e = q->elevator;
	const int next_sorted = next->cmd_flags & REQ_SORTED;

	if (next_sorted && e->type->ops.elevator_merge_req_fn)
		e->type->ops.elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);

	if (next_sorted) {
		elv_rqhash_del(q, next);
		q->nr_sorted--;
	}

	q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_bio_merged_fn)
		e->type->ops.elevator_bio_merged_fn(q, rq, bio);
}

void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (rq->cmd_flags & REQ_SORTED)
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}

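/*
 * Force the elevator to move everything it is holding back onto the
 * dispatch list, as needed for ELEVATOR_INSERT_BACK.  Complains (a
 * bounded number of times) if the scheduler still claims sorted
 * requests after refusing to dispatch more.
 */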
void elv_drain_elevator(struct request_queue *q)
{
	static int printed;

	lockdep_assert_held(q->queue_lock);

	while (q->elevator->type->ops.elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted && printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->type->elevator_name, q->nr_sorted);
	}
}

void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	trace_block_rq_insert(q, rq);

	rq->q = q;

	if (rq->cmd_flags & REQ_SOFTBARRIER) {
		/* barriers are scheduling boundary, update end_sector */
		if (rq->cmd_type == REQ_TYPE_FS) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    (where == ELEVATOR_INSERT_SORT ||
		     where == ELEVATOR_INSERT_SORT_MERGE))
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
	case ELEVATOR_INSERT_REQUEUE:
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT_MERGE:
		/*
		 * If we succeed in merging this request with one in the
		 * queue already, we are done - rq has now been freed,
		 * so no need to do anything further.
		 */
		if (elv_attempt_insert_merge(q, rq))
			break;
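		/* fall through - merge failed, sort rq in normally */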
	case ELEVATOR_INSERT_SORT:
		BUG_ON(rq->cmd_type != REQ_TYPE_FS);
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->type->ops.elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_FLUSH:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		blk_insert_flush(rq);
		break;
	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}
}
EXPORT_SYMBOL(__elv_add_request);

void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);

struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_latter_req_fn)
		return e->type->ops.elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_former_req_fn)
		return e->type->ops.elevator_former_req_fn(q, rq);
	return NULL;
}

int elv_set_request(struct request_queue *q, struct request *rq,
		    struct bio *bio, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_set_req_fn)
		return e->type->ops.elevator_set_req_fn(q, rq, bio, gfp_mask);
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_put_req_fn)
		e->type->ops.elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_may_queue_fn)
		return e->type->ops.elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}

void elv_abort_queue(struct request_queue *q)
{
	struct request *rq;

	blk_abort_flushes(q);

	while (!list_empty(&q->queue_head)) {
		rq = list_entry_rq(q->queue_head.next);
		rq->cmd_flags |= REQ_QUIET;
		trace_block_rq_abort(q, rq);
		/*
		 * Mark this request as started so we don't trigger
		 * any debug logic in the end I/O path.
		 */
		blk_start_request(rq);
		__blk_end_request_all(rq, -EIO);
	}
}
EXPORT_SYMBOL(elv_abort_queue);

void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->cmd_flags & REQ_SORTED) &&
		    e->type->ops.elevator_completed_req_fn)
			e->type->ops.elevator_completed_req_fn(q, rq);
	}
}

#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

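/*
 * sysfs plumbing: attribute reads and writes are serialized with the
 * per-elevator sysfs_lock and fail with -ENOENT when no elevator type
 * is attached any more.
 */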
static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};

int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
		e->registered = 1;
	}
	return error;
}
EXPORT_SYMBOL(elv_register_queue);

void elv_unregister_queue(struct request_queue *q)
{
	if (q) {
		struct elevator_queue *e = q->elevator;

		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
		e->registered = 0;
	}
}
EXPORT_SYMBOL(elv_unregister_queue);

int elv_register(struct elevator_type *e)
{
	char *def = "";

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (elevator_find(e->elevator_name)) {
		spin_unlock(&elv_list_lock);
		if (e->icq_cache)
			kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	/* print pretty message */
	if (!strcmp(e->elevator_name, chosen_elevator) ||
	    (!*chosen_elevator &&
	     !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
		def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);

/*
 * switch to new_e io scheduler.  be careful not to introduce deadlocks -
 * we don't free the old io scheduler, before we have allocated what we
 * need for the new one.  this way we have a chance of going back to the old
 * one, if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old = q->elevator;
	bool registered = old->registered;
	int err;

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data.
	 * Block layer doesn't call into a quiesced elevator - all requests
	 * are directly put on the dispatch list without elevator data
	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
	 * merge happens either.
	 */
	blk_queue_bypass_start(q);

	/* unregister and clear all auxiliary data of the old elevator */
	if (registered)
		elv_unregister_queue(q);

	spin_lock_irq(q->queue_lock);
	ioc_clear_queue(q);
	spin_unlock_irq(q->queue_lock);

	/* allocate, init and register new elevator */
	err = -ENOMEM;
	q->elevator = elevator_alloc(q, new_e);
	if (!q->elevator)
		goto fail_init;

	err = new_e->ops.elevator_init_fn(q);
	if (err) {
		kobject_put(&q->elevator->kobj);
		goto fail_init;
	}

	if (registered) {
		err = elv_register_queue(q);
		if (err)
			goto fail_register;
	}

	/* done, kill the old one and finish */
	elevator_exit(old);
	blk_queue_bypass_end(q);

	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

	return 0;

fail_register:
	elevator_exit(q->elevator);
fail_init:
	/* switch failed, restore and re-register old elevator */
	q->elevator = old;
	elv_register_queue(q);
	blk_queue_bypass_end(q);

	return err;
}

/*
 * Switch this queue to the given IO scheduler.
 */
int elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	if (!q->elevator)
		return -ENXIO;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(strstrip(elevator_name));
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->type->elevator_name)) {
		elevator_put(e);
		return 0;
	}

	return elevator_switch(q, e);
}
EXPORT_SYMBOL(elevator_change);

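/*
 * sysfs "scheduler" attribute: writing a scheduler name triggers a live
 * switch; reading lists the registered schedulers with the active one
 * in brackets.
 */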
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!q->elevator)
		return count;

	ret = elevator_change(q, name);
	if (!ret)
		return count;

	printk(KERN_ERR "elevator: switch to %s failed\n", name);
	return ret;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv;
	struct elevator_type *__e;
	int len = 0;

	if (!q->elevator || !blk_queue_stackable(q))
		return sprintf(name, "none\n");

	elv = e->type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(name+len, "\n");
	return len;
}

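/*
 * Generic former/latter lookups for schedulers that keep their sorted
 * requests in an rb-tree (see elv_rb_add/elv_rb_find above): return the
 * previous/next request in sector order, or NULL at either end.
 */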
struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);