elevator.c

/*
 * Block device elevator/IO-scheduler.
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *
 * 30042000 Jens Axboe <axboe@kernel.dk> :
 *
 * Split the elevator a bit so that it is possible to choose a different
 * one or even write a new "plug in". There are three pieces:
 * - elevator_fn, inserts a new request in the queue list
 * - elevator_merge_fn, decides whether a new buffer can be merged with
 *   an existing request
 * - elevator_dequeue_fn, called when a request is taken off the active list
 *
 * 20082000 Dave Jones <davej@suse.de> :
 * Removed tests for max-bomb-segments, which was breaking elvtune
 * when run without -bN
 *
 * Jens:
 * - Rework again to work with bio instead of buffer_heads
 * - loose bi_dev comparisons, partition handling is right now
 * - completely modularize elevator setup and teardown
 *
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/blktrace_api.h>
#include <linux/hash.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "blk.h"
static DEFINE_SPINLOCK(elv_list_lock);
static LIST_HEAD(elv_list);

/*
 * Merge hash stuff.
 */
static const int elv_hash_shift = 6;
#define ELV_HASH_BLOCK(sec)	((sec) >> 3)
#define ELV_HASH_FN(sec)	\
		(hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
#define ELV_HASH_ENTRIES	(1 << elv_hash_shift)
#define rq_hash_key(rq)		(blk_rq_pos(rq) + blk_rq_sectors(rq))
/*
 * Query the io scheduler to see if the bio being issued by the current
 * process may be merged with rq.
 */
static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_allow_merge_fn)
		return e->type->ops.elevator_allow_merge_fn(q, rq, bio);

	return 1;
}
/*
 * can we safely merge with this request?
 */
bool elv_rq_merge_ok(struct request *rq, struct bio *bio)
{
	if (!blk_rq_merge_ok(rq, bio))
		return false;

	if (!elv_iosched_allow_merge(rq, bio))
		return false;

	return true;
}
EXPORT_SYMBOL(elv_rq_merge_ok);
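
/*
 * Elevator type lookup and refcounting. elevator_find() walks elv_list
 * and must be called with elv_list_lock held; elevator_get() also tries
 * to load a "<name>-iosched" module on a miss and takes a module
 * reference, which elevator_put() drops.
 */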
static struct elevator_type *elevator_find(const char *name)
{
	struct elevator_type *e;

	list_for_each_entry(e, &elv_list, list) {
		if (!strcmp(e->elevator_name, name))
			return e;
	}

	return NULL;
}

static void elevator_put(struct elevator_type *e)
{
	module_put(e->elevator_owner);
}

static struct elevator_type *elevator_get(const char *name)
{
	struct elevator_type *e;

	spin_lock(&elv_list_lock);

	e = elevator_find(name);
	if (!e) {
		spin_unlock(&elv_list_lock);
		request_module("%s-iosched", name);
		spin_lock(&elv_list_lock);
		e = elevator_find(name);
	}

	if (e && !try_module_get(e->elevator_owner))
		e = NULL;

	spin_unlock(&elv_list_lock);

	return e;
}
static char chosen_elevator[ELV_NAME_MAX];

static int __init elevator_setup(char *str)
{
	/*
	 * Be backwards-compatible with previous kernels, so users
	 * won't get the wrong elevator.
	 */
	strncpy(chosen_elevator, str, sizeof(chosen_elevator) - 1);
	return 1;
}

__setup("elevator=", elevator_setup);

static struct kobj_type elv_ktype;
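
/*
 * Allocate and initialise an elevator_queue for q, including the
 * back-merge hash table. On failure the reference on e is dropped and
 * NULL is returned.
 */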
static struct elevator_queue *elevator_alloc(struct request_queue *q,
				  struct elevator_type *e)
{
	struct elevator_queue *eq;
	int i;

	eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
	if (unlikely(!eq))
		goto err;

	eq->type = e;
	kobject_init(&eq->kobj, &elv_ktype);
	mutex_init(&eq->sysfs_lock);

	eq->hash = kmalloc_node(sizeof(struct hlist_head) * ELV_HASH_ENTRIES,
					GFP_KERNEL, q->node);
	if (!eq->hash)
		goto err;

	for (i = 0; i < ELV_HASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&eq->hash[i]);

	return eq;
err:
	kfree(eq);
	elevator_put(e);
	return NULL;
}
static void elevator_release(struct kobject *kobj)
{
	struct elevator_queue *e;

	e = container_of(kobj, struct elevator_queue, kobj);
	elevator_put(e->type);
	kfree(e->hash);
	kfree(e);
}
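
/*
 * Pick and initialise an I/O scheduler for a new queue: an explicit
 * name wins, then the "elevator=" boot parameter, then
 * CONFIG_DEFAULT_IOSCHED, with noop as the last resort.
 */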
int elevator_init(struct request_queue *q, char *name)
{
	struct elevator_type *e = NULL;
	int err;

	if (unlikely(q->elevator))
		return 0;

	INIT_LIST_HEAD(&q->queue_head);
	q->last_merge = NULL;
	q->end_sector = 0;
	q->boundary_rq = NULL;

	if (name) {
		e = elevator_get(name);
		if (!e)
			return -EINVAL;
	}

	if (!e && *chosen_elevator) {
		e = elevator_get(chosen_elevator);
		if (!e)
			printk(KERN_ERR "I/O scheduler %s not found\n",
							chosen_elevator);
	}

	if (!e) {
		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
		if (!e) {
			printk(KERN_ERR
				"Default I/O scheduler not found. "
				"Using noop.\n");
			e = elevator_get("noop");
		}
	}

	q->elevator = elevator_alloc(q, e);
	if (!q->elevator)
		return -ENOMEM;

	err = e->ops.elevator_init_fn(q);
	if (err) {
		kobject_put(&q->elevator->kobj);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(elevator_init);
void elevator_exit(struct elevator_queue *e)
{
	mutex_lock(&e->sysfs_lock);
	if (e->type->ops.elevator_exit_fn)
		e->type->ops.elevator_exit_fn(e);
	mutex_unlock(&e->sysfs_lock);

	kobject_put(&e->kobj);
}
EXPORT_SYMBOL(elevator_exit);
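
/*
 * Back-merge hash helpers. Requests are hashed on the sector that
 * immediately follows them (rq_hash_key()), so a bio starting at that
 * sector can quickly find a request it may be appended to.
 */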
static inline void __elv_rqhash_del(struct request *rq)
{
	hlist_del_init(&rq->hash);
}

static void elv_rqhash_del(struct request_queue *q, struct request *rq)
{
	if (ELV_ON_HASH(rq))
		__elv_rqhash_del(rq);
}

static void elv_rqhash_add(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	BUG_ON(ELV_ON_HASH(rq));
	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
}

static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
{
	__elv_rqhash_del(rq);
	elv_rqhash_add(q, rq);
}

static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
{
	struct elevator_queue *e = q->elevator;
	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
	struct hlist_node *entry, *next;
	struct request *rq;

	hlist_for_each_entry_safe(rq, entry, next, hash_list, hash) {
		BUG_ON(!ELV_ON_HASH(rq));

		if (unlikely(!rq_mergeable(rq))) {
			__elv_rqhash_del(rq);
			continue;
		}

		if (rq_hash_key(rq) == offset)
			return rq;
	}

	return NULL;
}
/*
 * RB-tree support functions for inserting/lookup/removal of requests
 * in a sorted RB tree.
 */
void elv_rb_add(struct rb_root *root, struct request *rq)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct request *__rq;

	while (*p) {
		parent = *p;
		__rq = rb_entry(parent, struct request, rb_node);

		if (blk_rq_pos(rq) < blk_rq_pos(__rq))
			p = &(*p)->rb_left;
		else if (blk_rq_pos(rq) >= blk_rq_pos(__rq))
			p = &(*p)->rb_right;
	}

	rb_link_node(&rq->rb_node, parent, p);
	rb_insert_color(&rq->rb_node, root);
}
EXPORT_SYMBOL(elv_rb_add);

void elv_rb_del(struct rb_root *root, struct request *rq)
{
	BUG_ON(RB_EMPTY_NODE(&rq->rb_node));
	rb_erase(&rq->rb_node, root);
	RB_CLEAR_NODE(&rq->rb_node);
}
EXPORT_SYMBOL(elv_rb_del);

struct request *elv_rb_find(struct rb_root *root, sector_t sector)
{
	struct rb_node *n = root->rb_node;
	struct request *rq;

	while (n) {
		rq = rb_entry(n, struct request, rb_node);

		if (sector < blk_rq_pos(rq))
			n = n->rb_left;
		else if (sector > blk_rq_pos(rq))
			n = n->rb_right;
		else
			return rq;
	}

	return NULL;
}
EXPORT_SYMBOL(elv_rb_find);
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is sorted into the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_sort(struct request_queue *q, struct request *rq)
{
	sector_t boundary;
	struct list_head *entry;
	int stop_flags;

	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	boundary = q->end_sector;
	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if ((rq->cmd_flags & REQ_DISCARD) !=
		    (pos->cmd_flags & REQ_DISCARD))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
		if (pos->cmd_flags & stop_flags)
			break;
		if (blk_rq_pos(rq) >= boundary) {
			if (blk_rq_pos(pos) < boundary)
				continue;
		} else {
			if (blk_rq_pos(pos) >= boundary)
				break;
		}
		if (blk_rq_pos(rq) >= blk_rq_pos(pos))
			break;
	}

	list_add(&rq->queuelist, entry);
}
EXPORT_SYMBOL(elv_dispatch_sort);
/*
 * Insert rq into dispatch queue of q.  Queue lock must be held on
 * entry.  rq is added to the back of the dispatch queue.  To be used by
 * specific elevators.
 */
void elv_dispatch_add_tail(struct request_queue *q, struct request *rq)
{
	if (q->last_merge == rq)
		q->last_merge = NULL;

	elv_rqhash_del(q, rq);

	q->nr_sorted--;

	q->end_sector = rq_end_sector(rq);
	q->boundary_rq = rq;
	list_add_tail(&rq->queuelist, &q->queue_head);
}
EXPORT_SYMBOL(elv_dispatch_add_tail);
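
/*
 * Find a request that bio may be merged with.  Tries the one-hit
 * last_merge cache first, then the back-merge hash, then asks the
 * elevator itself; returns the merge type and the candidate request
 * through *req.
 */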
int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
{
	struct elevator_queue *e = q->elevator;
	struct request *__rq;
	int ret;

	/*
	 * Levels of merges:
	 *	nomerges:  No merges at all attempted
	 *	noxmerges: Only simple one-hit cache try
	 *	merges:	   All merge tries attempted
	 */
	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && elv_rq_merge_ok(q->last_merge, bio)) {
		ret = blk_try_merge(q->last_merge, bio);
		if (ret != ELEVATOR_NO_MERGE) {
			*req = q->last_merge;
			return ret;
		}
	}

	if (blk_queue_noxmerges(q))
		return ELEVATOR_NO_MERGE;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_BACK_MERGE;
	}

	if (e->type->ops.elevator_merge_fn)
		return e->type->ops.elevator_merge_fn(q, req, bio);

	return ELEVATOR_NO_MERGE;
}
/*
 * Attempt to do an insertion back merge. Only check for the case where
 * we can append 'rq' to an existing request, so we can throw 'rq' away
 * afterwards.
 *
 * Returns true if we merged, false otherwise
 */
static bool elv_attempt_insert_merge(struct request_queue *q,
				     struct request *rq)
{
	struct request *__rq;

	if (blk_queue_nomerges(q))
		return false;

	/*
	 * First try one-hit cache.
	 */
	if (q->last_merge && blk_attempt_req_merge(q, q->last_merge, rq))
		return true;

	if (blk_queue_noxmerges(q))
		return false;

	/*
	 * See if our hash lookup can find a potential backmerge.
	 */
	__rq = elv_rqhash_find(q, blk_rq_pos(rq));
	if (__rq && blk_attempt_req_merge(q, __rq, rq))
		return true;

	return false;
}
void elv_merged_request(struct request_queue *q, struct request *rq, int type)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_merged_fn)
		e->type->ops.elevator_merged_fn(q, rq, type);

	if (type == ELEVATOR_BACK_MERGE)
		elv_rqhash_reposition(q, rq);

	q->last_merge = rq;
}

void elv_merge_requests(struct request_queue *q, struct request *rq,
			struct request *next)
{
	struct elevator_queue *e = q->elevator;
	const int next_sorted = next->cmd_flags & REQ_SORTED;

	if (next_sorted && e->type->ops.elevator_merge_req_fn)
		e->type->ops.elevator_merge_req_fn(q, rq, next);

	elv_rqhash_reposition(q, rq);

	if (next_sorted) {
		elv_rqhash_del(q, next);
		q->nr_sorted--;
	}

	q->last_merge = rq;
}

void elv_bio_merged(struct request_queue *q, struct request *rq,
			struct bio *bio)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_bio_merged_fn)
		e->type->ops.elevator_bio_merged_fn(q, rq, bio);
}
void elv_requeue_request(struct request_queue *q, struct request *rq)
{
	/*
	 * it already went through dequeue, we need to decrement the
	 * in_flight count again
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if (rq->cmd_flags & REQ_SORTED)
			elv_deactivate_rq(q, rq);
	}

	rq->cmd_flags &= ~REQ_STARTED;

	__elv_add_request(q, rq, ELEVATOR_INSERT_REQUEUE);
}
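
/*
 * Force the elevator to dispatch everything it is still holding back;
 * used when the dispatch list must be complete, e.g. before a back
 * insertion in __elv_add_request() below.
 */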
void elv_drain_elevator(struct request_queue *q)
{
	static int printed;

	lockdep_assert_held(q->queue_lock);

	while (q->elevator->type->ops.elevator_dispatch_fn(q, 1))
		;
	if (q->nr_sorted && printed++ < 10) {
		printk(KERN_ERR "%s: forced dispatching is broken "
		       "(nr_sorted=%u), please report this\n",
		       q->elevator->type->elevator_name, q->nr_sorted);
	}
}
void elv_quiesce_start(struct request_queue *q)
{
	if (!q->elevator)
		return;

	spin_lock_irq(q->queue_lock);
	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);

	blk_drain_queue(q, false);
}

void elv_quiesce_end(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
	spin_unlock_irq(q->queue_lock);
}
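
/*
 * Core insertion path, called with the queue lock held.  'where'
 * selects the insertion policy: front/back/requeue go straight onto
 * the dispatch list, sorted inserts are handed to the elevator, and
 * flush requests go through the flush machinery.
 */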
void __elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	trace_block_rq_insert(q, rq);

	rq->q = q;

	if (rq->cmd_flags & REQ_SOFTBARRIER) {
		/* barriers are scheduling boundary, update end_sector */
		if (rq->cmd_type == REQ_TYPE_FS ||
		    (rq->cmd_flags & REQ_DISCARD)) {
			q->end_sector = rq_end_sector(rq);
			q->boundary_rq = rq;
		}
	} else if (!(rq->cmd_flags & REQ_ELVPRIV) &&
		    (where == ELEVATOR_INSERT_SORT ||
		     where == ELEVATOR_INSERT_SORT_MERGE))
		where = ELEVATOR_INSERT_BACK;

	switch (where) {
	case ELEVATOR_INSERT_REQUEUE:
	case ELEVATOR_INSERT_FRONT:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		list_add(&rq->queuelist, &q->queue_head);
		break;

	case ELEVATOR_INSERT_BACK:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		elv_drain_elevator(q);
		list_add_tail(&rq->queuelist, &q->queue_head);
		/*
		 * We kick the queue here for the following reasons.
		 * - The elevator might have returned NULL previously
		 *   to delay requests and returned them now.  As the
		 *   queue wasn't empty before this request, ll_rw_blk
		 *   won't run the queue on return, resulting in hang.
		 * - Usually, back inserted requests won't be merged
		 *   with anything.  There's no point in delaying queue
		 *   processing.
		 */
		__blk_run_queue(q);
		break;

	case ELEVATOR_INSERT_SORT_MERGE:
		/*
		 * If we succeed in merging this request with one in the
		 * queue already, we are done - rq has now been freed,
		 * so no need to do anything further.
		 */
		if (elv_attempt_insert_merge(q, rq))
			break;
		/* fall through */
	case ELEVATOR_INSERT_SORT:
		BUG_ON(rq->cmd_type != REQ_TYPE_FS &&
		       !(rq->cmd_flags & REQ_DISCARD));
		rq->cmd_flags |= REQ_SORTED;
		q->nr_sorted++;
		if (rq_mergeable(rq)) {
			elv_rqhash_add(q, rq);
			if (!q->last_merge)
				q->last_merge = rq;
		}

		/*
		 * Some ioscheds (cfq) run q->request_fn directly, so
		 * rq cannot be accessed after calling
		 * elevator_add_req_fn.
		 */
		q->elevator->type->ops.elevator_add_req_fn(q, rq);
		break;

	case ELEVATOR_INSERT_FLUSH:
		rq->cmd_flags |= REQ_SOFTBARRIER;
		blk_insert_flush(rq);
		break;
	default:
		printk(KERN_ERR "%s: bad insertion point %d\n",
		       __func__, where);
		BUG();
	}
}
EXPORT_SYMBOL(__elv_add_request);
void elv_add_request(struct request_queue *q, struct request *rq, int where)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__elv_add_request(q, rq, where);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
EXPORT_SYMBOL(elv_add_request);
struct request *elv_latter_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_latter_req_fn)
		return e->type->ops.elevator_latter_req_fn(q, rq);
	return NULL;
}

struct request *elv_former_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_former_req_fn)
		return e->type->ops.elevator_former_req_fn(q, rq);
	return NULL;
}
int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_set_req_fn)
		return e->type->ops.elevator_set_req_fn(q, rq, gfp_mask);
	return 0;
}

void elv_put_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_put_req_fn)
		e->type->ops.elevator_put_req_fn(rq);
}

int elv_may_queue(struct request_queue *q, int rw)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_may_queue_fn)
		return e->type->ops.elevator_may_queue_fn(q, rw);

	return ELV_MQUEUE_MAY;
}
void elv_abort_queue(struct request_queue *q)
{
	struct request *rq;

	blk_abort_flushes(q);

	while (!list_empty(&q->queue_head)) {
		rq = list_entry_rq(q->queue_head.next);
		rq->cmd_flags |= REQ_QUIET;
		trace_block_rq_abort(q, rq);
		/*
		 * Mark this request as started so we don't trigger
		 * any debug logic in the end I/O path.
		 */
		blk_start_request(rq);
		__blk_end_request_all(rq, -EIO);
	}
}
EXPORT_SYMBOL(elv_abort_queue);
void elv_completed_request(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	/*
	 * request is released from the driver, io must be done
	 */
	if (blk_account_rq(rq)) {
		q->in_flight[rq_is_sync(rq)]--;
		if ((rq->cmd_flags & REQ_SORTED) &&
		    e->type->ops.elevator_completed_req_fn)
			e->type->ops.elevator_completed_req_fn(q, rq);
	}
}
#define to_elv(atr) container_of((atr), struct elv_fs_entry, attr)

static ssize_t
elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->show)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->show(e, page) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static ssize_t
elv_attr_store(struct kobject *kobj, struct attribute *attr,
	       const char *page, size_t length)
{
	struct elv_fs_entry *entry = to_elv(attr);
	struct elevator_queue *e;
	ssize_t error;

	if (!entry->store)
		return -EIO;

	e = container_of(kobj, struct elevator_queue, kobj);
	mutex_lock(&e->sysfs_lock);
	error = e->type ? entry->store(e, page, length) : -ENOENT;
	mutex_unlock(&e->sysfs_lock);
	return error;
}

static const struct sysfs_ops elv_sysfs_ops = {
	.show	= elv_attr_show,
	.store	= elv_attr_store,
};

static struct kobj_type elv_ktype = {
	.sysfs_ops	= &elv_sysfs_ops,
	.release	= elevator_release,
};
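
/*
 * sysfs registration for the active elevator: creates the "iosched"
 * directory under the queue kobject (typically
 * /sys/block/<dev>/queue/iosched) and populates it with the
 * elevator's attributes.
 */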
int elv_register_queue(struct request_queue *q)
{
	struct elevator_queue *e = q->elevator;
	int error;

	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
	if (!error) {
		struct elv_fs_entry *attr = e->type->elevator_attrs;
		if (attr) {
			while (attr->attr.name) {
				if (sysfs_create_file(&e->kobj, &attr->attr))
					break;
				attr++;
			}
		}
		kobject_uevent(&e->kobj, KOBJ_ADD);
		e->registered = 1;
	}
	return error;
}
EXPORT_SYMBOL(elv_register_queue);

void elv_unregister_queue(struct request_queue *q)
{
	if (q) {
		struct elevator_queue *e = q->elevator;

		kobject_uevent(&e->kobj, KOBJ_REMOVE);
		kobject_del(&e->kobj);
		e->registered = 0;
	}
}
EXPORT_SYMBOL(elv_unregister_queue);
int elv_register(struct elevator_type *e)
{
	char *def = "";

	/* create icq_cache if requested */
	if (e->icq_size) {
		if (WARN_ON(e->icq_size < sizeof(struct io_cq)) ||
		    WARN_ON(e->icq_align < __alignof__(struct io_cq)))
			return -EINVAL;

		snprintf(e->icq_cache_name, sizeof(e->icq_cache_name),
			 "%s_io_cq", e->elevator_name);
		e->icq_cache = kmem_cache_create(e->icq_cache_name, e->icq_size,
						 e->icq_align, 0, NULL);
		if (!e->icq_cache)
			return -ENOMEM;
	}

	/* register, don't allow duplicate names */
	spin_lock(&elv_list_lock);
	if (elevator_find(e->elevator_name)) {
		spin_unlock(&elv_list_lock);
		if (e->icq_cache)
			kmem_cache_destroy(e->icq_cache);
		return -EBUSY;
	}
	list_add_tail(&e->list, &elv_list);
	spin_unlock(&elv_list_lock);

	/* print pretty message */
	if (!strcmp(e->elevator_name, chosen_elevator) ||
	    (!*chosen_elevator &&
	     !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED)))
		def = " (default)";

	printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name,
								def);
	return 0;
}
EXPORT_SYMBOL_GPL(elv_register);

void elv_unregister(struct elevator_type *e)
{
	/* unregister */
	spin_lock(&elv_list_lock);
	list_del_init(&e->list);
	spin_unlock(&elv_list_lock);

	/*
	 * Destroy icq_cache if it exists.  icq's are RCU managed.  Make
	 * sure all RCU operations are complete before proceeding.
	 */
	if (e->icq_cache) {
		rcu_barrier();
		kmem_cache_destroy(e->icq_cache);
		e->icq_cache = NULL;
	}
}
EXPORT_SYMBOL_GPL(elv_unregister);
/*
 * Switch to new_e io scheduler.  Be careful not to introduce deadlocks -
 * we don't free the old io scheduler before we have allocated what we
 * need for the new one.  This way we have a chance of going back to the
 * old one if the new one fails init for some reason.
 */
static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
{
	struct elevator_queue *old = q->elevator;
	bool registered = old->registered;
	int err;

	/*
	 * Turn on BYPASS and drain all requests w/ elevator private data.
	 * Block layer doesn't call into a quiesced elevator - all requests
	 * are directly put on the dispatch list without elevator data
	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
	 * merge happens either.
	 */
	elv_quiesce_start(q);

	/* unregister and clear all auxiliary data of the old elevator */
	if (registered)
		elv_unregister_queue(q);

	spin_lock_irq(q->queue_lock);
	ioc_clear_queue(q);
	spin_unlock_irq(q->queue_lock);

	/* allocate, init and register new elevator */
	err = -ENOMEM;
	q->elevator = elevator_alloc(q, new_e);
	if (!q->elevator)
		goto fail_init;

	err = new_e->ops.elevator_init_fn(q);
	if (err) {
		kobject_put(&q->elevator->kobj);
		goto fail_init;
	}

	if (registered) {
		err = elv_register_queue(q);
		if (err)
			goto fail_register;
	}

	/* done, kill the old one and finish */
	elevator_exit(old);
	elv_quiesce_end(q);

	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);

	return 0;

fail_register:
	elevator_exit(q->elevator);
fail_init:
	/* switch failed, restore and re-register old elevator */
	q->elevator = old;
	elv_register_queue(q);
	elv_quiesce_end(q);

	return err;
}
/*
 * Switch this queue to the given IO scheduler.
 */
int elevator_change(struct request_queue *q, const char *name)
{
	char elevator_name[ELV_NAME_MAX];
	struct elevator_type *e;

	if (!q->elevator)
		return -ENXIO;

	strlcpy(elevator_name, name, sizeof(elevator_name));
	e = elevator_get(strstrip(elevator_name));
	if (!e) {
		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
		return -EINVAL;
	}

	if (!strcmp(elevator_name, q->elevator->type->elevator_name)) {
		elevator_put(e);
		return 0;
	}

	return elevator_switch(q, e);
}
EXPORT_SYMBOL(elevator_change);
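
/*
 * sysfs interface for the queue's "scheduler" attribute: store switches
 * the queue to the named elevator, show lists the registered elevators
 * with the active one in square brackets.
 */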
ssize_t elv_iosched_store(struct request_queue *q, const char *name,
			  size_t count)
{
	int ret;

	if (!q->elevator)
		return count;

	ret = elevator_change(q, name);
	if (!ret)
		return count;

	printk(KERN_ERR "elevator: switch to %s failed\n", name);
	return ret;
}

ssize_t elv_iosched_show(struct request_queue *q, char *name)
{
	struct elevator_queue *e = q->elevator;
	struct elevator_type *elv;
	struct elevator_type *__e;
	int len = 0;

	if (!q->elevator || !blk_queue_stackable(q))
		return sprintf(name, "none\n");

	elv = e->type;

	spin_lock(&elv_list_lock);
	list_for_each_entry(__e, &elv_list, list) {
		if (!strcmp(elv->elevator_name, __e->elevator_name))
			len += sprintf(name+len, "[%s] ", elv->elevator_name);
		else
			len += sprintf(name+len, "%s ", __e->elevator_name);
	}
	spin_unlock(&elv_list_lock);

	len += sprintf(len+name, "\n");
	return len;
}
struct request *elv_rb_former_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbprev = rb_prev(&rq->rb_node);

	if (rbprev)
		return rb_entry_rq(rbprev);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_former_request);

struct request *elv_rb_latter_request(struct request_queue *q,
				      struct request *rq)
{
	struct rb_node *rbnext = rb_next(&rq->rb_node);

	if (rbnext)
		return rb_entry_rq(rbnext);

	return NULL;
}
EXPORT_SYMBOL(elv_rb_latter_request);