cfq-iosched.c

/*
 * CFQ, or complete fairness queueing, disk scheduler.
 *
 * Based on ideas from a previously unfinished io
 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 * Copyright (C) 2003 Jens Axboe <axboe@suse.de>
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/hash.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>

/*
 * tunables
 */
static const int cfq_quantum = 4;		/* max queue in one round of service */
static const int cfq_queued = 8;		/* minimum rq allocate limit per-queue */
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
static const int cfq_back_max = 16 * 1024;	/* maximum backwards seek, in KiB */
static const int cfq_back_penalty = 2;		/* penalty of a backwards seek */

static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 70;

#define CFQ_IDLE_GRACE		(HZ / 10)
#define CFQ_SLICE_SCALE		(5)

#define CFQ_KEY_ASYNC		(0)

static DEFINE_SPINLOCK(cfq_exit_lock);

/*
 * for the hash of cfqq inside the cfqd
 */
#define CFQ_QHASH_SHIFT		6
#define CFQ_QHASH_ENTRIES	(1 << CFQ_QHASH_SHIFT)
#define list_entry_qhash(entry)	hlist_entry((entry), struct cfq_queue, cfq_hash)

/*
 * for the hash of crq inside the cfqq
 */
#define CFQ_MHASH_SHIFT		6
#define CFQ_MHASH_BLOCK(sec)	((sec) >> 3)
#define CFQ_MHASH_ENTRIES	(1 << CFQ_MHASH_SHIFT)
#define CFQ_MHASH_FN(sec)	hash_long(CFQ_MHASH_BLOCK(sec), CFQ_MHASH_SHIFT)
#define rq_hash_key(rq)		((rq)->sector + (rq)->nr_sectors)
#define list_entry_hash(ptr)	hlist_entry((ptr), struct cfq_rq, hash)

#define list_entry_cfqq(ptr)	list_entry((ptr), struct cfq_queue, cfq_list)
#define list_entry_fifo(ptr)	list_entry((ptr), struct request, queuelist)

#define RQ_DATA(rq)		(rq)->elevator_private
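/*
 * Note: rq_hash_key() keys a request by its *end* sector, so a bio that
 * begins exactly where a queued request ends can find a back-merge
 * candidate in a single hash lookup, e.g.:
 *
 *	__rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
 *
 * as done in cfq_merge() below.
 */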
/*
 * rb-tree defines
 */
#define RB_NONE			(2)
#define RB_EMPTY(node)		((node)->rb_node == NULL)
#define RB_CLEAR_COLOR(node)	(node)->rb_color = RB_NONE
#define RB_CLEAR(node)		do {	\
	(node)->rb_parent = NULL;	\
	RB_CLEAR_COLOR((node));		\
	(node)->rb_right = NULL;	\
	(node)->rb_left = NULL;		\
} while (0)
#define RB_CLEAR_ROOT(root)	((root)->rb_node = NULL)
#define rb_entry_crq(node)	rb_entry((node), struct cfq_rq, rb_node)
#define rq_rb_key(rq)		(rq)->sector

static kmem_cache_t *crq_pool;
static kmem_cache_t *cfq_pool;
static kmem_cache_t *cfq_ioc_pool;

static atomic_t ioc_count = ATOMIC_INIT(0);
static struct completion *ioc_gone;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_be(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_BE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define ASYNC			(0)
#define SYNC			(1)

#define cfq_cfqq_dispatched(cfqq)	\
	((cfqq)->on_dispatch[ASYNC] + (cfqq)->on_dispatch[SYNC])

#define cfq_cfqq_class_sync(cfqq)	((cfqq)->key != CFQ_KEY_ASYNC)

#define cfq_cfqq_sync(cfqq)		\
	(cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC])

#define sample_valid(samples)	((samples) > 80)
/*
 * Per block device queue structure
 */
struct cfq_data {
	request_queue_t *queue;

	/*
	 * rr list of queues with requests and the count of them
	 */
	struct list_head rr_list[CFQ_PRIO_LISTS];
	struct list_head busy_rr;
	struct list_head cur_rr;
	struct list_head idle_rr;
	unsigned int busy_queues;

	/*
	 * non-ordered list of empty cfqq's
	 */
	struct list_head empty_list;

	/*
	 * cfqq lookup hash
	 */
	struct hlist_head *cfq_hash;

	/*
	 * global crq hash for all queues
	 */
	struct hlist_head *crq_hash;

	unsigned int max_queued;
	mempool_t *crq_pool;

	int rq_in_driver;
	int hw_tag;

	/*
	 * schedule slice state info
	 */
	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_context *active_cic;
	int cur_prio, cur_end_prio;
	unsigned int dispatch_slice;

	struct timer_list idle_class_timer;

	sector_t last_sector;
	unsigned long last_end_request;

	unsigned int rq_starved;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_queued;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;

	struct list_head cic_list;
};
/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	atomic_t ref;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* cfqq lookup hash */
	struct hlist_node cfq_hash;
	/* hash key */
	unsigned int key;
	/* on either rr or empty list of cfqd */
	struct list_head cfq_list;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct cfq_rq *next_crq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	unsigned long slice_start;
	unsigned long slice_end;
	unsigned long slice_left;
	unsigned long service_last;

	/* number of requests that are on the dispatch list */
	int on_dispatch[2];

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	/* various state flags, see below */
	unsigned int flags;
};

struct cfq_rq {
	struct rb_node rb_node;
	sector_t rb_key;
	struct request *request;
	struct hlist_node hash;

	struct cfq_queue *cfq_queue;
	struct cfq_io_context *io_context;

	unsigned int crq_flags;
};

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,
	CFQ_CFQQ_FLAG_wait_request,
	CFQ_CFQQ_FLAG_must_alloc,
	CFQ_CFQQ_FLAG_must_alloc_slice,
	CFQ_CFQQ_FLAG_must_dispatch,
	CFQ_CFQQ_FLAG_fifo_expire,
	CFQ_CFQQ_FLAG_idle_window,
	CFQ_CFQQ_FLAG_prio_changed,
};
#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_alloc);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
#undef CFQ_CFQQ_FNS
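/*
 * For example, CFQ_CFQQ_FNS(on_rr) above expands to three helpers:
 *
 *	cfq_mark_cfqq_on_rr(cfqq);	// set CFQ_CFQQ_FLAG_on_rr
 *	cfq_clear_cfqq_on_rr(cfqq);	// clear it
 *	if (cfq_cfqq_on_rr(cfqq)) ...	// test it
 */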
enum cfq_rq_state_flags {
	CFQ_CRQ_FLAG_is_sync = 0,
};

#define CFQ_CRQ_FNS(name)						\
static inline void cfq_mark_crq_##name(struct cfq_rq *crq)		\
{									\
	crq->crq_flags |= (1 << CFQ_CRQ_FLAG_##name);			\
}									\
static inline void cfq_clear_crq_##name(struct cfq_rq *crq)		\
{									\
	crq->crq_flags &= ~(1 << CFQ_CRQ_FLAG_##name);			\
}									\
static inline int cfq_crq_##name(const struct cfq_rq *crq)		\
{									\
	return (crq->crq_flags & (1 << CFQ_CRQ_FLAG_##name)) != 0;	\
}

CFQ_CRQ_FNS(is_sync);
#undef CFQ_CRQ_FNS

static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);

#define process_sync(tsk)	((tsk)->flags & PF_SYNCWRITE)
/*
 * lots of deadline iosched dupes, can be abstracted later...
 */
static inline void cfq_del_crq_hash(struct cfq_rq *crq)
{
	hlist_del_init(&crq->hash);
}

static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
{
	const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request));

	hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]);
}

static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
{
	struct hlist_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
	struct hlist_node *entry, *next;

	hlist_for_each_safe(entry, next, hash_list) {
		struct cfq_rq *crq = list_entry_hash(entry);
		struct request *__rq = crq->request;

		if (!rq_mergeable(__rq)) {
			cfq_del_crq_hash(crq);
			continue;
		}

		if (rq_hash_key(__rq) == offset)
			return __rq;
	}

	return NULL;
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues)
		kblockd_schedule_work(&cfqd->unplug_work);
}

static int cfq_queue_empty(request_queue_t *q)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	return !cfqd->busy_queues;
}

static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
{
	if (rw == READ || process_sync(task))
		return task->pid;

	return CFQ_KEY_ASYNC;
}
/*
 * Lifted from AS - choose which of crq1 and crq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct cfq_rq *
cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
{
	sector_t last, s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (crq1 == NULL || crq1 == crq2)
		return crq2;
	if (crq2 == NULL)
		return crq1;

	if (cfq_crq_is_sync(crq1) && !cfq_crq_is_sync(crq2))
		return crq1;
	else if (cfq_crq_is_sync(crq2) && !cfq_crq_is_sync(crq1))
		return crq2;

	s1 = crq1->request->sector;
	s2 = crq2->request->sector;

	last = cfqd->last_sector;

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: crq1 and crq2 not wrapped */
		if (d1 < d2)
			return crq1;
		else if (d2 < d1)
			return crq2;
		else {
			if (s1 >= s2)
				return crq1;
			else
				return crq2;
		}

	case CFQ_RQ2_WRAP:
		return crq1;
	case CFQ_RQ1_WRAP:
		return crq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both crqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return crq1;
		else
			return crq2;
	}
}
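/*
 * Worked example (illustrative numbers): with last = 1000, s1 = 1100,
 * s2 = 900, back_max covering both and cfq_back_penalty = 2, we get
 * d1 = 100 but d2 = (1000 - 900) * 2 = 200, so crq1 wins even though
 * both requests are equally far from the head position.
 */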
/*
 * would be nice to take fifo expire time into account as well
 */
static struct cfq_rq *
cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		  struct cfq_rq *last)
{
	struct cfq_rq *crq_next = NULL, *crq_prev = NULL;
	struct rb_node *rbnext, *rbprev;

	if (!(rbnext = rb_next(&last->rb_node))) {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext == &last->rb_node)
			rbnext = NULL;
	}

	rbprev = rb_prev(&last->rb_node);

	if (rbprev)
		crq_prev = rb_entry_crq(rbprev);
	if (rbnext)
		crq_next = rb_entry_crq(rbnext);

	return cfq_choose_req(cfqd, crq_next, crq_prev);
}

static void cfq_update_next_crq(struct cfq_rq *crq)
{
	struct cfq_queue *cfqq = crq->cfq_queue;

	if (cfqq->next_crq == crq)
		cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq);
}

static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct list_head *list, *entry;

	BUG_ON(!cfq_cfqq_on_rr(cfqq));

	list_del(&cfqq->cfq_list);

	if (cfq_class_rt(cfqq))
		list = &cfqd->cur_rr;
	else if (cfq_class_idle(cfqq))
		list = &cfqd->idle_rr;
	else {
		/*
		 * if cfqq has requests in flight, don't allow it to be
		 * found in cfq_set_active_queue before it has finished them.
		 * this is done to increase fairness between a process that
		 * has lots of io pending vs one that only generates one
		 * sporadically or synchronously
		 */
		if (cfq_cfqq_dispatched(cfqq))
			list = &cfqd->busy_rr;
		else
			list = &cfqd->rr_list[cfqq->ioprio];
	}

	/*
	 * if queue was preempted, just add to front to be fair. busy_rr
	 * isn't sorted, but insert at the back for fairness.
	 */
	if (preempted || list == &cfqd->busy_rr) {
		if (preempted)
			list = list->prev;

		list_add_tail(&cfqq->cfq_list, list);
		return;
	}

	/*
	 * sort by when queue was last serviced
	 */
	entry = list;
	while ((entry = entry->prev) != list) {
		struct cfq_queue *__cfqq = list_entry_cfqq(entry);

		if (!__cfqq->service_last)
			break;
		if (time_before(__cfqq->service_last, cfqq->service_last))
			break;
	}

	list_add(&cfqq->cfq_list, entry);
}
/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static inline void
cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;

	cfq_resort_rr_list(cfqq, 0);
}

static inline void
cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);
	list_move(&cfqq->cfq_list, &cfqd->empty_list);

	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
}

/*
 * rb tree support functions
 */
static inline void cfq_del_crq_rb(struct cfq_rq *crq)
{
	struct cfq_queue *cfqq = crq->cfq_queue;
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = cfq_crq_is_sync(crq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	cfq_update_next_crq(crq);

	rb_erase(&crq->rb_node, &cfqq->sort_list);
	RB_CLEAR_COLOR(&crq->rb_node);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
		cfq_del_cfqq_rr(cfqd, cfqq);
}
static struct cfq_rq *
__cfq_add_crq_rb(struct cfq_rq *crq)
{
	struct rb_node **p = &crq->cfq_queue->sort_list.rb_node;
	struct rb_node *parent = NULL;
	struct cfq_rq *__crq;

	while (*p) {
		parent = *p;
		__crq = rb_entry_crq(parent);

		if (crq->rb_key < __crq->rb_key)
			p = &(*p)->rb_left;
		else if (crq->rb_key > __crq->rb_key)
			p = &(*p)->rb_right;
		else
			return __crq;
	}

	rb_link_node(&crq->rb_node, parent, p);
	return NULL;
}

static void cfq_add_crq_rb(struct cfq_rq *crq)
{
	struct cfq_queue *cfqq = crq->cfq_queue;
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *rq = crq->request;
	struct cfq_rq *__alias;

	crq->rb_key = rq_rb_key(rq);
	cfqq->queued[cfq_crq_is_sync(crq)]++;

	/*
	 * looks a little odd, but the first insert might return an alias.
	 * if that happens, put the alias on the dispatch list
	 */
	while ((__alias = __cfq_add_crq_rb(crq)) != NULL)
		cfq_dispatch_insert(cfqd->queue, __alias);

	rb_insert_color(&crq->rb_node, &cfqq->sort_list);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
}

static inline void
cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
{
	rb_erase(&crq->rb_node, &cfqq->sort_list);
	cfqq->queued[cfq_crq_is_sync(crq)]--;

	cfq_add_crq_rb(crq);
}

static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio));
	struct cfq_queue *cfqq;
	struct rb_node *n;
	sector_t sector;

	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
	if (!cfqq)
		goto out;

	sector = bio->bi_sector + bio_sectors(bio);
	n = cfqq->sort_list.rb_node;
	while (n) {
		struct cfq_rq *crq = rb_entry_crq(n);

		if (sector < crq->rb_key)
			n = n->rb_left;
		else if (sector > crq->rb_key)
			n = n->rb_right;
		else
			return crq->request;
	}

out:
	return NULL;
}
static void cfq_activate_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver++;

	/*
	 * If the depth is larger than 1, it really could be queueing. But
	 * lets make the mark a little higher - idling could still be good
	 * for low queueing, and a low queueing number could also just
	 * indicate a SCSI mid layer like behaviour where limit+1 is often
	 * seen.
	 */
	if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
		cfqd->hw_tag = 1;
}
static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
}

static void cfq_remove_request(struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);

	list_del_init(&rq->queuelist);
	cfq_del_crq_rb(crq);
	cfq_del_crq_hash(crq);
}

static int
cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;
	int ret;

	__rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		ret = ELEVATOR_BACK_MERGE;
		goto out;
	}

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		ret = ELEVATOR_FRONT_MERGE;
		goto out;
	}

	return ELEVATOR_NO_MERGE;
out:
	*req = __rq;
	return ret;
}

static void cfq_merged_request(request_queue_t *q, struct request *req)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_rq *crq = RQ_DATA(req);

	cfq_del_crq_hash(crq);
	cfq_add_crq_hash(cfqd, crq);

	if (rq_rb_key(req) != crq->rb_key) {
		struct cfq_queue *cfqq = crq->cfq_queue;

		cfq_update_next_crq(crq);
		cfq_reposition_crq_rb(cfqq, crq);
	}
}

static void
cfq_merged_requests(request_queue_t *q, struct request *rq,
		    struct request *next)
{
	cfq_merged_request(q, rq);

	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(next->start_time, rq->start_time))
		list_move(&rq->queuelist, &next->queuelist);

	cfq_remove_request(next);
}
static inline void
__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (cfqq) {
		/*
		 * stop potential idle class queues waiting service
		 */
		del_timer(&cfqd->idle_class_timer);

		cfqq->slice_start = jiffies;
		cfqq->slice_end = 0;
		cfqq->slice_left = 0;
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
	}

	cfqd->active_queue = cfqq;
}

/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    int preempted)
{
	unsigned long now = jiffies;

	if (cfq_cfqq_wait_request(cfqq))
		del_timer(&cfqd->idle_slice_timer);

	if (!preempted && !cfq_cfqq_dispatched(cfqq)) {
		cfqq->service_last = now;
		cfq_schedule_dispatch(cfqd);
	}

	cfq_clear_cfqq_must_dispatch(cfqq);
	cfq_clear_cfqq_wait_request(cfqq);

	/*
	 * store what was left of this slice, if the queue idled out
	 * or was preempted
	 */
	if (time_after(cfqq->slice_end, now))
		cfqq->slice_left = cfqq->slice_end - now;
	else
		cfqq->slice_left = 0;

	if (cfq_cfqq_on_rr(cfqq))
		cfq_resort_rr_list(cfqq, preempted);

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->ioc);
		cfqd->active_cic = NULL;
	}

	cfqd->dispatch_slice = 0;
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, preempted);
}
/*
 * The prio scan window [cur_prio, cur_end_prio] grows one level at a
 * time, so successive passes service the expanding level sets:
 *
 * 0
 * 0,1
 * 0,1,2
 * 0,1,2,3
 * 0,1,2,3,4
 * 0,1,2,3,4,5
 * 0,1,2,3,4,5,6
 * 0,1,2,3,4,5,6,7
 */
static int cfq_get_next_prio_level(struct cfq_data *cfqd)
{
	int prio, wrap;

	prio = -1;
	wrap = 0;
	do {
		int p;

		for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) {
			if (!list_empty(&cfqd->rr_list[p])) {
				prio = p;
				break;
			}
		}

		if (prio != -1)
			break;
		cfqd->cur_prio = 0;
		if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
			cfqd->cur_end_prio = 0;
			if (wrap)
				break;
			wrap = 1;
		}
	} while (1);

	if (unlikely(prio == -1))
		return -1;

	BUG_ON(prio >= CFQ_PRIO_LISTS);

	list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr);

	cfqd->cur_prio = prio + 1;
	if (cfqd->cur_prio > cfqd->cur_end_prio) {
		cfqd->cur_end_prio = cfqd->cur_prio;
		cfqd->cur_prio = 0;
	}
	if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
		cfqd->cur_prio = 0;
		cfqd->cur_end_prio = 0;
	}

	return prio;
}
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = NULL;

	/*
	 * if current list is non-empty, grab first entry. if it is empty,
	 * get the next prio level and grab the first entry from it, if any
	 * were spliced
	 */
	if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1)
		cfqq = list_entry_cfqq(cfqd->cur_rr.next);

	/*
	 * If no new queues are available, check if the busy list has some
	 * before falling back to idle io.
	 */
	if (!cfqq && !list_empty(&cfqd->busy_rr))
		cfqq = list_entry_cfqq(cfqd->busy_rr.next);

	/*
	 * if we have idle queues and no rt or be queues had pending
	 * requests, either allow immediate service if the grace period
	 * has passed or arm the idle grace timer
	 */
	if (!cfqq && !list_empty(&cfqd->idle_rr)) {
		unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;

		if (time_after_eq(jiffies, end))
			cfqq = list_entry_cfqq(cfqd->idle_rr.next);
		else
			mod_timer(&cfqd->idle_class_timer, end);
	}

	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}
static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct cfq_io_context *cic;
	unsigned long sl;

	WARN_ON(!RB_EMPTY(&cfqq->sort_list));
	WARN_ON(cfqq != cfqd->active_queue);

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfqd->cfq_slice_idle)
		return 0;
	if (!cfq_cfqq_idle_window(cfqq))
		return 0;

	/*
	 * task has exited, don't wait
	 */
	cic = cfqd->active_cic;
	if (!cic || !cic->ioc->task)
		return 0;

	cfq_mark_cfqq_must_dispatch(cfqq);
	cfq_mark_cfqq_wait_request(cfqq);

	sl = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);

	/*
	 * we don't want to idle for seeks, but we do want to allow
	 * fair distribution of slice time for a process doing back-to-back
	 * seeks. so allow a little bit of time for it to submit a new rq
	 */
	if (sample_valid(cic->seek_samples) && cic->seek_mean > 131072)
		sl = 2;

	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
	return 1;
}
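/*
 * Note on the constants above: seek_mean is kept in sectors, so 131072
 * corresponds to 64 MiB with 512-byte sectors; a process seeking that
 * widely on average is considered seeky, and the idle wait is cut to
 * 2 jiffies instead of up to a full cfq_slice_idle period.
 */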
static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = crq->cfq_queue;

	cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);
	cfq_remove_request(crq->request);
	cfqq->on_dispatch[cfq_crq_is_sync(crq)]++;
	elv_dispatch_sort(q, crq->request);
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *rq;
	struct cfq_rq *crq;

	if (cfq_cfqq_fifo_expire(cfqq))
		return NULL;

	if (!list_empty(&cfqq->fifo)) {
		int fifo = cfq_cfqq_class_sync(cfqq);

		crq = RQ_DATA(list_entry_fifo(cfqq->fifo.next));
		rq = crq->request;
		if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
			cfq_mark_cfqq_fifo_expire(cfqq);
			return crq;
		}
	}

	return NULL;
}
/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only should not get the full sync slice length.
 */
static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)];

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio));
}
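/*
 * Worked example (assuming HZ=1000, so cfq_slice_sync = 100 jiffies):
 * ioprio 0 gets 100 + (100/5) * 4 = 180 jiffies, ioprio 4 gets exactly
 * the 100 jiffy base, and ioprio 7 gets 100 + 20 * (4 - 7) = 40, i.e.
 * each priority step is worth one CFQ_SLICE_SCALE'th of the base slice.
 */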
static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
}

static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
}
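/*
 * Worked example (with the default cfq_slice_async_rq = 2): an async
 * queue at ioprio 0 may dispatch up to 2 * (2 + 2 * 7) = 32 requests in
 * one slice, while one at ioprio 7 is capped at 2 * (2 + 2 * 0) = 4.
 */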
/*
 * get next queue for service
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
	unsigned long now = jiffies;
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	/*
	 * slice has expired
	 */
	if (!cfq_cfqq_must_dispatch(cfqq) && time_after(now, cfqq->slice_end))
		goto expire;

	/*
	 * if queue has requests, dispatch one. if not, check if
	 * enough slice is left to wait for one
	 */
	if (!RB_EMPTY(&cfqq->sort_list))
		goto keep_queue;
	else if (cfq_cfqq_class_sync(cfqq) &&
		 time_before(now, cfqq->slice_end)) {
		if (cfq_arm_slice_timer(cfqd, cfqq))
			return NULL;
	}

expire:
	cfq_slice_expired(cfqd, 0);
new_queue:
	cfqq = cfq_set_active_queue(cfqd);
keep_queue:
	return cfqq;
}
static int
__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			int max_dispatch)
{
	int dispatched = 0;

	BUG_ON(RB_EMPTY(&cfqq->sort_list));

	do {
		struct cfq_rq *crq;

		/*
		 * follow expired path, else get first next available
		 */
		if ((crq = cfq_check_fifo(cfqq)) == NULL)
			crq = cfqq->next_crq;

		/*
		 * finally, insert request into driver dispatch list
		 */
		cfq_dispatch_insert(cfqd->queue, crq);

		cfqd->dispatch_slice++;
		dispatched++;

		if (!cfqd->active_cic) {
			atomic_inc(&crq->io_context->ioc->refcount);
			cfqd->active_cic = crq->io_context;
		}

		if (RB_EMPTY(&cfqq->sort_list))
			break;

	} while (dispatched < max_dispatch);

	/*
	 * if slice end isn't set yet, set it. if at least one request was
	 * sync, use the sync time slice value
	 */
	if (!cfqq->slice_end)
		cfq_set_prio_slice(cfqd, cfqq);

	/*
	 * expire an async queue immediately if it has used up its slice.
	 * idle queues always expire after 1 dispatch round.
	 */
	if ((!cfq_cfqq_sync(cfqq) &&
	    cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
	    cfq_class_idle(cfqq))
		cfq_slice_expired(cfqd, 0);

	return dispatched;
}
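/*
 * Note: each dispatch round first honours cfq_check_fifo() (deadline-style
 * starvation protection) and only falls back to the sector-sorted
 * ->next_crq when nothing in the fifo has timed out yet.
 */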
static int
cfq_forced_dispatch_cfqqs(struct list_head *list)
{
	int dispatched = 0;
	struct cfq_queue *cfqq, *next;
	struct cfq_rq *crq;

	list_for_each_entry_safe(cfqq, next, list, cfq_list) {
		while ((crq = cfqq->next_crq)) {
			cfq_dispatch_insert(cfqq->cfqd->queue, crq);
			dispatched++;
		}
		BUG_ON(!list_empty(&cfqq->fifo));
	}

	return dispatched;
}

static int
cfq_forced_dispatch(struct cfq_data *cfqd)
{
	int i, dispatched = 0;

	for (i = 0; i < CFQ_PRIO_LISTS; i++)
		dispatched += cfq_forced_dispatch_cfqqs(&cfqd->rr_list[i]);

	dispatched += cfq_forced_dispatch_cfqqs(&cfqd->busy_rr);
	dispatched += cfq_forced_dispatch_cfqqs(&cfqd->cur_rr);
	dispatched += cfq_forced_dispatch_cfqqs(&cfqd->idle_rr);

	cfq_slice_expired(cfqd, 0);

	BUG_ON(cfqd->busy_queues);

	return dispatched;
}

static int
cfq_dispatch_requests(request_queue_t *q, int force)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq;

	if (!cfqd->busy_queues)
		return 0;

	if (unlikely(force))
		return cfq_forced_dispatch(cfqd);

	cfqq = cfq_select_queue(cfqd);
	if (cfqq) {
		int max_dispatch;

		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_wait_request(cfqq);
		del_timer(&cfqd->idle_slice_timer);

		max_dispatch = cfqd->cfq_quantum;
		if (cfq_class_idle(cfqq))
			max_dispatch = 1;

		return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
	}

	return 0;
}
/*
 * task holds one reference to the queue, dropped when task exits. each crq
 * in-flight on this queue also holds a reference, dropped when crq is freed.
 *
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;

	BUG_ON(atomic_read(&cfqq->ref) <= 0);

	if (!atomic_dec_and_test(&cfqq->ref))
		return;

	BUG_ON(rb_first(&cfqq->sort_list));
	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
	BUG_ON(cfq_cfqq_on_rr(cfqq));

	if (unlikely(cfqd->active_queue == cfqq))
		__cfq_slice_expired(cfqd, cfqq, 0);

	/*
	 * it's on the empty list and still hashed
	 */
	list_del(&cfqq->cfq_list);
	hlist_del(&cfqq->cfq_hash);
	kmem_cache_free(cfq_pool, cfqq);
}
static inline struct cfq_queue *
__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
		    const int hashval)
{
	struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
	struct hlist_node *entry;
	struct cfq_queue *__cfqq;

	hlist_for_each_entry(__cfqq, entry, hash_list, cfq_hash) {
		const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio);

		if (__cfqq->key == key && (__p == prio || !prio))
			return __cfqq;
	}

	return NULL;
}

static struct cfq_queue *
cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio)
{
	return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
}

static void cfq_free_io_context(struct io_context *ioc)
{
	struct cfq_io_context *__cic;
	struct rb_node *n;
	int freed = 0;

	while ((n = rb_first(&ioc->cic_root)) != NULL) {
		__cic = rb_entry(n, struct cfq_io_context, rb_node);
		rb_erase(&__cic->rb_node, &ioc->cic_root);
		kmem_cache_free(cfq_ioc_pool, __cic);
		freed++;
	}

	if (atomic_sub_and_test(freed, &ioc_count) && ioc_gone)
		complete(ioc_gone);
}
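/*
 * Note: ioc_count tracks live cfq_io_contexts. The ioc_gone completion is
 * presumably armed at module exit (outside this excerpt), so the thread
 * freeing the last context wakes the unloader via complete(ioc_gone).
 */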
static void cfq_trim(struct io_context *ioc)
{
	ioc->set_ioprio = NULL;
	cfq_free_io_context(ioc);
}

/*
 * Called with interrupts disabled
 */
static void cfq_exit_single_io_context(struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;
	request_queue_t *q;

	if (!cfqd)
		return;

	q = cfqd->queue;

	WARN_ON(!irqs_disabled());

	spin_lock(q->queue_lock);

	if (cic->cfqq[ASYNC]) {
		if (unlikely(cic->cfqq[ASYNC] == cfqd->active_queue))
			__cfq_slice_expired(cfqd, cic->cfqq[ASYNC], 0);
		cfq_put_queue(cic->cfqq[ASYNC]);
		cic->cfqq[ASYNC] = NULL;
	}

	if (cic->cfqq[SYNC]) {
		if (unlikely(cic->cfqq[SYNC] == cfqd->active_queue))
			__cfq_slice_expired(cfqd, cic->cfqq[SYNC], 0);
		cfq_put_queue(cic->cfqq[SYNC]);
		cic->cfqq[SYNC] = NULL;
	}

	cic->key = NULL;
	list_del_init(&cic->queue_list);
	spin_unlock(q->queue_lock);
}

static void cfq_exit_io_context(struct io_context *ioc)
{
	struct cfq_io_context *__cic;
	unsigned long flags;
	struct rb_node *n;

	/*
	 * put the reference this task is holding to the various queues
	 */
	spin_lock_irqsave(&cfq_exit_lock, flags);

	n = rb_first(&ioc->cic_root);
	while (n != NULL) {
		__cic = rb_entry(n, struct cfq_io_context, rb_node);

		cfq_exit_single_io_context(__cic);
		n = rb_next(n);
	}

	spin_unlock_irqrestore(&cfq_exit_lock, flags);
}
static struct cfq_io_context *
cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);

	if (cic) {
		memset(cic, 0, sizeof(*cic));
		RB_CLEAR_COLOR(&cic->rb_node);
		cic->last_end_request = jiffies;
		INIT_LIST_HEAD(&cic->queue_list);
		cic->dtor = cfq_free_io_context;
		cic->exit = cfq_exit_io_context;
		atomic_inc(&ioc_count);
	}

	return cic;
}

static void cfq_init_prio_data(struct cfq_queue *cfqq)
{
	struct task_struct *tsk = current;
	int ioprio_class;

	if (!cfq_cfqq_prio_changed(cfqq))
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
	switch (ioprio_class) {
	default:
		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
	case IOPRIO_CLASS_NONE:
		/*
		 * no prio set, place us in the middle of the BE classes
		 */
		cfqq->ioprio = task_nice_ioprio(tsk);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_RT:
		cfqq->ioprio = task_ioprio(tsk);
		cfqq->ioprio_class = IOPRIO_CLASS_RT;
		break;
	case IOPRIO_CLASS_BE:
		cfqq->ioprio = task_ioprio(tsk);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_IDLE:
		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
		cfqq->ioprio = 7;
		cfq_clear_cfqq_idle_window(cfqq);
		break;
	}

	/*
	 * keep track of original prio settings in case we have to temporarily
	 * elevate the priority of this queue
	 */
	cfqq->org_ioprio = cfqq->ioprio;
	cfqq->org_ioprio_class = cfqq->ioprio_class;

	if (cfq_cfqq_on_rr(cfqq))
		cfq_resort_rr_list(cfqq, 0);

	cfq_clear_cfqq_prio_changed(cfqq);
}
static inline void changed_ioprio(struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;
	struct cfq_queue *cfqq;

	if (cfqd) {
		spin_lock(cfqd->queue->queue_lock);
		cfqq = cic->cfqq[ASYNC];
		if (cfqq) {
			struct cfq_queue *new_cfqq;
			new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC,
						 cic->ioc->task, GFP_ATOMIC);
			if (new_cfqq) {
				cic->cfqq[ASYNC] = new_cfqq;
				cfq_put_queue(cfqq);
			}
		}
		cfqq = cic->cfqq[SYNC];
		if (cfqq) {
			cfq_mark_cfqq_prio_changed(cfqq);
			cfq_init_prio_data(cfqq);
		}
		spin_unlock(cfqd->queue->queue_lock);
	}
}

/*
 * callback from sys_ioprio_set, irqs are disabled
 */
static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
{
	struct cfq_io_context *cic;
	struct rb_node *n;

	spin_lock(&cfq_exit_lock);

	n = rb_first(&ioc->cic_root);
	while (n != NULL) {
		cic = rb_entry(n, struct cfq_io_context, rb_node);

		changed_ioprio(cic);
		n = rb_next(n);
	}

	spin_unlock(&cfq_exit_lock);

	return 0;
}
static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk,
	      gfp_t gfp_mask)
{
	const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
	struct cfq_queue *cfqq, *new_cfqq = NULL;
	unsigned short ioprio;

retry:
	ioprio = tsk->ioprio;
	cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);

	if (!cfqq) {
		if (new_cfqq) {
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
			spin_unlock_irq(cfqd->queue->queue_lock);
			new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
			spin_lock_irq(cfqd->queue->queue_lock);
			goto retry;
		} else {
			cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
			if (!cfqq)
				goto out;
		}

		memset(cfqq, 0, sizeof(*cfqq));

		INIT_HLIST_NODE(&cfqq->cfq_hash);
		INIT_LIST_HEAD(&cfqq->cfq_list);
		RB_CLEAR_ROOT(&cfqq->sort_list);
		INIT_LIST_HEAD(&cfqq->fifo);

		cfqq->key = key;
		hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
		atomic_set(&cfqq->ref, 0);
		cfqq->cfqd = cfqd;
		cfqq->service_last = 0;
		/*
		 * set ->slice_left to allow preemption for a new process
		 */
		cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
		if (!cfqd->hw_tag)
			cfq_mark_cfqq_idle_window(cfqq);
		cfq_mark_cfqq_prio_changed(cfqq);
		cfq_init_prio_data(cfqq);
	}

	if (new_cfqq)
		kmem_cache_free(cfq_pool, new_cfqq);

	atomic_inc(&cfqq->ref);
out:
	WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
	return cfqq;
}
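/*
 * Note on the retry pattern in cfq_get_queue(): a __GFP_WAIT allocation
 * may sleep, so the queue lock is dropped around kmem_cache_alloc() and
 * the hash is looked up again afterwards; if another context raced in and
 * created the cfqq first, the preallocated new_cfqq is simply freed.
 */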
static void
cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
{
	spin_lock(&cfq_exit_lock);
	rb_erase(&cic->rb_node, &ioc->cic_root);
	list_del_init(&cic->queue_list);
	spin_unlock(&cfq_exit_lock);

	kmem_cache_free(cfq_ioc_pool, cic);
	atomic_dec(&ioc_count);
}

static struct cfq_io_context *
cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
{
	struct rb_node *n;
	struct cfq_io_context *cic;
	void *k, *key = cfqd;

restart:
	n = ioc->cic_root.rb_node;
	while (n) {
		cic = rb_entry(n, struct cfq_io_context, rb_node);
		/* ->key must be copied to avoid race with cfq_exit_queue() */
		k = cic->key;
		if (unlikely(!k)) {
			cfq_drop_dead_cic(ioc, cic);
			goto restart;
		}

		if (key < k)
			n = n->rb_left;
		else if (key > k)
			n = n->rb_right;
		else
			return cic;
	}

	return NULL;
}
static inline void
cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
	     struct cfq_io_context *cic)
{
	struct rb_node **p;
	struct rb_node *parent;
	struct cfq_io_context *__cic;
	void *k;

	cic->ioc = ioc;
	cic->key = cfqd;

	ioc->set_ioprio = cfq_ioc_set_ioprio;
restart:
	parent = NULL;
	p = &ioc->cic_root.rb_node;
	while (*p) {
		parent = *p;
		__cic = rb_entry(parent, struct cfq_io_context, rb_node);
		/* ->key must be copied to avoid race with cfq_exit_queue() */
		k = __cic->key;
		if (unlikely(!k)) {
			/* drop the dead entry found in the tree (__cic),
			 * not the cic we are inserting */
			cfq_drop_dead_cic(ioc, __cic);
			goto restart;
		}

		if (cic->key < k)
			p = &(*p)->rb_left;
		else if (cic->key > k)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	spin_lock(&cfq_exit_lock);
	rb_link_node(&cic->rb_node, parent, p);
	rb_insert_color(&cic->rb_node, &ioc->cic_root);
	list_add(&cic->queue_list, &cfqd->cic_list);
	spin_unlock(&cfq_exit_lock);
}
/*
 * Setup general io context and cfq io context. There can be several cfq
 * io contexts per general io context, if this process is doing io to more
 * than one device managed by cfq.
 */
static struct cfq_io_context *
cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct io_context *ioc = NULL;
	struct cfq_io_context *cic;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	ioc = get_io_context(gfp_mask);
	if (!ioc)
		return NULL;

	cic = cfq_cic_rb_lookup(cfqd, ioc);
	if (cic)
		goto out;

	cic = cfq_alloc_io_context(cfqd, gfp_mask);
	if (cic == NULL)
		goto err;

	cfq_cic_link(cfqd, ioc, cic);
out:
	return cic;
err:
	put_io_context(ioc);
	return NULL;
}

static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
{
	unsigned long elapsed, ttime;

	/*
	 * if this context already has stuff queued, thinktime is from
	 * last queue not last end
	 */
#if 0
	if (time_after(cic->last_end_request, cic->last_queue))
		elapsed = jiffies - cic->last_end_request;
	else
		elapsed = jiffies - cic->last_queue;
#else
	elapsed = jiffies - cic->last_end_request;
#endif

	ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);

	cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
	cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
	cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
}
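/*
 * The think-time statistics above form an exponentially weighted moving
 * average in fixed point: each update keeps 7/8 of the old value, and the
 * 256 factor provides eight bits of fraction. Starting from zero,
 * ttime_samples goes 32, 60, 84, ... converging toward 256 (one full
 * sample weight); sample_valid() requires it to exceed 80.
 */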
static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
		       struct cfq_rq *crq)
{
	sector_t sdist;
	u64 total;

	if (cic->last_request_pos < crq->request->sector)
		sdist = crq->request->sector - cic->last_request_pos;
	else
		sdist = cic->last_request_pos - crq->request->sector;

	/*
	 * Don't allow the seek distance to get too large from the
	 * odd fragment, pagein, etc
	 */
	if (cic->seek_samples <= 60) /* second&third seek */
		sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
	else
		sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);

	cic->seek_samples = (7*cic->seek_samples + 256) / 8;
	cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
	total = cic->seek_total + (cic->seek_samples/2);
	do_div(total, cic->seek_samples);
	cic->seek_mean = (sector_t)total;
}
/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_context *cic)
{
	int enable_idle = cfq_cfqq_idle_window(cfqq);

	if (!cic->ioc->task || !cfqd->cfq_slice_idle || cfqd->hw_tag)
		enable_idle = 0;
	else if (sample_valid(cic->ttime_samples)) {
		if (cic->ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
	}

	if (enable_idle)
		cfq_mark_cfqq_idle_window(cfqq);
	else
		cfq_clear_cfqq_idle_window(cfqq);
}
/*
 * Check if new_cfqq should preempt the currently active queue. Returns 0
 * for no (or if we aren't sure); a 1 will cause a preempt.
 */
static int
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
		   struct cfq_rq *crq)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfq_class_idle(new_cfqq))
		return 0;

	if (!cfqq)
		return 1;

	if (cfq_class_idle(cfqq))
		return 1;
	if (!cfq_cfqq_wait_request(new_cfqq))
		return 0;
	/*
	 * if it doesn't have slice left, forget it
	 */
	if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
		return 0;
	if (cfq_crq_is_sync(crq) && !cfq_cfqq_sync(cfqq))
		return 1;

	return 0;
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct cfq_queue *__cfqq, *next;

	list_for_each_entry_safe(__cfqq, next, &cfqd->cur_rr, cfq_list)
		cfq_resort_rr_list(__cfqq, 1);

	if (!cfqq->slice_left)
		cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;

	cfqq->slice_end = cfqq->slice_left + jiffies;
	__cfq_slice_expired(cfqd, cfqq, 1);
	__cfq_set_active_queue(cfqd, cfqq);
}

/*
 * should really be a ll_rw_blk.c helper
 */
static void cfq_start_queueing(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	request_queue_t *q = cfqd->queue;

	if (!blk_queue_plugged(q))
		q->request_fn(q);
	else
		__generic_unplug_device(q);
}

/*
 * Called when a new fs request (crq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		 struct cfq_rq *crq)
{
	struct cfq_io_context *cic;

	cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);

	cic = crq->io_context;

	/*
	 * we never wait for an async request and we don't allow preemption
	 * of an async request. so just return early
	 */
	if (!cfq_crq_is_sync(crq)) {
		/*
		 * sync process issued an async request, if it's waiting
		 * then expire it and kick rq handling.
		 */
		if (cic == cfqd->active_cic &&
		    del_timer(&cfqd->idle_slice_timer)) {
			cfq_slice_expired(cfqd, 0);
			cfq_start_queueing(cfqd, cfqq);
		}
		return;
	}

	cfq_update_io_thinktime(cfqd, cic);
	cfq_update_io_seektime(cfqd, cic, crq);
	cfq_update_idle_window(cfqd, cfqq, cic);

	cic->last_queue = jiffies;
	cic->last_request_pos = crq->request->sector + crq->request->nr_sectors;

	if (cfqq == cfqd->active_queue) {
		/*
		 * if we are waiting for a request for this queue, let it rip
		 * immediately and flag that we must not expire this queue
		 * just now
		 */
		if (cfq_cfqq_wait_request(cfqq)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
			del_timer(&cfqd->idle_slice_timer);
			cfq_start_queueing(cfqd, cfqq);
		}
	} else if (cfq_should_preempt(cfqd, cfqq, crq)) {
		/*
		 * not the active queue - expire the current slice if it is
		 * idle and has exceeded its mean thinktime, or if this new
		 * queue has some old slice time left and is of higher priority
		 */
		cfq_preempt_queue(cfqd, cfqq);
		cfq_mark_cfqq_must_dispatch(cfqq);
		cfq_start_queueing(cfqd, cfqq);
	}
}
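
/*
 * elevator add_req hook: file the request in the queue's sort rbtree and
 * FIFO, hash it for back-merge lookups when eligible, then run the
 * enqueue checks above (idle-window kick, preemption).
 */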
static void cfq_insert_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_rq *crq = RQ_DATA(rq);
	struct cfq_queue *cfqq = crq->cfq_queue;

	cfq_init_prio_data(cfqq);

	cfq_add_crq_rb(crq);

	list_add_tail(&rq->queuelist, &cfqq->fifo);

	if (rq_mergeable(rq))
		cfq_add_crq_hash(cfqd, crq);

	cfq_crq_enqueued(cfqd, cfqq, crq);
}

static void cfq_completed_request(request_queue_t *q, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);
	struct cfq_queue *cfqq = crq->cfq_queue;
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = cfq_crq_is_sync(crq);
	unsigned long now;

	now = jiffies;

	WARN_ON(!cfqd->rq_in_driver);
	WARN_ON(!cfqq->on_dispatch[sync]);
	cfqd->rq_in_driver--;
	cfqq->on_dispatch[sync]--;

	if (!cfq_class_idle(cfqq))
		cfqd->last_end_request = now;

	if (!cfq_cfqq_dispatched(cfqq)) {
		if (cfq_cfqq_on_rr(cfqq)) {
			cfqq->service_last = now;
			cfq_resort_rr_list(cfqq, 0);
		}
		cfq_schedule_dispatch(cfqd);
	}

	if (cfq_crq_is_sync(crq))
		crq->io_context->last_end_request = now;
}
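
/*
 * elevator former/latter hooks: walk the per-queue sort rbtree to find
 * the sector-wise neighbours of rq, which the block layer uses when
 * probing for merge candidates.
 */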
static struct request *
cfq_former_request(request_queue_t *q, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);
	struct rb_node *rbprev = rb_prev(&crq->rb_node);

	if (rbprev)
		return rb_entry_crq(rbprev)->request;

	return NULL;
}

static struct request *
cfq_latter_request(request_queue_t *q, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);
	struct rb_node *rbnext = rb_next(&crq->rb_node);

	if (rbnext)
		return rb_entry_crq(rbnext)->request;

	return NULL;
}

/*
 * we temporarily boost lower priority queues if they are holding fs exclusive
 * resources. they are boosted to normal prio (CLASS_BE/4)
 */
static void cfq_prio_boost(struct cfq_queue *cfqq)
{
	const int ioprio_class = cfqq->ioprio_class;
	const int ioprio = cfqq->ioprio;

	if (has_fs_excl()) {
		/*
		 * boost idle prio on transactions that would lock out other
		 * users of the filesystem
		 */
		if (cfq_class_idle(cfqq))
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
		if (cfqq->ioprio > IOPRIO_NORM)
			cfqq->ioprio = IOPRIO_NORM;
	} else {
		/*
		 * check if we need to unboost the queue
		 */
		if (cfqq->ioprio_class != cfqq->org_ioprio_class)
			cfqq->ioprio_class = cfqq->org_ioprio_class;
		if (cfqq->ioprio != cfqq->org_ioprio)
			cfqq->ioprio = cfqq->org_ioprio;
	}

	/*
	 * refile between round-robin lists if we moved the priority class
	 */
	if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio) &&
	    cfq_cfqq_on_rr(cfqq))
		cfq_resort_rr_list(cfqq, 0);
}
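
/*
 * may_queue return values follow the elevator convention: ELV_MQUEUE_MUST
 * lets the task allocate even past the normal request limits,
 * ELV_MQUEUE_MAY leaves the decision to the block layer, and
 * ELV_MQUEUE_NO refuses outright. Only the #if 1 branch below is live;
 * the #else branch is an older, stricter policy kept for reference.
 */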
static inline int
__cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct task_struct *task, int rw)
{
#if 1
	if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
	    !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
#else
	if (!cfqq || task->flags & PF_MEMALLOC)
		return ELV_MQUEUE_MAY;
	if (!cfqq->allocated[rw] || cfq_cfqq_must_alloc(cfqq)) {
		if (cfq_cfqq_wait_request(cfqq))
			return ELV_MQUEUE_MUST;

		/*
		 * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we
		 * can quickly flood the queue with writes from a single task
		 */
		if (rw == READ || !cfq_cfqq_must_alloc_slice(cfqq)) {
			cfq_mark_cfqq_must_alloc_slice(cfqq);
			return ELV_MQUEUE_MUST;
		}

		return ELV_MQUEUE_MAY;
	}
	if (cfq_class_idle(cfqq))
		return ELV_MQUEUE_NO;
	if (cfqq->allocated[rw] >= cfqd->max_queued) {
		struct io_context *ioc = get_io_context(GFP_ATOMIC);
		int ret = ELV_MQUEUE_NO;

		if (ioc && ioc->nr_batch_requests)
			ret = ELV_MQUEUE_MAY;

		put_io_context(ioc);
		return ret;
	}

	return ELV_MQUEUE_MAY;
#endif
}

static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_queue *cfqq;

	/*
	 * don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * so just look up a possibly existing queue, or return 'may queue'
	 * if that fails
	 */
	cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(tsk, rw), tsk->ioprio);
	if (cfqq) {
		cfq_init_prio_data(cfqq);
		cfq_prio_boost(cfqq);

		return __cfq_may_queue(cfqd, cfqq, tsk, rw);
	}

	return ELV_MQUEUE_MAY;
}
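
/*
 * Wake tasks sleeping on the request_list once this queue's allocation
 * count drops back under the per-queue limit (or if we flagged ourselves
 * rq_starved); the smp_mb() orders the earlier counter updates against
 * the waitqueue_active() check.
 */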
static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request_list *rl = &q->rq;

	if (cfqq->allocated[READ] <= cfqd->max_queued || cfqd->rq_starved) {
		smp_mb();
		if (waitqueue_active(&rl->wait[READ]))
			wake_up(&rl->wait[READ]);
	}

	if (cfqq->allocated[WRITE] <= cfqd->max_queued || cfqd->rq_starved) {
		smp_mb();
		if (waitqueue_active(&rl->wait[WRITE]))
			wake_up(&rl->wait[WRITE]);
	}
}

/*
 * queue lock held here
 */
static void cfq_put_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_rq *crq = RQ_DATA(rq);

	if (crq) {
		struct cfq_queue *cfqq = crq->cfq_queue;
		const int rw = rq_data_dir(rq);

		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;

		put_io_context(crq->io_context->ioc);

		mempool_free(crq, cfqd->crq_pool);
		rq->elevator_private = NULL;

		cfq_check_waiters(q, cfqq);
		cfq_put_queue(cfqq);
	}
}

/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
		gfp_t gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	const int rw = rq_data_dir(rq);
	pid_t key = cfq_queue_pid(tsk, rw);
	struct cfq_queue *cfqq;
	struct cfq_rq *crq;
	unsigned long flags;
	int is_sync = key != CFQ_KEY_ASYNC;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	cic = cfq_get_io_context(cfqd, gfp_mask);

	spin_lock_irqsave(q->queue_lock, flags);

	if (!cic)
		goto queue_fail;

	if (!cic->cfqq[is_sync]) {
		cfqq = cfq_get_queue(cfqd, key, tsk, gfp_mask);
		if (!cfqq)
			goto queue_fail;

		cic->cfqq[is_sync] = cfqq;
	} else
		cfqq = cic->cfqq[is_sync];

	cfqq->allocated[rw]++;
	cfq_clear_cfqq_must_alloc(cfqq);
	cfqd->rq_starved = 0;
	atomic_inc(&cfqq->ref);

	spin_unlock_irqrestore(q->queue_lock, flags);

	crq = mempool_alloc(cfqd->crq_pool, gfp_mask);
	if (crq) {
		RB_CLEAR(&crq->rb_node);
		crq->rb_key = 0;
		crq->request = rq;
		INIT_HLIST_NODE(&crq->hash);
		crq->cfq_queue = cfqq;
		crq->io_context = cic;

		if (is_sync)
			cfq_mark_crq_is_sync(crq);
		else
			cfq_clear_crq_is_sync(crq);

		rq->elevator_private = crq;
		return 0;
	}

	spin_lock_irqsave(q->queue_lock, flags);
	cfqq->allocated[rw]--;
	if (!(cfqq->allocated[0] + cfqq->allocated[1]))
		cfq_mark_cfqq_must_alloc(cfqq);
	cfq_put_queue(cfqq);
queue_fail:
	if (cic)
		put_io_context(cic->ioc);
	/*
	 * mark us rq allocation starved. we need to kickstart the process
	 * ourselves if there are no pending requests that can do it for us.
	 * that would be an extremely rare OOM situation
	 */
	cfqd->rq_starved = 1;
	cfq_schedule_dispatch(cfqd);
	spin_unlock_irqrestore(q->queue_lock, flags);
	return 1;
}
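
/*
 * Work handler behind cfq_schedule_dispatch(): reruns the request_fn
 * and, if a previous crq allocation failed (rq_starved), wakes any
 * tasks sleeping for a free request so they can retry.
 */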
static void cfq_kick_queue(void *data)
{
	request_queue_t *q = data;
	struct cfq_data *cfqd = q->elevator->elevator_data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);

	if (cfqd->rq_starved) {
		struct request_list *rl = &q->rq;

		/*
		 * we aren't guaranteed to get a request after this, but we
		 * have to be opportunistic
		 */
		smp_mb();
		if (waitqueue_active(&rl->wait[READ]))
			wake_up(&rl->wait[READ]);
		if (waitqueue_active(&rl->wait[WRITE]))
			wake_up(&rl->wait[WRITE]);
	}

	blk_remove_plug(q);
	q->request_fn(q);

	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	if ((cfqq = cfqd->active_queue) != NULL) {
		unsigned long now = jiffies;

		/*
		 * expired
		 */
		if (time_after(now, cfqq->slice_end))
			goto expire;

		/*
		 * only expire and reinvoke request handler, if there are
		 * other queues with pending requests
		 */
		if (!cfqd->busy_queues) {
			cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end);
			add_timer(&cfqd->idle_slice_timer);
			goto out_cont;
		}

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY(&cfqq->sort_list)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
			goto out_kick;
		}
	}
expire:
	cfq_slice_expired(cfqd, 0);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

/*
 * Timer running if an idle class queue is waiting for service
 */
static void cfq_idle_class_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	unsigned long flags, end;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	/*
	 * race with a non-idle queue, reset timer
	 */
	end = cfqd->last_end_request + CFQ_IDLE_GRACE;
	if (!time_after_eq(jiffies, end))
		mod_timer(&cfqd->idle_class_timer, end);
	else
		cfq_schedule_dispatch(cfqd);

	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
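
/*
 * Stop both timers and flush the queue's pending unplug work so nothing
 * can rearm while the elevator is being torn down.
 */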
static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	del_timer_sync(&cfqd->idle_class_timer);
	blk_sync_queue(cfqd->queue);
}

static void cfq_exit_queue(elevator_t *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	request_queue_t *q = cfqd->queue;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock(&cfq_exit_lock);
	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	while (!list_empty(&cfqd->cic_list)) {
		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
							struct cfq_io_context,
							queue_list);

		if (cic->cfqq[ASYNC]) {
			cfq_put_queue(cic->cfqq[ASYNC]);
			cic->cfqq[ASYNC] = NULL;
		}
		if (cic->cfqq[SYNC]) {
			cfq_put_queue(cic->cfqq[SYNC]);
			cic->cfqq[SYNC] = NULL;
		}
		cic->key = NULL;
		list_del_init(&cic->queue_list);
	}

	spin_unlock_irq(q->queue_lock);
	spin_unlock(&cfq_exit_lock);

	cfq_shutdown_timer_wq(cfqd);

	mempool_destroy(cfqd->crq_pool);
	kfree(cfqd->crq_hash);
	kfree(cfqd->cfq_hash);
	kfree(cfqd);
}
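
/*
 * Allocate and initialise per-queue scheduler state: the round-robin
 * lists, the crq/cfqq hashes and the crq mempool, both idle timers and
 * the unplug work, plus the tunables at their module-level defaults.
 */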
static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
{
	struct cfq_data *cfqd;
	int i;

	cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
	if (!cfqd)
		return NULL;

	memset(cfqd, 0, sizeof(*cfqd));

	for (i = 0; i < CFQ_PRIO_LISTS; i++)
		INIT_LIST_HEAD(&cfqd->rr_list[i]);

	INIT_LIST_HEAD(&cfqd->busy_rr);
	INIT_LIST_HEAD(&cfqd->cur_rr);
	INIT_LIST_HEAD(&cfqd->idle_rr);
	INIT_LIST_HEAD(&cfqd->empty_list);
	INIT_LIST_HEAD(&cfqd->cic_list);

	cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
	if (!cfqd->crq_hash)
		goto out_crqhash;

	cfqd->cfq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL);
	if (!cfqd->cfq_hash)
		goto out_cfqhash;

	cfqd->crq_pool = mempool_create_slab_pool(BLKDEV_MIN_RQ, crq_pool);
	if (!cfqd->crq_pool)
		goto out_crqpool;

	for (i = 0; i < CFQ_MHASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&cfqd->crq_hash[i]);
	for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);

	cfqd->queue = q;

	cfqd->max_queued = q->nr_requests / 4;
	q->nr_batching = cfq_queued;

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	init_timer(&cfqd->idle_class_timer);
	cfqd->idle_class_timer.function = cfq_idle_class_timer;
	cfqd->idle_class_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);

	cfqd->cfq_queued = cfq_queued;
	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;

	return cfqd;
out_crqpool:
	kfree(cfqd->cfq_hash);
out_cfqhash:
	kfree(cfqd->crq_hash);
out_crqhash:
	kfree(cfqd);
	return NULL;
}

static void cfq_slab_kill(void)
{
	if (crq_pool)
		kmem_cache_destroy(crq_pool);
	if (cfq_pool)
		kmem_cache_destroy(cfq_pool);
	if (cfq_ioc_pool)
		kmem_cache_destroy(cfq_ioc_pool);
}

static int __init cfq_slab_setup(void)
{
	crq_pool = kmem_cache_create("crq_pool", sizeof(struct cfq_rq), 0, 0,
					NULL, NULL);
	if (!crq_pool)
		goto fail;

	cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
					NULL, NULL);
	if (!cfq_pool)
		goto fail;

	cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
			sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
	if (!cfq_ioc_pool)
		goto fail;

	return 0;
fail:
	cfq_slab_kill();
	return -ENOMEM;
}

/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}
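
/*
 * Each tunable gets a generated show/store pair; __CONV selects whether
 * the value is exposed in milliseconds (converted to and from the
 * internal jiffies representation) or used raw.
 */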
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(elevator_t *e, char *page)			\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
#undef STORE_FUNCTION

#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(queued),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	__ATTR_NULL
};

static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn =		cfq_merge,
		.elevator_merged_fn =		cfq_merged_request,
		.elevator_merge_req_fn =	cfq_merged_requests,
		.elevator_dispatch_fn =		cfq_dispatch_requests,
		.elevator_add_req_fn =		cfq_insert_request,
		.elevator_activate_req_fn =	cfq_activate_request,
		.elevator_deactivate_req_fn =	cfq_deactivate_request,
		.elevator_queue_empty_fn =	cfq_queue_empty,
		.elevator_completed_req_fn =	cfq_completed_request,
		.elevator_former_req_fn =	cfq_former_request,
		.elevator_latter_req_fn =	cfq_latter_request,
		.elevator_set_req_fn =		cfq_set_request,
		.elevator_put_req_fn =		cfq_put_request,
		.elevator_may_queue_fn =	cfq_may_queue,
		.elevator_init_fn =		cfq_init_queue,
		.elevator_exit_fn =		cfq_exit_queue,
		.trim =				cfq_trim,
	},
	.elevator_attrs =	cfq_attrs,
	.elevator_name =	"cfq",
	.elevator_owner =	THIS_MODULE,
};

static int __init cfq_init(void)
{
	int ret;

	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

	if (cfq_slab_setup())
		return -ENOMEM;

	ret = elv_register(&iosched_cfq);
	if (ret)
		cfq_slab_kill();

	return ret;
}
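
/*
 * Unload must not free the cfq_io_context slab while exiting tasks still
 * hold contexts: we publish a completion in ioc_gone and wait for the
 * final put to signal it before killing the caches.
 */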
static void __exit cfq_exit(void)
{
	DECLARE_COMPLETION(all_gone);
	elv_unregister(&iosched_cfq);
	ioc_gone = &all_gone;
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();
	if (atomic_read(&ioc_count))
		wait_for_completion(ioc_gone);
	synchronize_rcu();
	cfq_slab_kill();
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");