cfq-iosched.c

  1. /*
  2. * CFQ, or complete fairness queueing, disk scheduler.
  3. *
  4. * Based on ideas from a previously unfinished io
  5. * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
  6. *
  7. * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
  8. */
  9. #include <linux/module.h>
  10. #include <linux/blkdev.h>
  11. #include <linux/elevator.h>
  12. #include <linux/hash.h>
  13. #include <linux/rbtree.h>
  14. #include <linux/ioprio.h>
  15. /*
  16. * tunables
  17. */
  18. static const int cfq_quantum = 4; /* max queue in one round of service */
  19. static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
  20. static const int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */
  21. static const int cfq_back_penalty = 2; /* penalty of a backwards seek */
  22. static const int cfq_slice_sync = HZ / 10;
  23. static int cfq_slice_async = HZ / 25;
  24. static const int cfq_slice_async_rq = 2;
  25. static int cfq_slice_idle = HZ / 125;
  26. #define CFQ_IDLE_GRACE (HZ / 10)
  27. #define CFQ_SLICE_SCALE (5)
  28. #define CFQ_KEY_ASYNC (0)
  29. /*
  30. * for the hash of cfqq inside the cfqd
  31. */
  32. #define CFQ_QHASH_SHIFT 6
  33. #define CFQ_QHASH_ENTRIES (1 << CFQ_QHASH_SHIFT)
  34. #define list_entry_qhash(entry) hlist_entry((entry), struct cfq_queue, cfq_hash)
  35. #define list_entry_cfqq(ptr) list_entry((ptr), struct cfq_queue, cfq_list)
  36. #define RQ_CIC(rq) ((struct cfq_io_context*)(rq)->elevator_private)
  37. #define RQ_CFQQ(rq) ((rq)->elevator_private2)
  38. static struct kmem_cache *cfq_pool;
  39. static struct kmem_cache *cfq_ioc_pool;
  40. static DEFINE_PER_CPU(unsigned long, ioc_count);
  41. static struct completion *ioc_gone;
  42. #define CFQ_PRIO_LISTS IOPRIO_BE_NR
  43. #define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
  44. #define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
  45. #define ASYNC (0)
  46. #define SYNC (1)
  47. #define cfq_cfqq_dispatched(cfqq) \
  48. ((cfqq)->on_dispatch[ASYNC] + (cfqq)->on_dispatch[SYNC])
  49. #define cfq_cfqq_class_sync(cfqq) ((cfqq)->key != CFQ_KEY_ASYNC)
  50. #define cfq_cfqq_sync(cfqq) \
  51. (cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC])
  52. #define sample_valid(samples) ((samples) > 80)
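/*
 * Reading the classification macros above: a cfqq whose key is
 * CFQ_KEY_ASYNC (0) holds async writes, while every other queue is
 * keyed on the owning pid and is class sync.  cfq_cfqq_sync() also
 * treats a queue as sync while it still has sync requests on the
 * dispatch list, and sample_valid() just asks whether enough samples
 * have accumulated for the think time / seek statistics to be trusted.
 */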
  53. /*
  54. * Per block device queue structure
  55. */
  56. struct cfq_data {
  57. request_queue_t *queue;
  58. /*
  59. * rr list of queues with requests and the count of them
  60. */
  61. struct list_head rr_list[CFQ_PRIO_LISTS];
  62. struct list_head busy_rr;
  63. struct list_head cur_rr;
  64. struct list_head idle_rr;
  65. unsigned int busy_queues;
  66. /*
  67. * cfqq lookup hash
  68. */
  69. struct hlist_head *cfq_hash;
  70. int rq_in_driver;
  71. int hw_tag;
  72. /*
  73. * idle window management
  74. */
  75. struct timer_list idle_slice_timer;
  76. struct work_struct unplug_work;
  77. struct cfq_queue *active_queue;
  78. struct cfq_io_context *active_cic;
  79. int cur_prio, cur_end_prio;
  80. unsigned int dispatch_slice;
  81. struct timer_list idle_class_timer;
  82. sector_t last_sector;
  83. unsigned long last_end_request;
  84. /*
  85. * tunables, see top of file
  86. */
  87. unsigned int cfq_quantum;
  88. unsigned int cfq_fifo_expire[2];
  89. unsigned int cfq_back_penalty;
  90. unsigned int cfq_back_max;
  91. unsigned int cfq_slice[2];
  92. unsigned int cfq_slice_async_rq;
  93. unsigned int cfq_slice_idle;
  94. struct list_head cic_list;
  95. };
  96. /*
  97. * Per process-grouping structure
  98. */
  99. struct cfq_queue {
  100. /* reference count */
  101. atomic_t ref;
  102. /* parent cfq_data */
  103. struct cfq_data *cfqd;
  104. /* cfqq lookup hash */
  105. struct hlist_node cfq_hash;
  106. /* hash key */
  107. unsigned int key;
  108. /* member of the rr/busy/cur/idle cfqd list */
  109. struct list_head cfq_list;
  110. /* sorted list of pending requests */
  111. struct rb_root sort_list;
  112. /* if fifo isn't expired, next request to serve */
  113. struct request *next_rq;
  114. /* requests queued in sort_list */
  115. int queued[2];
  116. /* currently allocated requests */
  117. int allocated[2];
  118. /* pending metadata requests */
  119. int meta_pending;
  120. /* fifo list of requests in sort_list */
  121. struct list_head fifo;
  122. unsigned long slice_end;
  123. unsigned long slice_left;
  124. unsigned long service_last;
  125. /* number of requests that are on the dispatch list */
  126. int on_dispatch[2];
  127. /* io prio of this group */
  128. unsigned short ioprio, org_ioprio;
  129. unsigned short ioprio_class, org_ioprio_class;
  130. /* various state flags, see below */
  131. unsigned int flags;
  132. };
  133. enum cfqq_state_flags {
  134. CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */
  135. CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */
  136. CFQ_CFQQ_FLAG_must_alloc, /* must be allowed rq alloc */
  137. CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
  138. CFQ_CFQQ_FLAG_must_dispatch, /* must dispatch, even if expired */
  139. CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
  140. CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */
  141. CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */
  142. CFQ_CFQQ_FLAG_queue_new, /* queue never been serviced */
  143. CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
  144. };
  145. #define CFQ_CFQQ_FNS(name) \
  146. static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
  147. { \
  148. cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
  149. } \
  150. static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
  151. { \
  152. cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
  153. } \
  154. static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
  155. { \
  156. return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
  157. }
  158. CFQ_CFQQ_FNS(on_rr);
  159. CFQ_CFQQ_FNS(wait_request);
  160. CFQ_CFQQ_FNS(must_alloc);
  161. CFQ_CFQQ_FNS(must_alloc_slice);
  162. CFQ_CFQQ_FNS(must_dispatch);
  163. CFQ_CFQQ_FNS(fifo_expire);
  164. CFQ_CFQQ_FNS(idle_window);
  165. CFQ_CFQQ_FNS(prio_changed);
  166. CFQ_CFQQ_FNS(queue_new);
  167. CFQ_CFQQ_FNS(slice_new);
  168. #undef CFQ_CFQQ_FNS
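/*
 * For reference, CFQ_CFQQ_FNS(on_rr) above expands to three trivial
 * helpers along these lines (illustrative expansion, not extra code):
 *
 *	static inline void cfq_mark_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		cfqq->flags |= (1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline void cfq_clear_cfqq_on_rr(struct cfq_queue *cfqq)
 *	{
 *		cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_on_rr);
 *	}
 *	static inline int cfq_cfqq_on_rr(const struct cfq_queue *cfqq)
 *	{
 *		return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_on_rr)) != 0;
 *	}
 */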
  169. static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
  170. static void cfq_dispatch_insert(request_queue_t *, struct request *);
  171. static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);
  172. /*
  173. * scheduler run of queue, if there are requests pending and no one in the
  174. * driver that will restart queueing
  175. */
  176. static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
  177. {
  178. if (cfqd->busy_queues)
  179. kblockd_schedule_work(&cfqd->unplug_work);
  180. }
  181. static int cfq_queue_empty(request_queue_t *q)
  182. {
  183. struct cfq_data *cfqd = q->elevator->elevator_data;
  184. return !cfqd->busy_queues;
  185. }
  186. static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
  187. {
  188. /*
  189. * Use the per-process queue for read requests and synchronous writes
  190. */
  191. if (!(rw & REQ_RW) || is_sync)
  192. return task->pid;
  193. return CFQ_KEY_ASYNC;
  194. }
  195. /*
  196. * Scale schedule slice based on io priority. Use the sync time slice only
  197. * if a queue is marked sync and has sync io queued. A sync queue with async
  198. * io only, should not get full sync slice length.
  199. */
  200. static inline int
  201. cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  202. {
  203. const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)];
  204. WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
  205. return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio));
  206. }
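/*
 * Example of the scaling above, assuming HZ=1000 so that the sync
 * base slice cfq_slice_sync = HZ/10 = 100 jiffies and
 * base_slice/CFQ_SLICE_SCALE = 20:
 *
 *	ioprio 0 (highest BE): 100 + 20 * (4 - 0) = 180 jiffies
 *	ioprio 4 (default):    100 + 20 * (4 - 4) = 100 jiffies
 *	ioprio 7 (lowest BE):  100 + 20 * (4 - 7) =  40 jiffies
 */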
  207. static inline void
  208. cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  209. {
  210. cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
  211. }
  212. /*
  213. * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
  214. * isn't valid until the first request from the dispatch is activated
  215. * and the slice time set.
  216. */
  217. static inline int cfq_slice_used(struct cfq_queue *cfqq)
  218. {
  219. if (cfq_cfqq_slice_new(cfqq))
  220. return 0;
  221. if (time_before(jiffies, cfqq->slice_end))
  222. return 0;
  223. return 1;
  224. }
  225. /*
  226. * Lifted from AS - choose which of rq1 and rq2 is best served now.
  227. * We choose the request that is closest to the head right now. Distance
  228. * behind the head is penalized and only allowed to a certain extent.
  229. */
  230. static struct request *
  231. cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
  232. {
  233. sector_t last, s1, s2, d1 = 0, d2 = 0;
  234. unsigned long back_max;
  235. #define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */
  236. #define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */
  237. unsigned wrap = 0; /* bit mask: requests behind the disk head? */
  238. if (rq1 == NULL || rq1 == rq2)
  239. return rq2;
  240. if (rq2 == NULL)
  241. return rq1;
  242. if (rq_is_sync(rq1) && !rq_is_sync(rq2))
  243. return rq1;
  244. else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
  245. return rq2;
  246. if (rq_is_meta(rq1) && !rq_is_meta(rq2))
  247. return rq1;
  248. else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
  249. return rq2;
  250. s1 = rq1->sector;
  251. s2 = rq2->sector;
  252. last = cfqd->last_sector;
  253. /*
  254. * by definition, 1KiB is 2 sectors
  255. */
  256. back_max = cfqd->cfq_back_max * 2;
  257. /*
  258. * Strict one way elevator _except_ in the case where we allow
  259. * short backward seeks which are biased as twice the cost of a
  260. * similar forward seek.
  261. */
  262. if (s1 >= last)
  263. d1 = s1 - last;
  264. else if (s1 + back_max >= last)
  265. d1 = (last - s1) * cfqd->cfq_back_penalty;
  266. else
  267. wrap |= CFQ_RQ1_WRAP;
  268. if (s2 >= last)
  269. d2 = s2 - last;
  270. else if (s2 + back_max >= last)
  271. d2 = (last - s2) * cfqd->cfq_back_penalty;
  272. else
  273. wrap |= CFQ_RQ2_WRAP;
  274. /* Found required data */
  275. /*
  276. * By doing switch() on the bit mask "wrap" we avoid having to
  277. * check two variables for all permutations: --> faster!
  278. */
  279. switch (wrap) {
  280. case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
  281. if (d1 < d2)
  282. return rq1;
  283. else if (d2 < d1)
  284. return rq2;
  285. else {
  286. if (s1 >= s2)
  287. return rq1;
  288. else
  289. return rq2;
  290. }
  291. case CFQ_RQ2_WRAP:
  292. return rq1;
  293. case CFQ_RQ1_WRAP:
  294. return rq2;
  295. case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
  296. default:
  297. /*
  298. * Since both rqs are wrapped,
  299. * start with the one that's further behind head
  300. * (--> only *one* back seek required),
  301. * since back seek takes more time than forward.
  302. */
  303. if (s1 <= s2)
  304. return rq1;
  305. else
  306. return rq2;
  307. }
  308. }
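/*
 * Worked example with illustrative numbers: say last = 1000 and the
 * default cfq_back_max of 16384 KiB, so back_max = 32768 sectors.
 * A request at sector 1200 lies ahead of the head and gets d = 200;
 * one at sector 800 lies behind but within back_max and is penalized
 * to d = (1000 - 800) * cfq_back_penalty = 400, so the forward request
 * wins.  A request more than back_max sectors behind the head is
 * flagged as wrapped and loses to any non-wrapped request; if both
 * wrap, the one further behind is chosen so that only a single long
 * back seek is needed.
 */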
  309. /*
  310. * would be nice to take fifo expire time into account as well
  311. */
  312. static struct request *
  313. cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  314. struct request *last)
  315. {
  316. struct rb_node *rbnext = rb_next(&last->rb_node);
  317. struct rb_node *rbprev = rb_prev(&last->rb_node);
  318. struct request *next = NULL, *prev = NULL;
  319. BUG_ON(RB_EMPTY_NODE(&last->rb_node));
  320. if (rbprev)
  321. prev = rb_entry_rq(rbprev);
  322. if (rbnext)
  323. next = rb_entry_rq(rbnext);
  324. else {
  325. rbnext = rb_first(&cfqq->sort_list);
  326. if (rbnext && rbnext != &last->rb_node)
  327. next = rb_entry_rq(rbnext);
  328. }
  329. return cfq_choose_req(cfqd, next, prev);
  330. }
  331. static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
  332. {
  333. struct cfq_data *cfqd = cfqq->cfqd;
  334. struct list_head *list, *n;
  335. struct cfq_queue *__cfqq;
  336. /*
  337. * Resorting requires the cfqq to be on the RR list already.
  338. */
  339. if (!cfq_cfqq_on_rr(cfqq))
  340. return;
  341. list_del(&cfqq->cfq_list);
  342. if (cfq_class_rt(cfqq))
  343. list = &cfqd->cur_rr;
  344. else if (cfq_class_idle(cfqq))
  345. list = &cfqd->idle_rr;
  346. else {
  347. /*
  348. * if cfqq has requests in flight, don't allow it to be
  349. * found in cfq_set_active_queue before it has finished them.
  350. * this is done to increase fairness between a process that
  351. * has lots of io pending vs one that only generates one
  352. * sporadically or synchronously
  353. */
  354. if (cfq_cfqq_dispatched(cfqq))
  355. list = &cfqd->busy_rr;
  356. else
  357. list = &cfqd->rr_list[cfqq->ioprio];
  358. }
  359. if (preempted || cfq_cfqq_queue_new(cfqq)) {
  360. /*
  361. * If this queue was preempted or is new (never been serviced),
  362. * let it be added first for fairness but behind other new
  363. * queues.
  364. */
  365. n = list;
  366. while (n->next != list) {
  367. __cfqq = list_entry_cfqq(n->next);
  368. if (!cfq_cfqq_queue_new(__cfqq))
  369. break;
  370. n = n->next;
  371. }
  372. list_add_tail(&cfqq->cfq_list, n);
  373. } else if (!cfq_cfqq_class_sync(cfqq)) {
  374. /*
  375. * async queue always goes to the end. this won't be overly
  376. * unfair to writes, as the sort of the sync queue won't be
  377. * allowed to pass the async queue again.
  378. */
  379. list_add_tail(&cfqq->cfq_list, list);
  380. } else {
  381. /*
  382. * sort by last service, but don't cross a new or async
  383. * queue. we don't cross a new queue because it hasn't been
  384. * serviced before, and we don't cross an async queue because
  385. * it gets added to the end on expire.
  386. */
  387. n = list;
  388. while ((n = n->prev) != list) {
  389. struct cfq_queue *__cfqq = list_entry_cfqq(n);
  390. if (!cfq_cfqq_class_sync(cfqq) || !__cfqq->service_last)
  391. break;
  392. if (time_before(__cfqq->service_last, cfqq->service_last))
  393. break;
  394. }
  395. list_add(&cfqq->cfq_list, n);
  396. }
  397. }
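/*
 * Net effect of the resort above: RT queues land on cur_rr, idle
 * queues on idle_rr, queues with requests still in flight on busy_rr,
 * and everything else on rr_list[ioprio].  Within the chosen list,
 * preempted and never-serviced queues go to the front (behind other
 * new queues), async queues always to the tail, and sync queues are
 * kept ordered by service_last so the least recently serviced one is
 * picked first.
 */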
  398. /*
  399. * add to busy list of queues for service, trying to be fair in ordering
  400. * the pending list according to last request service
  401. */
  402. static inline void
  403. cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  404. {
  405. BUG_ON(cfq_cfqq_on_rr(cfqq));
  406. cfq_mark_cfqq_on_rr(cfqq);
  407. cfqd->busy_queues++;
  408. cfq_resort_rr_list(cfqq, 0);
  409. }
  410. static inline void
  411. cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  412. {
  413. BUG_ON(!cfq_cfqq_on_rr(cfqq));
  414. cfq_clear_cfqq_on_rr(cfqq);
  415. list_del_init(&cfqq->cfq_list);
  416. BUG_ON(!cfqd->busy_queues);
  417. cfqd->busy_queues--;
  418. }
  419. /*
  420. * rb tree support functions
  421. */
  422. static inline void cfq_del_rq_rb(struct request *rq)
  423. {
  424. struct cfq_queue *cfqq = RQ_CFQQ(rq);
  425. struct cfq_data *cfqd = cfqq->cfqd;
  426. const int sync = rq_is_sync(rq);
  427. BUG_ON(!cfqq->queued[sync]);
  428. cfqq->queued[sync]--;
  429. elv_rb_del(&cfqq->sort_list, rq);
  430. if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
  431. cfq_del_cfqq_rr(cfqd, cfqq);
  432. }
  433. static void cfq_add_rq_rb(struct request *rq)
  434. {
  435. struct cfq_queue *cfqq = RQ_CFQQ(rq);
  436. struct cfq_data *cfqd = cfqq->cfqd;
  437. struct request *__alias;
  438. cfqq->queued[rq_is_sync(rq)]++;
  439. /*
  440. * looks a little odd, but the first insert might return an alias.
  441. * if that happens, put the alias on the dispatch list
  442. */
  443. while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
  444. cfq_dispatch_insert(cfqd->queue, __alias);
  445. if (!cfq_cfqq_on_rr(cfqq))
  446. cfq_add_cfqq_rr(cfqd, cfqq);
  447. }
  448. static inline void
  449. cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
  450. {
  451. elv_rb_del(&cfqq->sort_list, rq);
  452. cfqq->queued[rq_is_sync(rq)]--;
  453. cfq_add_rq_rb(rq);
  454. }
  455. static struct request *
  456. cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
  457. {
  458. struct task_struct *tsk = current;
  459. pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
  460. struct cfq_queue *cfqq;
  461. cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
  462. if (cfqq) {
  463. sector_t sector = bio->bi_sector + bio_sectors(bio);
  464. return elv_rb_find(&cfqq->sort_list, sector);
  465. }
  466. return NULL;
  467. }
  468. static void cfq_activate_request(request_queue_t *q, struct request *rq)
  469. {
  470. struct cfq_data *cfqd = q->elevator->elevator_data;
  471. cfqd->rq_in_driver++;
  472. /*
  473. * If the depth is larger than 1, it really could be queueing. But let's
  474. * make the mark a little higher - idling could still be good for
  475. * low queueing, and a low queueing number could also just indicate
  476. * a SCSI mid layer like behaviour where limit+1 is often seen.
  477. */
  478. if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
  479. cfqd->hw_tag = 1;
  480. }
  481. static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
  482. {
  483. struct cfq_data *cfqd = q->elevator->elevator_data;
  484. WARN_ON(!cfqd->rq_in_driver);
  485. cfqd->rq_in_driver--;
  486. }
  487. static void cfq_remove_request(struct request *rq)
  488. {
  489. struct cfq_queue *cfqq = RQ_CFQQ(rq);
  490. if (cfqq->next_rq == rq)
  491. cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
  492. list_del_init(&rq->queuelist);
  493. cfq_del_rq_rb(rq);
  494. if (rq_is_meta(rq)) {
  495. WARN_ON(!cfqq->meta_pending);
  496. cfqq->meta_pending--;
  497. }
  498. }
  499. static int
  500. cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
  501. {
  502. struct cfq_data *cfqd = q->elevator->elevator_data;
  503. struct request *__rq;
  504. __rq = cfq_find_rq_fmerge(cfqd, bio);
  505. if (__rq && elv_rq_merge_ok(__rq, bio)) {
  506. *req = __rq;
  507. return ELEVATOR_FRONT_MERGE;
  508. }
  509. return ELEVATOR_NO_MERGE;
  510. }
  511. static void cfq_merged_request(request_queue_t *q, struct request *req,
  512. int type)
  513. {
  514. if (type == ELEVATOR_FRONT_MERGE) {
  515. struct cfq_queue *cfqq = RQ_CFQQ(req);
  516. cfq_reposition_rq_rb(cfqq, req);
  517. }
  518. }
  519. static void
  520. cfq_merged_requests(request_queue_t *q, struct request *rq,
  521. struct request *next)
  522. {
  523. /*
  524. * reposition in fifo if next is older than rq
  525. */
  526. if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
  527. time_before(next->start_time, rq->start_time))
  528. list_move(&rq->queuelist, &next->queuelist);
  529. cfq_remove_request(next);
  530. }
  531. static int cfq_allow_merge(request_queue_t *q, struct request *rq,
  532. struct bio *bio)
  533. {
  534. struct cfq_data *cfqd = q->elevator->elevator_data;
  535. const int rw = bio_data_dir(bio);
  536. struct cfq_queue *cfqq;
  537. pid_t key;
  538. /*
  539. * Disallow merge of a sync bio into an async request.
  540. */
  541. if ((bio_data_dir(bio) == READ || bio_sync(bio)) && !rq_is_sync(rq))
  542. return 0;
  543. /*
  544. * Lookup the cfqq that this bio will be queued with. Allow
  545. * merge only if rq is queued there.
  546. */
  547. key = cfq_queue_pid(current, rw, bio_sync(bio));
  548. cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);
  549. if (cfqq == RQ_CFQQ(rq))
  550. return 1;
  551. return 0;
  552. }
  553. static inline void
  554. __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  555. {
  556. if (cfqq) {
  557. /*
  558. * stop potential idle class queues waiting for service
  559. */
  560. del_timer(&cfqd->idle_class_timer);
  561. cfqq->slice_end = 0;
  562. cfqq->slice_left = 0;
  563. cfq_clear_cfqq_must_alloc_slice(cfqq);
  564. cfq_clear_cfqq_fifo_expire(cfqq);
  565. cfq_mark_cfqq_slice_new(cfqq);
  566. }
  567. cfqd->active_queue = cfqq;
  568. }
  569. /*
  570. * current cfqq expired its slice (or was too idle), select new one
  571. */
  572. static void
  573. __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  574. int preempted)
  575. {
  576. unsigned long now = jiffies;
  577. if (cfq_cfqq_wait_request(cfqq))
  578. del_timer(&cfqd->idle_slice_timer);
  579. if (!preempted && !cfq_cfqq_dispatched(cfqq))
  580. cfq_schedule_dispatch(cfqd);
  581. cfq_clear_cfqq_must_dispatch(cfqq);
  582. cfq_clear_cfqq_wait_request(cfqq);
  583. cfq_clear_cfqq_queue_new(cfqq);
  584. /*
  585. * store what was left of this slice, if the queue idled out
  586. * or was preempted
  587. */
  588. if (cfq_slice_used(cfqq))
  589. cfqq->slice_left = cfqq->slice_end - now;
  590. else
  591. cfqq->slice_left = 0;
  592. cfq_resort_rr_list(cfqq, preempted);
  593. if (cfqq == cfqd->active_queue)
  594. cfqd->active_queue = NULL;
  595. if (cfqd->active_cic) {
  596. put_io_context(cfqd->active_cic->ioc);
  597. cfqd->active_cic = NULL;
  598. }
  599. cfqd->dispatch_slice = 0;
  600. }
  601. static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted)
  602. {
  603. struct cfq_queue *cfqq = cfqd->active_queue;
  604. if (cfqq)
  605. __cfq_slice_expired(cfqd, cfqq, preempted);
  606. }
  607. /*
  608. * 0
  609. * 0,1
  610. * 0,1,2
  611. * 0,1,2,3
  612. * 0,1,2,3,4
  613. * 0,1,2,3,4,5
  614. * 0,1,2,3,4,5,6
  615. * 0,1,2,3,4,5,6,7
  616. */
  617. static int cfq_get_next_prio_level(struct cfq_data *cfqd)
  618. {
  619. int prio, wrap;
  620. prio = -1;
  621. wrap = 0;
  622. do {
  623. int p;
  624. for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) {
  625. if (!list_empty(&cfqd->rr_list[p])) {
  626. prio = p;
  627. break;
  628. }
  629. }
  630. if (prio != -1)
  631. break;
  632. cfqd->cur_prio = 0;
  633. if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
  634. cfqd->cur_end_prio = 0;
  635. if (wrap)
  636. break;
  637. wrap = 1;
  638. }
  639. } while (1);
  640. if (unlikely(prio == -1))
  641. return -1;
  642. BUG_ON(prio >= CFQ_PRIO_LISTS);
  643. list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr);
  644. cfqd->cur_prio = prio + 1;
  645. if (cfqd->cur_prio > cfqd->cur_end_prio) {
  646. cfqd->cur_end_prio = cfqd->cur_prio;
  647. cfqd->cur_prio = 0;
  648. }
  649. if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
  650. cfqd->cur_prio = 0;
  651. cfqd->cur_end_prio = 0;
  652. }
  653. return prio;
  654. }
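/*
 * The cur_prio/cur_end_prio window above produces the pattern in the
 * comment before this function: when a non-empty level p is found its
 * rr_list is spliced onto cur_rr and the next scan starts at p + 1;
 * once the scan runs past cur_end_prio the window grows by one level
 * and restarts from prio 0.  Over successive passes this yields 0,
 * then 0,1, then 0,1,2 and so on, so higher priority levels are
 * visited more often and receive a larger share of dispatch rounds.
 */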
  655. static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
  656. {
  657. struct cfq_queue *cfqq = NULL;
  658. if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1) {
  659. /*
  660. * if current list is non-empty, grab first entry. if it is
  661. * empty, get next prio level and grab first entry then if any
  662. * are spliced
  663. */
  664. cfqq = list_entry_cfqq(cfqd->cur_rr.next);
  665. } else if (!list_empty(&cfqd->busy_rr)) {
  666. /*
  667. * If no new queues are available, check if the busy list has
  668. * some before falling back to idle io.
  669. */
  670. cfqq = list_entry_cfqq(cfqd->busy_rr.next);
  671. } else if (!list_empty(&cfqd->idle_rr)) {
  672. /*
  673. * if we have idle queues and no rt or be queues had pending
  674. * requests, either allow immediate service if the grace period
  675. * has passed or arm the idle grace timer
  676. */
  677. unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
  678. if (time_after_eq(jiffies, end))
  679. cfqq = list_entry_cfqq(cfqd->idle_rr.next);
  680. else
  681. mod_timer(&cfqd->idle_class_timer, end);
  682. }
  683. __cfq_set_active_queue(cfqd, cfqq);
  684. return cfqq;
  685. }
  686. #define CIC_SEEKY(cic) ((cic)->seek_mean > (128 * 1024))
  687. static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  688. {
  689. struct cfq_io_context *cic;
  690. unsigned long sl;
  691. WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
  692. WARN_ON(cfqq != cfqd->active_queue);
  693. /*
  694. * idle is disabled, either manually or by past process history
  695. */
  696. if (!cfqd->cfq_slice_idle)
  697. return 0;
  698. if (!cfq_cfqq_idle_window(cfqq))
  699. return 0;
  700. /*
  701. * task has exited, don't wait
  702. */
  703. cic = cfqd->active_cic;
  704. if (!cic || !cic->ioc->task)
  705. return 0;
  706. cfq_mark_cfqq_must_dispatch(cfqq);
  707. cfq_mark_cfqq_wait_request(cfqq);
  708. sl = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);
  709. /*
  710. * we don't want to idle for seeks, but we do want to allow
  711. * fair distribution of slice time for a process doing back-to-back
  712. * seeks. so allow a little bit of time for it to submit a new rq
  713. */
  714. if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
  715. sl = min(sl, msecs_to_jiffies(2));
  716. mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
  717. return 1;
  718. }
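/*
 * So a sync queue that runs out of requests is not expired at once:
 * the idle_slice_timer is armed for at most cfq_slice_idle jiffies
 * (HZ/125 by default, roughly 8ms if HZ=1000) in the hope that the
 * task submits another nearby request before the slice is given up.
 * Processes that look seeky (seek_mean above 128*1024 sectors, see
 * CIC_SEEKY) only get about 2ms of grace so they cannot keep the
 * disk idle for long.
 */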
  719. static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
  720. {
  721. struct cfq_data *cfqd = q->elevator->elevator_data;
  722. struct cfq_queue *cfqq = RQ_CFQQ(rq);
  723. cfq_remove_request(rq);
  724. cfqq->on_dispatch[rq_is_sync(rq)]++;
  725. elv_dispatch_sort(q, rq);
  726. rq = list_entry(q->queue_head.prev, struct request, queuelist);
  727. cfqd->last_sector = rq->sector + rq->nr_sectors;
  728. }
  729. /*
  730. * return expired entry, or NULL to just start from scratch in rbtree
  731. */
  732. static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
  733. {
  734. struct cfq_data *cfqd = cfqq->cfqd;
  735. struct request *rq;
  736. int fifo;
  737. if (cfq_cfqq_fifo_expire(cfqq))
  738. return NULL;
  739. if (list_empty(&cfqq->fifo))
  740. return NULL;
  741. fifo = cfq_cfqq_class_sync(cfqq);
  742. rq = rq_entry_fifo(cfqq->fifo.next);
  743. if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
  744. cfq_mark_cfqq_fifo_expire(cfqq);
  745. return rq;
  746. }
  747. return NULL;
  748. }
  749. static inline int
  750. cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  751. {
  752. const int base_rq = cfqd->cfq_slice_async_rq;
  753. WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
  754. return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
  755. }
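/*
 * With the default cfq_slice_async_rq = 2 the cap above works out to:
 *
 *	ioprio 0: 2 * (2 + 2 * 7) = 32 requests per slice
 *	ioprio 4: 2 * (2 + 2 * 3) = 16 requests per slice
 *	ioprio 7: 2 * (2 + 2 * 0) =  4 requests per slice
 *
 * i.e. higher priority async queues may dispatch more requests before
 * __cfq_dispatch_requests() forcibly ends their slice.
 */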
  756. /*
  757. * get next queue for service
  758. */
  759. static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
  760. {
  761. struct cfq_queue *cfqq;
  762. cfqq = cfqd->active_queue;
  763. if (!cfqq)
  764. goto new_queue;
  765. /*
  766. * slice has expired
  767. */
  768. if (!cfq_cfqq_must_dispatch(cfqq) && cfq_slice_used(cfqq))
  769. goto expire;
  770. /*
  771. * if queue has requests, dispatch one. if not, check if
  772. * enough slice is left to wait for one
  773. */
  774. if (!RB_EMPTY_ROOT(&cfqq->sort_list))
  775. goto keep_queue;
  776. else if (cfq_cfqq_slice_new(cfqq) || cfq_cfqq_dispatched(cfqq)) {
  777. cfqq = NULL;
  778. goto keep_queue;
  779. } else if (cfq_cfqq_class_sync(cfqq)) {
  780. if (cfq_arm_slice_timer(cfqd, cfqq))
  781. return NULL;
  782. }
  783. expire:
  784. cfq_slice_expired(cfqd, 0);
  785. new_queue:
  786. cfqq = cfq_set_active_queue(cfqd);
  787. keep_queue:
  788. return cfqq;
  789. }
  790. static int
  791. __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  792. int max_dispatch)
  793. {
  794. int dispatched = 0;
  795. BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
  796. do {
  797. struct request *rq;
  798. /*
  799. * follow expired path, else get first next available
  800. */
  801. if ((rq = cfq_check_fifo(cfqq)) == NULL)
  802. rq = cfqq->next_rq;
  803. /*
  804. * finally, insert request into driver dispatch list
  805. */
  806. cfq_dispatch_insert(cfqd->queue, rq);
  807. cfqd->dispatch_slice++;
  808. dispatched++;
  809. if (!cfqd->active_cic) {
  810. atomic_inc(&RQ_CIC(rq)->ioc->refcount);
  811. cfqd->active_cic = RQ_CIC(rq);
  812. }
  813. if (RB_EMPTY_ROOT(&cfqq->sort_list))
  814. break;
  815. } while (dispatched < max_dispatch);
  816. /*
  817. * expire an async queue immediately if it has used up its slice. idle
  818. * queues always expire after 1 dispatch round.
  819. */
  820. if ((!cfq_cfqq_sync(cfqq) &&
  821. cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
  822. cfq_class_idle(cfqq)) {
  823. cfqq->slice_end = jiffies + 1;
  824. cfq_slice_expired(cfqd, 0);
  825. }
  826. return dispatched;
  827. }
  828. static int
  829. cfq_forced_dispatch_cfqqs(struct list_head *list)
  830. {
  831. struct cfq_queue *cfqq, *next;
  832. int dispatched;
  833. dispatched = 0;
  834. list_for_each_entry_safe(cfqq, next, list, cfq_list) {
  835. while (cfqq->next_rq) {
  836. cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
  837. dispatched++;
  838. }
  839. BUG_ON(!list_empty(&cfqq->fifo));
  840. }
  841. return dispatched;
  842. }
  843. static int
  844. cfq_forced_dispatch(struct cfq_data *cfqd)
  845. {
  846. int i, dispatched = 0;
  847. for (i = 0; i < CFQ_PRIO_LISTS; i++)
  848. dispatched += cfq_forced_dispatch_cfqqs(&cfqd->rr_list[i]);
  849. dispatched += cfq_forced_dispatch_cfqqs(&cfqd->busy_rr);
  850. dispatched += cfq_forced_dispatch_cfqqs(&cfqd->cur_rr);
  851. dispatched += cfq_forced_dispatch_cfqqs(&cfqd->idle_rr);
  852. cfq_slice_expired(cfqd, 0);
  853. BUG_ON(cfqd->busy_queues);
  854. return dispatched;
  855. }
  856. static int
  857. cfq_dispatch_requests(request_queue_t *q, int force)
  858. {
  859. struct cfq_data *cfqd = q->elevator->elevator_data;
  860. struct cfq_queue *cfqq, *prev_cfqq;
  861. int dispatched;
  862. if (!cfqd->busy_queues)
  863. return 0;
  864. if (unlikely(force))
  865. return cfq_forced_dispatch(cfqd);
  866. dispatched = 0;
  867. prev_cfqq = NULL;
  868. while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
  869. int max_dispatch;
  870. /*
  871. * Don't repeat dispatch from the previous queue.
  872. */
  873. if (prev_cfqq == cfqq)
  874. break;
  875. cfq_clear_cfqq_must_dispatch(cfqq);
  876. cfq_clear_cfqq_wait_request(cfqq);
  877. del_timer(&cfqd->idle_slice_timer);
  878. max_dispatch = cfqd->cfq_quantum;
  879. if (cfq_class_idle(cfqq))
  880. max_dispatch = 1;
  881. dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
  882. /*
  883. * If the dispatch cfqq has idling enabled and is still
  884. * the active queue, break out.
  885. */
  886. if (cfq_cfqq_idle_window(cfqq) && cfqd->active_queue)
  887. break;
  888. prev_cfqq = cfqq;
  889. }
  890. return dispatched;
  891. }
  892. /*
  893. * task holds one reference to the queue, dropped when task exits. each rq
  894. * in-flight on this queue also holds a reference, dropped when rq is freed.
  895. *
  896. * queue lock must be held here.
  897. */
  898. static void cfq_put_queue(struct cfq_queue *cfqq)
  899. {
  900. struct cfq_data *cfqd = cfqq->cfqd;
  901. BUG_ON(atomic_read(&cfqq->ref) <= 0);
  902. if (!atomic_dec_and_test(&cfqq->ref))
  903. return;
  904. BUG_ON(rb_first(&cfqq->sort_list));
  905. BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
  906. BUG_ON(cfq_cfqq_on_rr(cfqq));
  907. if (unlikely(cfqd->active_queue == cfqq))
  908. __cfq_slice_expired(cfqd, cfqq, 0);
  909. /*
  910. * it's on the empty list and still hashed
  911. */
  912. list_del(&cfqq->cfq_list);
  913. hlist_del(&cfqq->cfq_hash);
  914. kmem_cache_free(cfq_pool, cfqq);
  915. }
  916. static struct cfq_queue *
  917. __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
  918. const int hashval)
  919. {
  920. struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
  921. struct hlist_node *entry;
  922. struct cfq_queue *__cfqq;
  923. hlist_for_each_entry(__cfqq, entry, hash_list, cfq_hash) {
  924. const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio);
  925. if (__cfqq->key == key && (__p == prio || !prio))
  926. return __cfqq;
  927. }
  928. return NULL;
  929. }
  930. static struct cfq_queue *
  931. cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio)
  932. {
  933. return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
  934. }
  935. static void cfq_free_io_context(struct io_context *ioc)
  936. {
  937. struct cfq_io_context *__cic;
  938. struct rb_node *n;
  939. int freed = 0;
  940. while ((n = rb_first(&ioc->cic_root)) != NULL) {
  941. __cic = rb_entry(n, struct cfq_io_context, rb_node);
  942. rb_erase(&__cic->rb_node, &ioc->cic_root);
  943. kmem_cache_free(cfq_ioc_pool, __cic);
  944. freed++;
  945. }
  946. elv_ioc_count_mod(ioc_count, -freed);
  947. if (ioc_gone && !elv_ioc_count_read(ioc_count))
  948. complete(ioc_gone);
  949. }
  950. static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  951. {
  952. if (unlikely(cfqq == cfqd->active_queue))
  953. __cfq_slice_expired(cfqd, cfqq, 0);
  954. cfq_put_queue(cfqq);
  955. }
  956. static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
  957. struct cfq_io_context *cic)
  958. {
  959. list_del_init(&cic->queue_list);
  960. smp_wmb();
  961. cic->key = NULL;
  962. if (cic->cfqq[ASYNC]) {
  963. cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
  964. cic->cfqq[ASYNC] = NULL;
  965. }
  966. if (cic->cfqq[SYNC]) {
  967. cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
  968. cic->cfqq[SYNC] = NULL;
  969. }
  970. }
  971. /*
  972. * Called with interrupts disabled
  973. */
  974. static void cfq_exit_single_io_context(struct cfq_io_context *cic)
  975. {
  976. struct cfq_data *cfqd = cic->key;
  977. if (cfqd) {
  978. request_queue_t *q = cfqd->queue;
  979. spin_lock_irq(q->queue_lock);
  980. __cfq_exit_single_io_context(cfqd, cic);
  981. spin_unlock_irq(q->queue_lock);
  982. }
  983. }
  984. static void cfq_exit_io_context(struct io_context *ioc)
  985. {
  986. struct cfq_io_context *__cic;
  987. struct rb_node *n;
  988. /*
  989. * put the reference this task is holding to the various queues
  990. */
  991. n = rb_first(&ioc->cic_root);
  992. while (n != NULL) {
  993. __cic = rb_entry(n, struct cfq_io_context, rb_node);
  994. cfq_exit_single_io_context(__cic);
  995. n = rb_next(n);
  996. }
  997. }
  998. static struct cfq_io_context *
  999. cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
  1000. {
  1001. struct cfq_io_context *cic;
  1002. cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
  1003. if (cic) {
  1004. memset(cic, 0, sizeof(*cic));
  1005. cic->last_end_request = jiffies;
  1006. INIT_LIST_HEAD(&cic->queue_list);
  1007. cic->dtor = cfq_free_io_context;
  1008. cic->exit = cfq_exit_io_context;
  1009. elv_ioc_count_inc(ioc_count);
  1010. }
  1011. return cic;
  1012. }
  1013. static void cfq_init_prio_data(struct cfq_queue *cfqq)
  1014. {
  1015. struct task_struct *tsk = current;
  1016. int ioprio_class;
  1017. if (!cfq_cfqq_prio_changed(cfqq))
  1018. return;
  1019. ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
  1020. switch (ioprio_class) {
  1021. default:
  1022. printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
  1023. case IOPRIO_CLASS_NONE:
  1024. /*
  1025. * no prio set, place us in the middle of the BE classes
  1026. */
  1027. cfqq->ioprio = task_nice_ioprio(tsk);
  1028. cfqq->ioprio_class = IOPRIO_CLASS_BE;
  1029. break;
  1030. case IOPRIO_CLASS_RT:
  1031. cfqq->ioprio = task_ioprio(tsk);
  1032. cfqq->ioprio_class = IOPRIO_CLASS_RT;
  1033. break;
  1034. case IOPRIO_CLASS_BE:
  1035. cfqq->ioprio = task_ioprio(tsk);
  1036. cfqq->ioprio_class = IOPRIO_CLASS_BE;
  1037. break;
  1038. case IOPRIO_CLASS_IDLE:
  1039. cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
  1040. cfqq->ioprio = 7;
  1041. cfq_clear_cfqq_idle_window(cfqq);
  1042. break;
  1043. }
  1044. /*
  1045. * keep track of original prio settings in case we have to temporarily
  1046. * elevate the priority of this queue
  1047. */
  1048. cfqq->org_ioprio = cfqq->ioprio;
  1049. cfqq->org_ioprio_class = cfqq->ioprio_class;
  1050. cfq_resort_rr_list(cfqq, 0);
  1051. cfq_clear_cfqq_prio_changed(cfqq);
  1052. }
  1053. static inline void changed_ioprio(struct cfq_io_context *cic)
  1054. {
  1055. struct cfq_data *cfqd = cic->key;
  1056. struct cfq_queue *cfqq;
  1057. unsigned long flags;
  1058. if (unlikely(!cfqd))
  1059. return;
  1060. spin_lock_irqsave(cfqd->queue->queue_lock, flags);
  1061. cfqq = cic->cfqq[ASYNC];
  1062. if (cfqq) {
  1063. struct cfq_queue *new_cfqq;
  1064. new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, cic->ioc->task,
  1065. GFP_ATOMIC);
  1066. if (new_cfqq) {
  1067. cic->cfqq[ASYNC] = new_cfqq;
  1068. cfq_put_queue(cfqq);
  1069. }
  1070. }
  1071. cfqq = cic->cfqq[SYNC];
  1072. if (cfqq)
  1073. cfq_mark_cfqq_prio_changed(cfqq);
  1074. spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
  1075. }
  1076. static void cfq_ioc_set_ioprio(struct io_context *ioc)
  1077. {
  1078. struct cfq_io_context *cic;
  1079. struct rb_node *n;
  1080. ioc->ioprio_changed = 0;
  1081. n = rb_first(&ioc->cic_root);
  1082. while (n != NULL) {
  1083. cic = rb_entry(n, struct cfq_io_context, rb_node);
  1084. changed_ioprio(cic);
  1085. n = rb_next(n);
  1086. }
  1087. }
  1088. static struct cfq_queue *
  1089. cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk,
  1090. gfp_t gfp_mask)
  1091. {
  1092. const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
  1093. struct cfq_queue *cfqq, *new_cfqq = NULL;
  1094. unsigned short ioprio;
  1095. retry:
  1096. ioprio = tsk->ioprio;
  1097. cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);
  1098. if (!cfqq) {
  1099. if (new_cfqq) {
  1100. cfqq = new_cfqq;
  1101. new_cfqq = NULL;
  1102. } else if (gfp_mask & __GFP_WAIT) {
  1103. /*
  1104. * Inform the allocator of the fact that we will
  1105. * just repeat this allocation if it fails, to allow
  1106. * the allocator to do whatever it needs to attempt to
  1107. * free memory.
  1108. */
  1109. spin_unlock_irq(cfqd->queue->queue_lock);
  1110. new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
  1111. spin_lock_irq(cfqd->queue->queue_lock);
  1112. goto retry;
  1113. } else {
  1114. cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
  1115. if (!cfqq)
  1116. goto out;
  1117. }
  1118. memset(cfqq, 0, sizeof(*cfqq));
  1119. INIT_HLIST_NODE(&cfqq->cfq_hash);
  1120. INIT_LIST_HEAD(&cfqq->cfq_list);
  1121. INIT_LIST_HEAD(&cfqq->fifo);
  1122. cfqq->key = key;
  1123. hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
  1124. atomic_set(&cfqq->ref, 0);
  1125. cfqq->cfqd = cfqd;
  1126. /*
  1127. * set ->slice_left to allow preemption for a new process
  1128. */
  1129. cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
  1130. cfq_mark_cfqq_idle_window(cfqq);
  1131. cfq_mark_cfqq_prio_changed(cfqq);
  1132. cfq_mark_cfqq_queue_new(cfqq);
  1133. cfq_init_prio_data(cfqq);
  1134. }
  1135. if (new_cfqq)
  1136. kmem_cache_free(cfq_pool, new_cfqq);
  1137. atomic_inc(&cfqq->ref);
  1138. out:
  1139. WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
  1140. return cfqq;
  1141. }
  1142. static void
  1143. cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
  1144. {
  1145. WARN_ON(!list_empty(&cic->queue_list));
  1146. rb_erase(&cic->rb_node, &ioc->cic_root);
  1147. kmem_cache_free(cfq_ioc_pool, cic);
  1148. elv_ioc_count_dec(ioc_count);
  1149. }
  1150. static struct cfq_io_context *
  1151. cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
  1152. {
  1153. struct rb_node *n;
  1154. struct cfq_io_context *cic;
  1155. void *k, *key = cfqd;
  1156. restart:
  1157. n = ioc->cic_root.rb_node;
  1158. while (n) {
  1159. cic = rb_entry(n, struct cfq_io_context, rb_node);
  1160. /* ->key must be copied to avoid race with cfq_exit_queue() */
  1161. k = cic->key;
  1162. if (unlikely(!k)) {
  1163. cfq_drop_dead_cic(ioc, cic);
  1164. goto restart;
  1165. }
  1166. if (key < k)
  1167. n = n->rb_left;
  1168. else if (key > k)
  1169. n = n->rb_right;
  1170. else
  1171. return cic;
  1172. }
  1173. return NULL;
  1174. }
  1175. static inline void
  1176. cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
  1177. struct cfq_io_context *cic)
  1178. {
  1179. struct rb_node **p;
  1180. struct rb_node *parent;
  1181. struct cfq_io_context *__cic;
  1182. unsigned long flags;
  1183. void *k;
  1184. cic->ioc = ioc;
  1185. cic->key = cfqd;
  1186. restart:
  1187. parent = NULL;
  1188. p = &ioc->cic_root.rb_node;
  1189. while (*p) {
  1190. parent = *p;
  1191. __cic = rb_entry(parent, struct cfq_io_context, rb_node);
  1192. /* ->key must be copied to avoid race with cfq_exit_queue() */
  1193. k = __cic->key;
  1194. if (unlikely(!k)) {
  1195. cfq_drop_dead_cic(ioc, __cic);
  1196. goto restart;
  1197. }
  1198. if (cic->key < k)
  1199. p = &(*p)->rb_left;
  1200. else if (cic->key > k)
  1201. p = &(*p)->rb_right;
  1202. else
  1203. BUG();
  1204. }
  1205. rb_link_node(&cic->rb_node, parent, p);
  1206. rb_insert_color(&cic->rb_node, &ioc->cic_root);
  1207. spin_lock_irqsave(cfqd->queue->queue_lock, flags);
  1208. list_add(&cic->queue_list, &cfqd->cic_list);
  1209. spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
  1210. }
  1211. /*
  1212. * Setup general io context and cfq io context. There can be several cfq
  1213. * io contexts per general io context, if this process is doing io to more
  1214. * than one device managed by cfq.
  1215. */
  1216. static struct cfq_io_context *
  1217. cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
  1218. {
  1219. struct io_context *ioc = NULL;
  1220. struct cfq_io_context *cic;
  1221. might_sleep_if(gfp_mask & __GFP_WAIT);
  1222. ioc = get_io_context(gfp_mask, cfqd->queue->node);
  1223. if (!ioc)
  1224. return NULL;
  1225. cic = cfq_cic_rb_lookup(cfqd, ioc);
  1226. if (cic)
  1227. goto out;
  1228. cic = cfq_alloc_io_context(cfqd, gfp_mask);
  1229. if (cic == NULL)
  1230. goto err;
  1231. cfq_cic_link(cfqd, ioc, cic);
  1232. out:
  1233. smp_read_barrier_depends();
  1234. if (unlikely(ioc->ioprio_changed))
  1235. cfq_ioc_set_ioprio(ioc);
  1236. return cic;
  1237. err:
  1238. put_io_context(ioc);
  1239. return NULL;
  1240. }
  1241. static void
  1242. cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
  1243. {
  1244. unsigned long elapsed = jiffies - cic->last_end_request;
  1245. unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
  1246. cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
  1247. cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
  1248. cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
  1249. }
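/*
 * The think time statistics above are an exponentially weighted moving
 * average: both the sample count and the total decay by 7/8 on every
 * update, with each new sample carrying a weight of 256, so ttime_mean
 * tracks a recent average think time in jiffies.  ttime_samples
 * converges toward 256, and sample_valid() (> 80) is met after roughly
 * three samples.
 */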
  1250. static void
  1251. cfq_update_io_seektime(struct cfq_io_context *cic, struct request *rq)
  1252. {
  1253. sector_t sdist;
  1254. u64 total;
  1255. if (cic->last_request_pos < rq->sector)
  1256. sdist = rq->sector - cic->last_request_pos;
  1257. else
  1258. sdist = cic->last_request_pos - rq->sector;
  1259. /*
  1260. * Don't allow the seek distance to get too large from the
  1261. * odd fragment, pagein, etc
  1262. */
  1263. if (cic->seek_samples <= 60) /* second&third seek */
  1264. sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
  1265. else
  1266. sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);
  1267. cic->seek_samples = (7*cic->seek_samples + 256) / 8;
  1268. cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
  1269. total = cic->seek_total + (cic->seek_samples/2);
  1270. do_div(total, cic->seek_samples);
  1271. cic->seek_mean = (sector_t)total;
  1272. }
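/*
 * seek_mean is maintained the same way, as a decayed average of the
 * absolute sector distance between consecutive requests from this
 * context, clamped so that a single large outlier cannot blow up the
 * estimate.  CIC_SEEKY() compares this mean against 128*1024 sectors
 * to decide whether idling on behalf of the process is worthwhile.
 */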
  1273. /*
  1274. * Disable idle window if the process thinks too long or seeks so much that
  1275. * it doesn't matter
  1276. */
  1277. static void
  1278. cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  1279. struct cfq_io_context *cic)
  1280. {
  1281. int enable_idle = cfq_cfqq_idle_window(cfqq);
  1282. if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
  1283. (cfqd->hw_tag && CIC_SEEKY(cic)))
  1284. enable_idle = 0;
  1285. else if (sample_valid(cic->ttime_samples)) {
  1286. if (cic->ttime_mean > cfqd->cfq_slice_idle)
  1287. enable_idle = 0;
  1288. else
  1289. enable_idle = 1;
  1290. }
  1291. if (enable_idle)
  1292. cfq_mark_cfqq_idle_window(cfqq);
  1293. else
  1294. cfq_clear_cfqq_idle_window(cfqq);
  1295. }
  1296. /*
  1297. * Check if new_cfqq should preempt the currently active queue. Return 0 for
  1298. * no (or if we aren't sure); returning 1 will cause a preempt.
  1299. */
  1300. static int
  1301. cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
  1302. struct request *rq)
  1303. {
  1304. struct cfq_queue *cfqq = cfqd->active_queue;
  1305. if (cfq_class_idle(new_cfqq))
  1306. return 0;
  1307. if (!cfqq)
  1308. return 0;
  1309. if (cfq_class_idle(cfqq))
  1310. return 1;
  1311. if (!cfq_cfqq_wait_request(new_cfqq))
  1312. return 0;
  1313. /*
  1314. * if it doesn't have slice left, forget it
  1315. */
  1316. if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
  1317. return 0;
  1318. /*
  1319. * if the new request is sync, but the currently running queue is
  1320. * not, let the sync request have priority.
  1321. */
  1322. if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
  1323. return 1;
  1324. /*
  1325. * So both queues are sync. Let the new request get disk time if
  1326. * it's a metadata request and the current queue is doing regular IO.
  1327. */
  1328. if (rq_is_meta(rq) && !cfqq->meta_pending)
  1329. return 1;
  1330. return 0;
  1331. }
  1332. /*
  1333. * cfqq preempts the active queue. if we allowed preempt with no slice left,
  1334. * let it have half of its nominal slice.
  1335. */
  1336. static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  1337. {
  1338. cfq_slice_expired(cfqd, 1);
  1339. if (!cfqq->slice_left)
  1340. cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;
  1341. /*
  1342. * Put the new queue at the front of the current list,
  1343. * so we know that it will be selected next.
  1344. */
  1345. BUG_ON(!cfq_cfqq_on_rr(cfqq));
  1346. list_move(&cfqq->cfq_list, &cfqd->cur_rr);
  1347. cfqq->slice_end = 0;
  1348. cfq_mark_cfqq_slice_new(cfqq);
  1349. }
  1350. /*
  1351. * Called when a new fs request (rq) is added (to cfqq). Check if there's
  1352. * something we should do about it
  1353. */
  1354. static void
  1355. cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  1356. struct request *rq)
  1357. {
  1358. struct cfq_io_context *cic = RQ_CIC(rq);
  1359. if (rq_is_meta(rq))
  1360. cfqq->meta_pending++;
  1361. /*
  1362. * check if this request is a better next-serve candidate
  1363. */
  1364. cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
  1365. BUG_ON(!cfqq->next_rq);
  1366. /*
  1367. * we never wait for an async request and we don't allow preemption
  1368. * of an async request. so just return early
  1369. */
  1370. if (!rq_is_sync(rq)) {
  1371. /*
  1372. * sync process issued an async request, if it's waiting
  1373. * then expire it and kick rq handling.
  1374. */
  1375. if (cic == cfqd->active_cic &&
  1376. del_timer(&cfqd->idle_slice_timer)) {
  1377. cfq_slice_expired(cfqd, 0);
  1378. blk_start_queueing(cfqd->queue);
  1379. }
  1380. return;
  1381. }
  1382. cfq_update_io_thinktime(cfqd, cic);
  1383. cfq_update_io_seektime(cic, rq);
  1384. cfq_update_idle_window(cfqd, cfqq, cic);
  1385. cic->last_request_pos = rq->sector + rq->nr_sectors;
  1386. if (cfqq == cfqd->active_queue) {
  1387. /*
  1388. * if we are waiting for a request for this queue, let it rip
  1389. * immediately and flag that we must not expire this queue
  1390. * just now
  1391. */
  1392. if (cfq_cfqq_wait_request(cfqq)) {
  1393. cfq_mark_cfqq_must_dispatch(cfqq);
  1394. del_timer(&cfqd->idle_slice_timer);
  1395. blk_start_queueing(cfqd->queue);
  1396. }
  1397. } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
  1398. /*
  1399. * not the active queue - expire current slice if it is
  1400. * idle and has expired its mean thinktime or this new queue
  1401. * has some old slice time left and is of higher priority
  1402. */
  1403. cfq_preempt_queue(cfqd, cfqq);
  1404. cfq_mark_cfqq_must_dispatch(cfqq);
  1405. blk_start_queueing(cfqd->queue);
  1406. }
  1407. }
  1408. static void cfq_insert_request(request_queue_t *q, struct request *rq)
  1409. {
  1410. struct cfq_data *cfqd = q->elevator->elevator_data;
  1411. struct cfq_queue *cfqq = RQ_CFQQ(rq);
  1412. cfq_init_prio_data(cfqq);
  1413. cfq_add_rq_rb(rq);
  1414. list_add_tail(&rq->queuelist, &cfqq->fifo);
  1415. cfq_rq_enqueued(cfqd, cfqq, rq);
  1416. }
static void cfq_completed_request(request_queue_t *q, struct request *rq)
{
        struct cfq_queue *cfqq = RQ_CFQQ(rq);
        struct cfq_data *cfqd = cfqq->cfqd;
        const int sync = rq_is_sync(rq);
        unsigned long now;

        now = jiffies;

        WARN_ON(!cfqd->rq_in_driver);
        WARN_ON(!cfqq->on_dispatch[sync]);
        cfqd->rq_in_driver--;
        cfqq->on_dispatch[sync]--;
        cfqq->service_last = now;

        if (!cfq_class_idle(cfqq))
                cfqd->last_end_request = now;

        cfq_resort_rr_list(cfqq, 0);

        if (sync)
                RQ_CIC(rq)->last_end_request = now;

        /*
         * If this is the active queue, check if it needs to be expired,
         * or if we want to idle in case it has no pending requests.
         */
        if (cfqd->active_queue == cfqq) {
                if (cfq_cfqq_slice_new(cfqq)) {
                        cfq_set_prio_slice(cfqd, cfqq);
                        cfq_clear_cfqq_slice_new(cfqq);
                }
                if (cfq_slice_used(cfqq))
                        cfq_slice_expired(cfqd, 0);
                else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list)) {
                        if (!cfq_arm_slice_timer(cfqd, cfqq))
                                cfq_schedule_dispatch(cfqd);
                }
        }
}

/*
 * we temporarily boost lower priority queues if they are holding fs exclusive
 * resources. they are boosted to normal prio (CLASS_BE/4)
 */
static void cfq_prio_boost(struct cfq_queue *cfqq)
{
        const int ioprio_class = cfqq->ioprio_class;
        const int ioprio = cfqq->ioprio;

        if (has_fs_excl()) {
                /*
                 * boost idle prio on transactions that would lock out other
                 * users of the filesystem
                 */
                if (cfq_class_idle(cfqq))
                        cfqq->ioprio_class = IOPRIO_CLASS_BE;
                if (cfqq->ioprio > IOPRIO_NORM)
                        cfqq->ioprio = IOPRIO_NORM;
        } else {
                /*
                 * check if we need to unboost the queue
                 */
                if (cfqq->ioprio_class != cfqq->org_ioprio_class)
                        cfqq->ioprio_class = cfqq->org_ioprio_class;
                if (cfqq->ioprio != cfqq->org_ioprio)
                        cfqq->ioprio = cfqq->org_ioprio;
        }

        /*
         * refile between round-robin lists if we moved the priority class
         */
        if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio))
                cfq_resort_rr_list(cfqq, 0);
}
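
/*
 * A queue that is waiting for a request, or that has been flagged
 * must_alloc, is granted one guaranteed allocation per slice (tracked by
 * the must_alloc_slice flag); everything else only "may" queue.
 */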
static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
        if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
            !cfq_cfqq_must_alloc_slice(cfqq)) {
                cfq_mark_cfqq_must_alloc_slice(cfqq);
                return ELV_MQUEUE_MUST;
        }

        return ELV_MQUEUE_MAY;
}
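
/*
 * Elevator may_queue hook: look up the task's queue (without creating one)
 * and let __cfq_may_queue() decide whether this request must be allowed
 * through regardless of the normal request limits.
 */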
static int cfq_may_queue(request_queue_t *q, int rw)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct task_struct *tsk = current;
        struct cfq_queue *cfqq;
        unsigned int key;

        key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);

        /*
         * don't force setup of a queue from here, as a call to may_queue
         * does not necessarily imply that a request actually will be queued.
         * so just lookup a possibly existing queue, or return 'may queue'
         * if that fails
         */
        cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
        if (cfqq) {
                cfq_init_prio_data(cfqq);
                cfq_prio_boost(cfqq);

                return __cfq_may_queue(cfqq);
        }

        return ELV_MQUEUE_MAY;
}

/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
        struct cfq_queue *cfqq = RQ_CFQQ(rq);

        if (cfqq) {
                const int rw = rq_data_dir(rq);

                BUG_ON(!cfqq->allocated[rw]);
                cfqq->allocated[rw]--;

                put_io_context(RQ_CIC(rq)->ioc);

                rq->elevator_private = NULL;
                rq->elevator_private2 = NULL;

                cfq_put_queue(cfqq);
        }
}

/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct task_struct *tsk = current;
        struct cfq_io_context *cic;
        const int rw = rq_data_dir(rq);
        const int is_sync = rq_is_sync(rq);
        pid_t key = cfq_queue_pid(tsk, rw, is_sync);
        struct cfq_queue *cfqq;
        unsigned long flags;

        might_sleep_if(gfp_mask & __GFP_WAIT);

        cic = cfq_get_io_context(cfqd, gfp_mask);

        spin_lock_irqsave(q->queue_lock, flags);

        if (!cic)
                goto queue_fail;

        if (!cic->cfqq[is_sync]) {
                cfqq = cfq_get_queue(cfqd, key, tsk, gfp_mask);
                if (!cfqq)
                        goto queue_fail;

                cic->cfqq[is_sync] = cfqq;
        } else
                cfqq = cic->cfqq[is_sync];

        cfqq->allocated[rw]++;
        cfq_clear_cfqq_must_alloc(cfqq);
        atomic_inc(&cfqq->ref);

        spin_unlock_irqrestore(q->queue_lock, flags);

        rq->elevator_private = cic;
        rq->elevator_private2 = cfqq;
        return 0;

queue_fail:
        if (cic)
                put_io_context(cic->ioc);

        cfq_schedule_dispatch(cfqd);
        spin_unlock_irqrestore(q->queue_lock, flags);
        return 1;
}
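
/*
 * Work handler scheduled by cfq_schedule_dispatch(): restart request
 * processing on the device queue with the queue lock held.
 */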
static void cfq_kick_queue(struct work_struct *work)
{
        struct cfq_data *cfqd =
                container_of(work, struct cfq_data, unplug_work);
        request_queue_t *q = cfqd->queue;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_start_queueing(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
        struct cfq_data *cfqd = (struct cfq_data *) data;
        struct cfq_queue *cfqq;
        unsigned long flags;

        spin_lock_irqsave(cfqd->queue->queue_lock, flags);

        if ((cfqq = cfqd->active_queue) != NULL) {
                /*
                 * expired
                 */
                if (cfq_slice_used(cfqq))
                        goto expire;

                /*
                 * only expire and reinvoke request handler, if there are
                 * other queues with pending requests
                 */
                if (!cfqd->busy_queues)
                        goto out_cont;

                /*
                 * not expired and it has a request pending, let it dispatch
                 */
                if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
                        cfq_mark_cfqq_must_dispatch(cfqq);
                        goto out_kick;
                }
        }
expire:
        cfq_slice_expired(cfqd, 0);
out_kick:
        cfq_schedule_dispatch(cfqd);
out_cont:
        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

/*
 * Timer running if an idle class queue is waiting for service
 */
static void cfq_idle_class_timer(unsigned long data)
{
        struct cfq_data *cfqd = (struct cfq_data *) data;
        unsigned long flags, end;

        spin_lock_irqsave(cfqd->queue->queue_lock, flags);

        /*
         * race with a non-idle queue, reset timer
         */
        end = cfqd->last_end_request + CFQ_IDLE_GRACE;
        if (!time_after_eq(jiffies, end))
                mod_timer(&cfqd->idle_class_timer, end);
        else
                cfq_schedule_dispatch(cfqd);

        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
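
/*
 * Stop both idle timers and flush any pending unplug work before the
 * elevator data is torn down.
 */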
static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
        del_timer_sync(&cfqd->idle_slice_timer);
        del_timer_sync(&cfqd->idle_class_timer);
        blk_sync_queue(cfqd->queue);
}
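
/*
 * Elevator exit hook: expire the active queue, detach every io context
 * still linked to this device, and free the queue hash and cfq_data.
 */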
static void cfq_exit_queue(elevator_t *e)
{
        struct cfq_data *cfqd = e->elevator_data;
        request_queue_t *q = cfqd->queue;

        cfq_shutdown_timer_wq(cfqd);

        spin_lock_irq(q->queue_lock);

        if (cfqd->active_queue)
                __cfq_slice_expired(cfqd, cfqd->active_queue, 0);

        while (!list_empty(&cfqd->cic_list)) {
                struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
                                                        struct cfq_io_context,
                                                        queue_list);

                __cfq_exit_single_io_context(cfqd, cic);
        }

        spin_unlock_irq(q->queue_lock);

        cfq_shutdown_timer_wq(cfqd);

        kfree(cfqd->cfq_hash);
        kfree(cfqd);
}
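
/*
 * Elevator init hook: allocate and zero the per-device cfq_data, set up the
 * round-robin lists, queue hash, timers and unplug work, and seed all
 * tunables with their module-level defaults.
 */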
static void *cfq_init_queue(request_queue_t *q)
{
        struct cfq_data *cfqd;
        int i;

        cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
        if (!cfqd)
                return NULL;

        memset(cfqd, 0, sizeof(*cfqd));

        for (i = 0; i < CFQ_PRIO_LISTS; i++)
                INIT_LIST_HEAD(&cfqd->rr_list[i]);

        INIT_LIST_HEAD(&cfqd->busy_rr);
        INIT_LIST_HEAD(&cfqd->cur_rr);
        INIT_LIST_HEAD(&cfqd->idle_rr);
        INIT_LIST_HEAD(&cfqd->cic_list);

        cfqd->cfq_hash = kmalloc_node(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL, q->node);
        if (!cfqd->cfq_hash)
                goto out_free;

        for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
                INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);

        cfqd->queue = q;

        init_timer(&cfqd->idle_slice_timer);
        cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
        cfqd->idle_slice_timer.data = (unsigned long) cfqd;

        init_timer(&cfqd->idle_class_timer);
        cfqd->idle_class_timer.function = cfq_idle_class_timer;
        cfqd->idle_class_timer.data = (unsigned long) cfqd;

        INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

        cfqd->cfq_quantum = cfq_quantum;
        cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
        cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
        cfqd->cfq_back_max = cfq_back_max;
        cfqd->cfq_back_penalty = cfq_back_penalty;
        cfqd->cfq_slice[0] = cfq_slice_async;
        cfqd->cfq_slice[1] = cfq_slice_sync;
        cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
        cfqd->cfq_slice_idle = cfq_slice_idle;

        return cfqd;
out_free:
        kfree(cfqd);
        return NULL;
}
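
/*
 * Destroy the slab caches for cfq_queue and cfq_io_context objects, if
 * they were created.
 */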
static void cfq_slab_kill(void)
{
        if (cfq_pool)
                kmem_cache_destroy(cfq_pool);
        if (cfq_ioc_pool)
                kmem_cache_destroy(cfq_ioc_pool);
}
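
/*
 * Create the slab caches used for cfq_queue and cfq_io_context allocations;
 * on failure, tear down whatever was already created.
 */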
static int __init cfq_slab_setup(void)
{
        cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
                                        NULL, NULL);
        if (!cfq_pool)
                goto fail;

        cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
                        sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
        if (!cfq_ioc_pool)
                goto fail;

        return 0;
fail:
        cfq_slab_kill();
        return -ENOMEM;
}

/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
        char *p = (char *) page;

        *var = simple_strtoul(p, &p, 10);
        return count;
}
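
/*
 * Generate the sysfs "show" handlers for the tunables below; __CONV selects
 * whether the stored jiffies value is converted to milliseconds for display.
 */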
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
static ssize_t __FUNC(elevator_t *e, char *page) \
{ \
        struct cfq_data *cfqd = e->elevator_data; \
        unsigned int __data = __VAR; \
        if (__CONV) \
                __data = jiffies_to_msecs(__data); \
        return cfq_var_show(__data, (page)); \
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
#undef SHOW_FUNCTION
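
/*
 * Generate the matching sysfs "store" handlers: parse the user value, clamp
 * it to [MIN, MAX] and, where __CONV is set, convert milliseconds back to
 * jiffies before storing.
 */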
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \
{ \
        struct cfq_data *cfqd = e->elevator_data; \
        unsigned int __data; \
        int ret = cfq_var_store(&__data, (page), count); \
        if (__data < (MIN)) \
                __data = (MIN); \
        else if (__data > (MAX)) \
                __data = (MAX); \
        if (__CONV) \
                *(__PTR) = msecs_to_jiffies(__data); \
        else \
                *(__PTR) = __data; \
        return ret; \
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
#undef STORE_FUNCTION

#define CFQ_ATTR(name) \
        __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
        CFQ_ATTR(quantum),
        CFQ_ATTR(fifo_expire_sync),
        CFQ_ATTR(fifo_expire_async),
        CFQ_ATTR(back_seek_max),
        CFQ_ATTR(back_seek_penalty),
        CFQ_ATTR(slice_sync),
        CFQ_ATTR(slice_async),
        CFQ_ATTR(slice_async_rq),
        CFQ_ATTR(slice_idle),
        __ATTR_NULL
};

static struct elevator_type iosched_cfq = {
        .ops = {
                .elevator_merge_fn = cfq_merge,
                .elevator_merged_fn = cfq_merged_request,
                .elevator_merge_req_fn = cfq_merged_requests,
                .elevator_allow_merge_fn = cfq_allow_merge,
                .elevator_dispatch_fn = cfq_dispatch_requests,
                .elevator_add_req_fn = cfq_insert_request,
                .elevator_activate_req_fn = cfq_activate_request,
                .elevator_deactivate_req_fn = cfq_deactivate_request,
                .elevator_queue_empty_fn = cfq_queue_empty,
                .elevator_completed_req_fn = cfq_completed_request,
                .elevator_former_req_fn = elv_rb_former_request,
                .elevator_latter_req_fn = elv_rb_latter_request,
                .elevator_set_req_fn = cfq_set_request,
                .elevator_put_req_fn = cfq_put_request,
                .elevator_may_queue_fn = cfq_may_queue,
                .elevator_init_fn = cfq_init_queue,
                .elevator_exit_fn = cfq_exit_queue,
                .trim = cfq_free_io_context,
        },
        .elevator_attrs = cfq_attrs,
        .elevator_name = "cfq",
        .elevator_owner = THIS_MODULE,
};

static int __init cfq_init(void)
{
        int ret;

        /*
         * could be 0 on HZ < 1000 setups
         */
        if (!cfq_slice_async)
                cfq_slice_async = 1;
        if (!cfq_slice_idle)
                cfq_slice_idle = 1;

        if (cfq_slab_setup())
                return -ENOMEM;

        ret = elv_register(&iosched_cfq);
        if (ret)
                cfq_slab_kill();

        return ret;
}
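
/*
 * Module exit: unregister the elevator, then wait until every outstanding
 * cfq_io_context has been freed (signalled through ioc_gone) before killing
 * the slab caches.
 */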
static void __exit cfq_exit(void)
{
        DECLARE_COMPLETION_ONSTACK(all_gone);

        elv_unregister(&iosched_cfq);
        ioc_gone = &all_gone;
        /* ioc_gone's update must be visible before reading ioc_count */
        smp_wmb();
        if (elv_ioc_count_read(ioc_count))
                wait_for_completion(ioc_gone);
        synchronize_rcu();
        cfq_slab_kill();
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");