cfq-iosched.c

  1. /*
  2. * CFQ, or complete fairness queueing, disk scheduler.
  3. *
  4. * Based on ideas from a previously unfinished io
  5. * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
  6. *
  7. * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
  8. */
  9. #include <linux/module.h>
  10. #include <linux/blkdev.h>
  11. #include <linux/elevator.h>
  12. #include <linux/hash.h>
  13. #include <linux/rbtree.h>
  14. #include <linux/ioprio.h>
  15. /*
  16. * tunables
  17. */
  18. static const int cfq_quantum = 4; /* max queue in one round of service */
  19. static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
  20. static const int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */
  21. static const int cfq_back_penalty = 2; /* penalty of a backwards seek */
  22. static const int cfq_slice_sync = HZ / 10;
  23. static int cfq_slice_async = HZ / 25;
  24. static const int cfq_slice_async_rq = 2;
  25. static int cfq_slice_idle = HZ / 125;
  26. /*
  27. * grace period before allowing idle class to get disk access
  28. */
  29. #define CFQ_IDLE_GRACE (HZ / 10)
  30. /*
  31. * below this threshold, we consider thinktime immediate
  32. */
  33. #define CFQ_MIN_TT (2)
  34. #define CFQ_SLICE_SCALE (5)
  35. #define CFQ_KEY_ASYNC (0)
  36. /*
  37. * for the hash of cfqq inside the cfqd
  38. */
  39. #define CFQ_QHASH_SHIFT 6
  40. #define CFQ_QHASH_ENTRIES (1 << CFQ_QHASH_SHIFT)
  41. #define RQ_CIC(rq) ((struct cfq_io_context*)(rq)->elevator_private)
  42. #define RQ_CFQQ(rq) ((rq)->elevator_private2)
  43. static struct kmem_cache *cfq_pool;
  44. static struct kmem_cache *cfq_ioc_pool;
  45. static DEFINE_PER_CPU(unsigned long, ioc_count);
  46. static struct completion *ioc_gone;
  47. #define CFQ_PRIO_LISTS IOPRIO_BE_NR
  48. #define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
  49. #define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
  50. #define ASYNC (0)
  51. #define SYNC (1)
  52. #define cfq_cfqq_sync(cfqq) ((cfqq)->key != CFQ_KEY_ASYNC)
  53. #define sample_valid(samples) ((samples) > 80)
  54. /*
  55. * Most of our rbtree usage is for sorting with min extraction, so
  56. * if we cache the leftmost node we don't have to walk down the tree
  57. * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
  58. * move this into the elevator for the rq sorting as well.
  59. */
  60. struct cfq_rb_root {
  61. struct rb_root rb;
  62. struct rb_node *left;
  63. };
  64. #define CFQ_RB_ROOT (struct cfq_rb_root) { RB_ROOT, NULL, }
  65. /*
  66. * Per block device queue structure
  67. */
  68. struct cfq_data {
  69. request_queue_t *queue;
  70. /*
  71. * rr list of queues with requests and the count of them
  72. */
  73. struct cfq_rb_root service_tree;
  74. unsigned int busy_queues;
  75. /*
  76. * cfqq lookup hash
  77. */
  78. struct hlist_head *cfq_hash;
  79. int rq_in_driver;
  80. int hw_tag;
  81. /*
  82. * idle window management
  83. */
  84. struct timer_list idle_slice_timer;
  85. struct work_struct unplug_work;
  86. struct cfq_queue *active_queue;
  87. struct cfq_io_context *active_cic;
  88. unsigned int dispatch_slice;
  89. struct timer_list idle_class_timer;
  90. sector_t last_position;
  91. unsigned long last_end_request;
  92. /*
  93. * tunables, see top of file
  94. */
  95. unsigned int cfq_quantum;
  96. unsigned int cfq_fifo_expire[2];
  97. unsigned int cfq_back_penalty;
  98. unsigned int cfq_back_max;
  99. unsigned int cfq_slice[2];
  100. unsigned int cfq_slice_async_rq;
  101. unsigned int cfq_slice_idle;
  102. struct list_head cic_list;
  103. sector_t new_seek_mean;
  104. u64 new_seek_total;
  105. };
  106. /*
  107. * Per process-grouping structure
  108. */
  109. struct cfq_queue {
  110. /* reference count */
  111. atomic_t ref;
  112. /* parent cfq_data */
  113. struct cfq_data *cfqd;
  114. /* cfqq lookup hash */
  115. struct hlist_node cfq_hash;
  116. /* hash key */
  117. unsigned int key;
  118. /* service_tree member */
  119. struct rb_node rb_node;
  120. /* service_tree key */
  121. unsigned long rb_key;
  122. /* sorted list of pending requests */
  123. struct rb_root sort_list;
  124. /* if fifo isn't expired, next request to serve */
  125. struct request *next_rq;
  126. /* requests queued in sort_list */
  127. int queued[2];
  128. /* currently allocated requests */
  129. int allocated[2];
  130. /* pending metadata requests */
  131. int meta_pending;
  132. /* fifo list of requests in sort_list */
  133. struct list_head fifo;
  134. unsigned long slice_end;
  135. long slice_resid;
  136. /* number of requests that are on the dispatch list or inside driver */
  137. int dispatched;
  138. /* io prio of this group */
  139. unsigned short ioprio, org_ioprio;
  140. unsigned short ioprio_class, org_ioprio_class;
  141. /* various state flags, see below */
  142. unsigned int flags;
  143. sector_t last_request_pos;
  144. };
  145. enum cfqq_state_flags {
  146. CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */
  147. CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */
  148. CFQ_CFQQ_FLAG_must_alloc, /* must be allowed rq alloc */
  149. CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
  150. CFQ_CFQQ_FLAG_must_dispatch, /* must dispatch, even if expired */
  151. CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
  152. CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */
  153. CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */
  154. CFQ_CFQQ_FLAG_queue_new, /* queue never been serviced */
  155. CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
  156. };
  157. #define CFQ_CFQQ_FNS(name) \
  158. static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
  159. { \
  160. cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
  161. } \
  162. static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
  163. { \
  164. cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
  165. } \
  166. static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
  167. { \
  168. return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
  169. }
  170. CFQ_CFQQ_FNS(on_rr);
  171. CFQ_CFQQ_FNS(wait_request);
  172. CFQ_CFQQ_FNS(must_alloc);
  173. CFQ_CFQQ_FNS(must_alloc_slice);
  174. CFQ_CFQQ_FNS(must_dispatch);
  175. CFQ_CFQQ_FNS(fifo_expire);
  176. CFQ_CFQQ_FNS(idle_window);
  177. CFQ_CFQQ_FNS(prio_changed);
  178. CFQ_CFQQ_FNS(queue_new);
  179. CFQ_CFQQ_FNS(slice_new);
  180. #undef CFQ_CFQQ_FNS
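/*
 * The CFQ_CFQQ_FNS() expansions above generate three helpers per state
 * flag: cfq_mark_cfqq_<name>() sets the bit in cfqq->flags,
 * cfq_clear_cfqq_<name>() clears it, and cfq_cfqq_<name>() tests it.
 * For example, cfq_mark_cfqq_on_rr(cfqq) sets CFQ_CFQQ_FLAG_on_rr.
 */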
  181. static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
  182. static void cfq_dispatch_insert(request_queue_t *, struct request *);
  183. static struct cfq_queue *cfq_get_queue(struct cfq_data *, unsigned int, struct task_struct *, gfp_t);
  184. /*
  185. * scheduler run of queue, if there are requests pending and no one in the
  186. * driver that will restart queueing
  187. */
  188. static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
  189. {
  190. if (cfqd->busy_queues)
  191. kblockd_schedule_work(&cfqd->unplug_work);
  192. }
  193. static int cfq_queue_empty(request_queue_t *q)
  194. {
  195. struct cfq_data *cfqd = q->elevator->elevator_data;
  196. return !cfqd->busy_queues;
  197. }
  198. static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
  199. {
  200. /*
  201. * Use the per-process queue for read requests and synchronous writes
  202. */
  203. if (!(rw & REQ_RW) || is_sync)
  204. return task->pid;
  205. return CFQ_KEY_ASYNC;
  206. }
  207. /*
  208. * Scale schedule slice based on io priority. Use the sync time slice only
  209. * if a queue is marked sync and has sync io queued. A sync queue with async
  210. * io only, should not get full sync slice length.
  211. */
  212. static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
  213. unsigned short prio)
  214. {
  215. const int base_slice = cfqd->cfq_slice[sync];
  216. WARN_ON(prio >= IOPRIO_BE_NR);
  217. return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
  218. }
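/*
 * For illustration, assuming HZ=1000 so that cfq_slice_sync = HZ/10 =
 * 100 jiffies: base_slice/CFQ_SLICE_SCALE is 20, so a sync best-effort
 * queue at prio 0 gets 100 + 20 * (4 - 0) = 180 jiffies, the default
 * prio 4 gets exactly 100, and prio 7 gets 100 + 20 * (4 - 7) = 40.
 */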
  219. static inline int
  220. cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  221. {
  222. return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
  223. }
  224. static inline void
  225. cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  226. {
  227. cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
  228. }
  229. /*
  230. * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
  231. * isn't valid until the first request from the dispatch is activated
  232. * and the slice time set.
  233. */
  234. static inline int cfq_slice_used(struct cfq_queue *cfqq)
  235. {
  236. if (cfq_cfqq_slice_new(cfqq))
  237. return 0;
  238. if (time_before(jiffies, cfqq->slice_end))
  239. return 0;
  240. return 1;
  241. }
  242. /*
  243. * Lifted from AS - choose which of rq1 and rq2 is best served now.
  244. * We choose the request that is closest to the head right now. Distance
  245. * behind the head is penalized and only allowed to a certain extent.
  246. */
  247. static struct request *
  248. cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
  249. {
  250. sector_t last, s1, s2, d1 = 0, d2 = 0;
  251. unsigned long back_max;
  252. #define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */
  253. #define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */
  254. unsigned wrap = 0; /* bit mask: requests behind the disk head? */
  255. if (rq1 == NULL || rq1 == rq2)
  256. return rq2;
  257. if (rq2 == NULL)
  258. return rq1;
  259. if (rq_is_sync(rq1) && !rq_is_sync(rq2))
  260. return rq1;
  261. else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
  262. return rq2;
  263. if (rq_is_meta(rq1) && !rq_is_meta(rq2))
  264. return rq1;
  265. else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
  266. return rq2;
  267. s1 = rq1->sector;
  268. s2 = rq2->sector;
  269. last = cfqd->last_position;
  270. /*
  271. * by definition, 1KiB is 2 sectors
  272. */
  273. back_max = cfqd->cfq_back_max * 2;
  274. /*
  275. * Strict one way elevator _except_ in the case where we allow
  276. * short backward seeks which are biased as twice the cost of a
  277. * similar forward seek.
  278. */
  279. if (s1 >= last)
  280. d1 = s1 - last;
  281. else if (s1 + back_max >= last)
  282. d1 = (last - s1) * cfqd->cfq_back_penalty;
  283. else
  284. wrap |= CFQ_RQ1_WRAP;
  285. if (s2 >= last)
  286. d2 = s2 - last;
  287. else if (s2 + back_max >= last)
  288. d2 = (last - s2) * cfqd->cfq_back_penalty;
  289. else
  290. wrap |= CFQ_RQ2_WRAP;
  291. /* Found required data */
  292. /*
  293. * By doing switch() on the bit mask "wrap" we avoid having to
  294. * check two variables for all permutations: --> faster!
  295. */
  296. switch (wrap) {
  297. case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
  298. if (d1 < d2)
  299. return rq1;
  300. else if (d2 < d1)
  301. return rq2;
  302. else {
  303. if (s1 >= s2)
  304. return rq1;
  305. else
  306. return rq2;
  307. }
  308. case CFQ_RQ2_WRAP:
  309. return rq1;
  310. case CFQ_RQ1_WRAP:
  311. return rq2;
  312. case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
  313. default:
  314. /*
  315. * Since both rqs are wrapped,
  316. * start with the one that's further behind head
  317. * (--> only *one* back seek required),
  318. * since back seek takes more time than forward.
  319. */
  320. if (s1 <= s2)
  321. return rq1;
  322. else
  323. return rq2;
  324. }
  325. }
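/*
 * For illustration, assume the default cfq_back_penalty of 2 and that
 * both requests are within back_max of the head at last_position = 1000:
 * rq1 at sector 1200 gives d1 = 200, while rq2 at sector 900 lies behind
 * the head and gives d2 = (1000 - 900) * 2 = 200. The tie is broken
 * towards the higher sector, so the forward request rq1 is chosen.
 */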
  326. /*
  327. * The below is leftmost cache rbtree addon
  328. */
  329. static struct rb_node *cfq_rb_first(struct cfq_rb_root *root)
  330. {
  331. if (!root->left)
  332. root->left = rb_first(&root->rb);
  333. return root->left;
  334. }
  335. static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
  336. {
  337. if (root->left == n)
  338. root->left = NULL;
  339. rb_erase(n, &root->rb);
  340. RB_CLEAR_NODE(n);
  341. }
  342. /*
  343. * would be nice to take fifo expire time into account as well
  344. */
  345. static struct request *
  346. cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  347. struct request *last)
  348. {
  349. struct rb_node *rbnext = rb_next(&last->rb_node);
  350. struct rb_node *rbprev = rb_prev(&last->rb_node);
  351. struct request *next = NULL, *prev = NULL;
  352. BUG_ON(RB_EMPTY_NODE(&last->rb_node));
  353. if (rbprev)
  354. prev = rb_entry_rq(rbprev);
  355. if (rbnext)
  356. next = rb_entry_rq(rbnext);
  357. else {
  358. rbnext = rb_first(&cfqq->sort_list);
  359. if (rbnext && rbnext != &last->rb_node)
  360. next = rb_entry_rq(rbnext);
  361. }
  362. return cfq_choose_req(cfqd, next, prev);
  363. }
  364. static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
  365. struct cfq_queue *cfqq)
  366. {
  367. /*
  368. * just an approximation, should be ok.
  369. */
  370. return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
  371. cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
  372. }
  373. /*
  374. * The cfqd->service_tree holds all pending cfq_queue's that have
  375. * requests waiting to be processed. It is sorted in the order that
  376. * we will service the queues.
  377. */
  378. static void cfq_service_tree_add(struct cfq_data *cfqd,
  379. struct cfq_queue *cfqq, int add_front)
  380. {
  381. struct rb_node **p = &cfqd->service_tree.rb.rb_node;
  382. struct rb_node *parent = NULL;
  383. unsigned long rb_key;
  384. int left;
  385. if (!add_front) {
  386. rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
  387. rb_key += cfqq->slice_resid;
  388. cfqq->slice_resid = 0;
  389. } else
  390. rb_key = 0;
  391. if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
  392. /*
  393. * same position, nothing more to do
  394. */
  395. if (rb_key == cfqq->rb_key)
  396. return;
  397. cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
  398. }
  399. left = 1;
  400. while (*p) {
  401. struct cfq_queue *__cfqq;
  402. struct rb_node **n;
  403. parent = *p;
  404. __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
  405. /*
  406. * sort RT queues first, we always want to give
  407. * preference to them. IDLE queues go to the back.
  408. * after that, sort on the next service time.
  409. */
  410. if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
  411. n = &(*p)->rb_left;
  412. else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
  413. n = &(*p)->rb_right;
  414. else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
  415. n = &(*p)->rb_left;
  416. else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
  417. n = &(*p)->rb_right;
  418. else if (rb_key < __cfqq->rb_key)
  419. n = &(*p)->rb_left;
  420. else
  421. n = &(*p)->rb_right;
  422. if (n == &(*p)->rb_right)
  423. left = 0;
  424. p = n;
  425. }
  426. if (left)
  427. cfqd->service_tree.left = &cfqq->rb_node;
  428. cfqq->rb_key = rb_key;
  429. rb_link_node(&cfqq->rb_node, parent, p);
  430. rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
  431. }
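/*
 * Ordering note: add_front forces rb_key to 0, so a preempting queue
 * sorts ahead of every queue keyed off jiffies. Otherwise, a lower
 * priority queue or a busier system yields a larger slice offset and
 * therefore a later position in the tree. Scheduling class still
 * dominates the key: RT queues sort before BE, and IDLE queues sort last.
 */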
  432. /*
  433. * Update cfqq's position in the service tree.
  434. */
  435. static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  436. {
  437. /*
  438. * Resorting requires the cfqq to be on the RR list already.
  439. */
  440. if (cfq_cfqq_on_rr(cfqq))
  441. cfq_service_tree_add(cfqd, cfqq, 0);
  442. }
  443. /*
  444. * add to busy list of queues for service, trying to be fair in ordering
  445. * the pending list according to last request service
  446. */
  447. static inline void
  448. cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  449. {
  450. BUG_ON(cfq_cfqq_on_rr(cfqq));
  451. cfq_mark_cfqq_on_rr(cfqq);
  452. cfqd->busy_queues++;
  453. cfq_resort_rr_list(cfqd, cfqq);
  454. }
  455. /*
  456. * Called when the cfqq no longer has requests pending, remove it from
  457. * the service tree.
  458. */
  459. static inline void
  460. cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  461. {
  462. BUG_ON(!cfq_cfqq_on_rr(cfqq));
  463. cfq_clear_cfqq_on_rr(cfqq);
  464. if (!RB_EMPTY_NODE(&cfqq->rb_node))
  465. cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
  466. BUG_ON(!cfqd->busy_queues);
  467. cfqd->busy_queues--;
  468. }
  469. /*
  470. * rb tree support functions
  471. */
  472. static inline void cfq_del_rq_rb(struct request *rq)
  473. {
  474. struct cfq_queue *cfqq = RQ_CFQQ(rq);
  475. struct cfq_data *cfqd = cfqq->cfqd;
  476. const int sync = rq_is_sync(rq);
  477. BUG_ON(!cfqq->queued[sync]);
  478. cfqq->queued[sync]--;
  479. elv_rb_del(&cfqq->sort_list, rq);
  480. if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
  481. cfq_del_cfqq_rr(cfqd, cfqq);
  482. }
  483. static void cfq_add_rq_rb(struct request *rq)
  484. {
  485. struct cfq_queue *cfqq = RQ_CFQQ(rq);
  486. struct cfq_data *cfqd = cfqq->cfqd;
  487. struct request *__alias;
  488. cfqq->queued[rq_is_sync(rq)]++;
  489. /*
  490. * looks a little odd, but the first insert might return an alias.
  491. * if that happens, put the alias on the dispatch list
  492. */
  493. while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
  494. cfq_dispatch_insert(cfqd->queue, __alias);
  495. if (!cfq_cfqq_on_rr(cfqq))
  496. cfq_add_cfqq_rr(cfqd, cfqq);
  497. /*
  498. * check if this request is a better next-serve candidate
  499. */
  500. cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
  501. BUG_ON(!cfqq->next_rq);
  502. }
  503. static inline void
  504. cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
  505. {
  506. elv_rb_del(&cfqq->sort_list, rq);
  507. cfqq->queued[rq_is_sync(rq)]--;
  508. cfq_add_rq_rb(rq);
  509. }
  510. static struct request *
  511. cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
  512. {
  513. struct task_struct *tsk = current;
  514. pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
  515. struct cfq_queue *cfqq;
  516. cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
  517. if (cfqq) {
  518. sector_t sector = bio->bi_sector + bio_sectors(bio);
  519. return elv_rb_find(&cfqq->sort_list, sector);
  520. }
  521. return NULL;
  522. }
  523. static void cfq_activate_request(request_queue_t *q, struct request *rq)
  524. {
  525. struct cfq_data *cfqd = q->elevator->elevator_data;
  526. cfqd->rq_in_driver++;
  527. /*
  528. * If the depth is larger than 1, it really could be queueing. But let's
  529. * make the mark a little higher - idling could still be good for
  530. * low queueing, and a low queueing number could also just indicate
  531. * a SCSI mid layer like behaviour where limit+1 is often seen.
  532. */
  533. if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
  534. cfqd->hw_tag = 1;
  535. cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
  536. }
  537. static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
  538. {
  539. struct cfq_data *cfqd = q->elevator->elevator_data;
  540. WARN_ON(!cfqd->rq_in_driver);
  541. cfqd->rq_in_driver--;
  542. }
  543. static void cfq_remove_request(struct request *rq)
  544. {
  545. struct cfq_queue *cfqq = RQ_CFQQ(rq);
  546. if (cfqq->next_rq == rq)
  547. cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
  548. list_del_init(&rq->queuelist);
  549. cfq_del_rq_rb(rq);
  550. if (rq_is_meta(rq)) {
  551. WARN_ON(!cfqq->meta_pending);
  552. cfqq->meta_pending--;
  553. }
  554. }
  555. static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
  556. {
  557. struct cfq_data *cfqd = q->elevator->elevator_data;
  558. struct request *__rq;
  559. __rq = cfq_find_rq_fmerge(cfqd, bio);
  560. if (__rq && elv_rq_merge_ok(__rq, bio)) {
  561. *req = __rq;
  562. return ELEVATOR_FRONT_MERGE;
  563. }
  564. return ELEVATOR_NO_MERGE;
  565. }
  566. static void cfq_merged_request(request_queue_t *q, struct request *req,
  567. int type)
  568. {
  569. if (type == ELEVATOR_FRONT_MERGE) {
  570. struct cfq_queue *cfqq = RQ_CFQQ(req);
  571. cfq_reposition_rq_rb(cfqq, req);
  572. }
  573. }
  574. static void
  575. cfq_merged_requests(request_queue_t *q, struct request *rq,
  576. struct request *next)
  577. {
  578. /*
  579. * reposition in fifo if next is older than rq
  580. */
  581. if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
  582. time_before(next->start_time, rq->start_time))
  583. list_move(&rq->queuelist, &next->queuelist);
  584. cfq_remove_request(next);
  585. }
  586. static int cfq_allow_merge(request_queue_t *q, struct request *rq,
  587. struct bio *bio)
  588. {
  589. struct cfq_data *cfqd = q->elevator->elevator_data;
  590. const int rw = bio_data_dir(bio);
  591. struct cfq_queue *cfqq;
  592. pid_t key;
  593. /*
  594. * Disallow merge of a sync bio into an async request.
  595. */
  596. if ((bio_data_dir(bio) == READ || bio_sync(bio)) && !rq_is_sync(rq))
  597. return 0;
  598. /*
  599. * Lookup the cfqq that this bio will be queued with. Allow
  600. * merge only if rq is queued there.
  601. */
  602. key = cfq_queue_pid(current, rw, bio_sync(bio));
  603. cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);
  604. if (cfqq == RQ_CFQQ(rq))
  605. return 1;
  606. return 0;
  607. }
  608. static inline void
  609. __cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  610. {
  611. if (cfqq) {
  612. /*
  613. * stop potential idle class queues waiting service
  614. */
  615. del_timer(&cfqd->idle_class_timer);
  616. cfqq->slice_end = 0;
  617. cfq_clear_cfqq_must_alloc_slice(cfqq);
  618. cfq_clear_cfqq_fifo_expire(cfqq);
  619. cfq_mark_cfqq_slice_new(cfqq);
  620. cfq_clear_cfqq_queue_new(cfqq);
  621. }
  622. cfqd->active_queue = cfqq;
  623. }
  624. /*
  625. * current cfqq expired its slice (or was too idle), select new one
  626. */
  627. static void
  628. __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  629. int preempted, int timed_out)
  630. {
  631. if (cfq_cfqq_wait_request(cfqq))
  632. del_timer(&cfqd->idle_slice_timer);
  633. cfq_clear_cfqq_must_dispatch(cfqq);
  634. cfq_clear_cfqq_wait_request(cfqq);
  635. /*
  636. * store what was left of this slice, if the queue idled out
  637. * or was preempted
  638. */
  639. if (timed_out && !cfq_cfqq_slice_new(cfqq))
  640. cfqq->slice_resid = cfqq->slice_end - jiffies;
  641. cfq_resort_rr_list(cfqd, cfqq);
  642. if (cfqq == cfqd->active_queue)
  643. cfqd->active_queue = NULL;
  644. if (cfqd->active_cic) {
  645. put_io_context(cfqd->active_cic->ioc);
  646. cfqd->active_cic = NULL;
  647. }
  648. cfqd->dispatch_slice = 0;
  649. }
  650. static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted,
  651. int timed_out)
  652. {
  653. struct cfq_queue *cfqq = cfqd->active_queue;
  654. if (cfqq)
  655. __cfq_slice_expired(cfqd, cfqq, preempted, timed_out);
  656. }
  657. /*
  658. * Get next queue for service. Unless we have a queue preemption,
  659. * we'll simply select the first cfqq in the service tree.
  660. */
  661. static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
  662. {
  663. struct cfq_queue *cfqq;
  664. struct rb_node *n;
  665. if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
  666. return NULL;
  667. n = cfq_rb_first(&cfqd->service_tree);
  668. cfqq = rb_entry(n, struct cfq_queue, rb_node);
  669. if (cfq_class_idle(cfqq)) {
  670. unsigned long end;
  671. /*
  672. * if we have idle queues and no rt or be queues have
  673. * pending requests, either allow immediate service if
  674. * the grace period has passed or arm the idle grace
  675. * timer
  676. */
  677. end = cfqd->last_end_request + CFQ_IDLE_GRACE;
  678. if (time_before(jiffies, end)) {
  679. mod_timer(&cfqd->idle_class_timer, end);
  680. cfqq = NULL;
  681. }
  682. }
  683. return cfqq;
  684. }
  685. /*
  686. * Get and set a new active queue for service.
  687. */
  688. static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
  689. {
  690. struct cfq_queue *cfqq;
  691. cfqq = cfq_get_next_queue(cfqd);
  692. __cfq_set_active_queue(cfqd, cfqq);
  693. return cfqq;
  694. }
  695. static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
  696. struct request *rq)
  697. {
  698. if (rq->sector >= cfqd->last_position)
  699. return rq->sector - cfqd->last_position;
  700. else
  701. return cfqd->last_position - rq->sector;
  702. }
  703. static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
  704. {
  705. struct cfq_io_context *cic = cfqd->active_cic;
  706. if (!sample_valid(cic->seek_samples))
  707. return 0;
  708. return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
  709. }
  710. static int cfq_close_cooperator(struct cfq_data *cfq_data,
  711. struct cfq_queue *cfqq)
  712. {
  713. /*
  714. * We should notice if some of the queues are cooperating, e.g.
  715. * working closely on the same area of the disk. In that case,
  716. * we can group them together and not waste time idling.
  717. */
  718. return 0;
  719. }
  720. #define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))
  721. static void cfq_arm_slice_timer(struct cfq_data *cfqd)
  722. {
  723. struct cfq_queue *cfqq = cfqd->active_queue;
  724. struct cfq_io_context *cic;
  725. unsigned long sl;
  726. WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
  727. WARN_ON(cfq_cfqq_slice_new(cfqq));
  728. /*
  729. * idle is disabled, either manually or by past process history
  730. */
  731. if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
  732. return;
  733. /*
  734. * task has exited, don't wait
  735. */
  736. cic = cfqd->active_cic;
  737. if (!cic || !cic->ioc->task)
  738. return;
  739. /*
  740. * See if this prio level has a good candidate
  741. */
  742. if (cfq_close_cooperator(cfqd, cfqq) &&
  743. (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
  744. return;
  745. cfq_mark_cfqq_must_dispatch(cfqq);
  746. cfq_mark_cfqq_wait_request(cfqq);
  747. /*
  748. * we don't want to idle for seeks, but we do want to allow
  749. * fair distribution of slice time for a process doing back-to-back
  751. * seeks. So allow a little bit of time for it to submit a new rq.
  751. */
  752. sl = cfqd->cfq_slice_idle;
  753. if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
  754. sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));
  755. mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
  756. }
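/*
 * For illustration with the defaults above and HZ=1000: a non-seeky
 * process gets cfq_slice_idle = HZ/125 = 8 jiffies to submit its next
 * request before the idle timer fires, while a process flagged seeky
 * by CIC_SEEKY() only gets min(8, msecs_to_jiffies(2)) = 2 jiffies.
 */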
  757. /*
  758. * Move request from internal lists to the request queue dispatch list.
  759. */
  760. static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
  761. {
  762. struct cfq_queue *cfqq = RQ_CFQQ(rq);
  763. cfq_remove_request(rq);
  764. cfqq->dispatched++;
  765. elv_dispatch_sort(q, rq);
  766. }
  767. /*
  768. * return expired entry, or NULL to just start from scratch in rbtree
  769. */
  770. static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
  771. {
  772. struct cfq_data *cfqd = cfqq->cfqd;
  773. struct request *rq;
  774. int fifo;
  775. if (cfq_cfqq_fifo_expire(cfqq))
  776. return NULL;
  777. cfq_mark_cfqq_fifo_expire(cfqq);
  778. if (list_empty(&cfqq->fifo))
  779. return NULL;
  780. fifo = cfq_cfqq_sync(cfqq);
  781. rq = rq_entry_fifo(cfqq->fifo.next);
  782. if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
  783. return NULL;
  784. return rq;
  785. }
  786. static inline int
  787. cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  788. {
  789. const int base_rq = cfqd->cfq_slice_async_rq;
  790. WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
  791. return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
  792. }
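/*
 * For illustration with the default cfq_slice_async_rq of 2 and
 * CFQ_PRIO_LISTS = 8: a prio 4 async queue may dispatch up to
 * 2 * (2 + 2 * (8 - 1 - 4)) = 16 requests in a slice, while prio 7
 * is limited to 2 * (2 + 2 * 0) = 4.
 */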
  793. /*
  794. * Select a queue for service. If we have a current active queue,
  795. * check whether to continue servicing it, or retrieve and set a new one.
  796. */
  797. static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
  798. {
  799. struct cfq_queue *cfqq;
  800. cfqq = cfqd->active_queue;
  801. if (!cfqq)
  802. goto new_queue;
  803. /*
  804. * The active queue has run out of time, expire it and select a new one.
  805. */
  806. if (cfq_slice_used(cfqq))
  807. goto expire;
  808. /*
  809. * The active queue has requests and isn't expired, allow it to
  810. * dispatch.
  811. */
  812. if (!RB_EMPTY_ROOT(&cfqq->sort_list))
  813. goto keep_queue;
  814. /*
  815. * No requests pending. If the active queue still has requests in
  816. * flight or is idling for a new request, allow either of these
  817. * conditions to happen (or time out) before selecting a new queue.
  818. */
  819. if (cfqq->dispatched || timer_pending(&cfqd->idle_slice_timer)) {
  820. cfqq = NULL;
  821. goto keep_queue;
  822. }
  823. expire:
  824. cfq_slice_expired(cfqd, 0, 0);
  825. new_queue:
  826. cfqq = cfq_set_active_queue(cfqd);
  827. keep_queue:
  828. return cfqq;
  829. }
  830. /*
  831. * Dispatch some requests from cfqq, moving them to the request queue
  832. * dispatch list.
  833. */
  834. static int
  835. __cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  836. int max_dispatch)
  837. {
  838. int dispatched = 0;
  839. BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
  840. do {
  841. struct request *rq;
  842. /*
  843. * follow expired path, else get first next available
  844. */
  845. if ((rq = cfq_check_fifo(cfqq)) == NULL)
  846. rq = cfqq->next_rq;
  847. /*
  848. * finally, insert request into driver dispatch list
  849. */
  850. cfq_dispatch_insert(cfqd->queue, rq);
  851. cfqd->dispatch_slice++;
  852. dispatched++;
  853. if (!cfqd->active_cic) {
  854. atomic_inc(&RQ_CIC(rq)->ioc->refcount);
  855. cfqd->active_cic = RQ_CIC(rq);
  856. }
  857. if (RB_EMPTY_ROOT(&cfqq->sort_list))
  858. break;
  859. } while (dispatched < max_dispatch);
  860. /*
  861. * expire an async queue immediately if it has used up its slice. idle
  862. * queues always expire after one dispatch round.
  863. */
  864. if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
  865. cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
  866. cfq_class_idle(cfqq))) {
  867. cfqq->slice_end = jiffies + 1;
  868. cfq_slice_expired(cfqd, 0, 0);
  869. }
  870. return dispatched;
  871. }
  872. static inline int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
  873. {
  874. int dispatched = 0;
  875. while (cfqq->next_rq) {
  876. cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
  877. dispatched++;
  878. }
  879. BUG_ON(!list_empty(&cfqq->fifo));
  880. return dispatched;
  881. }
  882. /*
  883. * Drain our current requests. Used for barriers and when switching
  884. * io schedulers on-the-fly.
  885. */
  886. static int cfq_forced_dispatch(struct cfq_data *cfqd)
  887. {
  888. int dispatched = 0;
  889. struct rb_node *n;
  890. while ((n = cfq_rb_first(&cfqd->service_tree)) != NULL) {
  891. struct cfq_queue *cfqq = rb_entry(n, struct cfq_queue, rb_node);
  892. dispatched += __cfq_forced_dispatch_cfqq(cfqq);
  893. }
  894. cfq_slice_expired(cfqd, 0, 0);
  895. BUG_ON(cfqd->busy_queues);
  896. return dispatched;
  897. }
  898. static int cfq_dispatch_requests(request_queue_t *q, int force)
  899. {
  900. struct cfq_data *cfqd = q->elevator->elevator_data;
  901. struct cfq_queue *cfqq;
  902. int dispatched;
  903. if (!cfqd->busy_queues)
  904. return 0;
  905. if (unlikely(force))
  906. return cfq_forced_dispatch(cfqd);
  907. dispatched = 0;
  908. while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
  909. int max_dispatch;
  910. if (cfqd->busy_queues > 1) {
  911. /*
  912. * So we have dispatched before in this round; if the
  913. * next queue has idling enabled (it must be sync), don't
  914. * allow it service until the previous requests have completed.
  915. */
  916. if (cfqd->rq_in_driver && cfq_cfqq_idle_window(cfqq) &&
  917. dispatched)
  918. break;
  919. if (cfqq->dispatched >= cfqd->cfq_quantum)
  920. break;
  921. }
  922. cfq_clear_cfqq_must_dispatch(cfqq);
  923. cfq_clear_cfqq_wait_request(cfqq);
  924. del_timer(&cfqd->idle_slice_timer);
  925. max_dispatch = cfqd->cfq_quantum;
  926. if (cfq_class_idle(cfqq))
  927. max_dispatch = 1;
  928. dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
  929. }
  930. return dispatched;
  931. }
  932. /*
  933. * task holds one reference to the queue, dropped when task exits. each rq
  934. * in-flight on this queue also holds a reference, dropped when rq is freed.
  935. *
  936. * queue lock must be held here.
  937. */
  938. static void cfq_put_queue(struct cfq_queue *cfqq)
  939. {
  940. struct cfq_data *cfqd = cfqq->cfqd;
  941. BUG_ON(atomic_read(&cfqq->ref) <= 0);
  942. if (!atomic_dec_and_test(&cfqq->ref))
  943. return;
  944. BUG_ON(rb_first(&cfqq->sort_list));
  945. BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
  946. BUG_ON(cfq_cfqq_on_rr(cfqq));
  947. if (unlikely(cfqd->active_queue == cfqq)) {
  948. __cfq_slice_expired(cfqd, cfqq, 0, 0);
  949. cfq_schedule_dispatch(cfqd);
  950. }
  951. /*
  952. * it's on the empty list and still hashed
  953. */
  954. hlist_del(&cfqq->cfq_hash);
  955. kmem_cache_free(cfq_pool, cfqq);
  956. }
  957. static struct cfq_queue *
  958. __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
  959. const int hashval)
  960. {
  961. struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
  962. struct hlist_node *entry;
  963. struct cfq_queue *__cfqq;
  964. hlist_for_each_entry(__cfqq, entry, hash_list, cfq_hash) {
  965. const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio);
  966. if (__cfqq->key == key && (__p == prio || !prio))
  967. return __cfqq;
  968. }
  969. return NULL;
  970. }
  971. static struct cfq_queue *
  972. cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio)
  973. {
  974. return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
  975. }
  976. static void cfq_free_io_context(struct io_context *ioc)
  977. {
  978. struct cfq_io_context *__cic;
  979. struct rb_node *n;
  980. int freed = 0;
  981. while ((n = rb_first(&ioc->cic_root)) != NULL) {
  982. __cic = rb_entry(n, struct cfq_io_context, rb_node);
  983. rb_erase(&__cic->rb_node, &ioc->cic_root);
  984. kmem_cache_free(cfq_ioc_pool, __cic);
  985. freed++;
  986. }
  987. elv_ioc_count_mod(ioc_count, -freed);
  988. if (ioc_gone && !elv_ioc_count_read(ioc_count))
  989. complete(ioc_gone);
  990. }
  991. static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  992. {
  993. if (unlikely(cfqq == cfqd->active_queue)) {
  994. __cfq_slice_expired(cfqd, cfqq, 0, 0);
  995. cfq_schedule_dispatch(cfqd);
  996. }
  997. cfq_put_queue(cfqq);
  998. }
  999. static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
  1000. struct cfq_io_context *cic)
  1001. {
  1002. list_del_init(&cic->queue_list);
  1003. smp_wmb();
  1004. cic->key = NULL;
  1005. if (cic->cfqq[ASYNC]) {
  1006. cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
  1007. cic->cfqq[ASYNC] = NULL;
  1008. }
  1009. if (cic->cfqq[SYNC]) {
  1010. cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
  1011. cic->cfqq[SYNC] = NULL;
  1012. }
  1013. }
  1014. static void cfq_exit_single_io_context(struct cfq_io_context *cic)
  1015. {
  1016. struct cfq_data *cfqd = cic->key;
  1017. if (cfqd) {
  1018. request_queue_t *q = cfqd->queue;
  1019. spin_lock_irq(q->queue_lock);
  1020. __cfq_exit_single_io_context(cfqd, cic);
  1021. spin_unlock_irq(q->queue_lock);
  1022. }
  1023. }
  1024. /*
  1025. * The process that ioc belongs to has exited; we need to clean up
  1026. * and put the internal structures we have that belong to that process.
  1027. */
  1028. static void cfq_exit_io_context(struct io_context *ioc)
  1029. {
  1030. struct cfq_io_context *__cic;
  1031. struct rb_node *n;
  1032. /*
  1033. * put the reference this task is holding to the various queues
  1034. */
  1035. n = rb_first(&ioc->cic_root);
  1036. while (n != NULL) {
  1037. __cic = rb_entry(n, struct cfq_io_context, rb_node);
  1038. cfq_exit_single_io_context(__cic);
  1039. n = rb_next(n);
  1040. }
  1041. }
  1042. static struct cfq_io_context *
  1043. cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
  1044. {
  1045. struct cfq_io_context *cic;
  1046. cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
  1047. if (cic) {
  1048. memset(cic, 0, sizeof(*cic));
  1049. cic->last_end_request = jiffies;
  1050. INIT_LIST_HEAD(&cic->queue_list);
  1051. cic->dtor = cfq_free_io_context;
  1052. cic->exit = cfq_exit_io_context;
  1053. elv_ioc_count_inc(ioc_count);
  1054. }
  1055. return cic;
  1056. }
  1057. static void cfq_init_prio_data(struct cfq_queue *cfqq)
  1058. {
  1059. struct task_struct *tsk = current;
  1060. int ioprio_class;
  1061. if (!cfq_cfqq_prio_changed(cfqq))
  1062. return;
  1063. ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
  1064. switch (ioprio_class) {
  1065. default:
  1066. printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
  1067. case IOPRIO_CLASS_NONE:
  1068. /*
  1069. * no prio set, place us in the middle of the BE classes
  1070. */
  1071. cfqq->ioprio = task_nice_ioprio(tsk);
  1072. cfqq->ioprio_class = IOPRIO_CLASS_BE;
  1073. break;
  1074. case IOPRIO_CLASS_RT:
  1075. cfqq->ioprio = task_ioprio(tsk);
  1076. cfqq->ioprio_class = IOPRIO_CLASS_RT;
  1077. break;
  1078. case IOPRIO_CLASS_BE:
  1079. cfqq->ioprio = task_ioprio(tsk);
  1080. cfqq->ioprio_class = IOPRIO_CLASS_BE;
  1081. break;
  1082. case IOPRIO_CLASS_IDLE:
  1083. cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
  1084. cfqq->ioprio = 7;
  1085. cfq_clear_cfqq_idle_window(cfqq);
  1086. break;
  1087. }
  1088. /*
  1089. * keep track of original prio settings in case we have to temporarily
  1090. * elevate the priority of this queue
  1091. */
  1092. cfqq->org_ioprio = cfqq->ioprio;
  1093. cfqq->org_ioprio_class = cfqq->ioprio_class;
  1094. cfq_clear_cfqq_prio_changed(cfqq);
  1095. }
  1096. static inline void changed_ioprio(struct cfq_io_context *cic)
  1097. {
  1098. struct cfq_data *cfqd = cic->key;
  1099. struct cfq_queue *cfqq;
  1100. unsigned long flags;
  1101. if (unlikely(!cfqd))
  1102. return;
  1103. spin_lock_irqsave(cfqd->queue->queue_lock, flags);
  1104. cfqq = cic->cfqq[ASYNC];
  1105. if (cfqq) {
  1106. struct cfq_queue *new_cfqq;
  1107. new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, cic->ioc->task,
  1108. GFP_ATOMIC);
  1109. if (new_cfqq) {
  1110. cic->cfqq[ASYNC] = new_cfqq;
  1111. cfq_put_queue(cfqq);
  1112. }
  1113. }
  1114. cfqq = cic->cfqq[SYNC];
  1115. if (cfqq)
  1116. cfq_mark_cfqq_prio_changed(cfqq);
  1117. spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
  1118. }
  1119. static void cfq_ioc_set_ioprio(struct io_context *ioc)
  1120. {
  1121. struct cfq_io_context *cic;
  1122. struct rb_node *n;
  1123. ioc->ioprio_changed = 0;
  1124. n = rb_first(&ioc->cic_root);
  1125. while (n != NULL) {
  1126. cic = rb_entry(n, struct cfq_io_context, rb_node);
  1127. changed_ioprio(cic);
  1128. n = rb_next(n);
  1129. }
  1130. }
  1131. static struct cfq_queue *
  1132. cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk,
  1133. gfp_t gfp_mask)
  1134. {
  1135. const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
  1136. struct cfq_queue *cfqq, *new_cfqq = NULL;
  1137. unsigned short ioprio;
  1138. retry:
  1139. ioprio = tsk->ioprio;
  1140. cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);
  1141. if (!cfqq) {
  1142. if (new_cfqq) {
  1143. cfqq = new_cfqq;
  1144. new_cfqq = NULL;
  1145. } else if (gfp_mask & __GFP_WAIT) {
  1146. /*
  1147. * Inform the allocator of the fact that we will
  1148. * just repeat this allocation if it fails, to allow
  1149. * the allocator to do whatever it needs to attempt to
  1150. * free memory.
  1151. */
  1152. spin_unlock_irq(cfqd->queue->queue_lock);
  1153. new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
  1154. spin_lock_irq(cfqd->queue->queue_lock);
  1155. goto retry;
  1156. } else {
  1157. cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
  1158. if (!cfqq)
  1159. goto out;
  1160. }
  1161. memset(cfqq, 0, sizeof(*cfqq));
  1162. INIT_HLIST_NODE(&cfqq->cfq_hash);
  1163. RB_CLEAR_NODE(&cfqq->rb_node);
  1164. INIT_LIST_HEAD(&cfqq->fifo);
  1165. cfqq->key = key;
  1166. hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
  1167. atomic_set(&cfqq->ref, 0);
  1168. cfqq->cfqd = cfqd;
  1169. if (key != CFQ_KEY_ASYNC)
  1170. cfq_mark_cfqq_idle_window(cfqq);
  1171. cfq_mark_cfqq_prio_changed(cfqq);
  1172. cfq_mark_cfqq_queue_new(cfqq);
  1173. cfq_init_prio_data(cfqq);
  1174. }
  1175. if (new_cfqq)
  1176. kmem_cache_free(cfq_pool, new_cfqq);
  1177. atomic_inc(&cfqq->ref);
  1178. out:
  1179. WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
  1180. return cfqq;
  1181. }
  1182. /*
  1183. * We drop cfq io contexts lazily, so we may find a dead one.
  1184. */
  1185. static void
  1186. cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
  1187. {
  1188. WARN_ON(!list_empty(&cic->queue_list));
  1189. rb_erase(&cic->rb_node, &ioc->cic_root);
  1190. kmem_cache_free(cfq_ioc_pool, cic);
  1191. elv_ioc_count_dec(ioc_count);
  1192. }
  1193. static struct cfq_io_context *
  1194. cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
  1195. {
  1196. struct rb_node *n;
  1197. struct cfq_io_context *cic;
  1198. void *k, *key = cfqd;
  1199. restart:
  1200. n = ioc->cic_root.rb_node;
  1201. while (n) {
  1202. cic = rb_entry(n, struct cfq_io_context, rb_node);
  1203. /* ->key must be copied to avoid race with cfq_exit_queue() */
  1204. k = cic->key;
  1205. if (unlikely(!k)) {
  1206. cfq_drop_dead_cic(ioc, cic);
  1207. goto restart;
  1208. }
  1209. if (key < k)
  1210. n = n->rb_left;
  1211. else if (key > k)
  1212. n = n->rb_right;
  1213. else
  1214. return cic;
  1215. }
  1216. return NULL;
  1217. }
  1218. static inline void
  1219. cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
  1220. struct cfq_io_context *cic)
  1221. {
  1222. struct rb_node **p;
  1223. struct rb_node *parent;
  1224. struct cfq_io_context *__cic;
  1225. unsigned long flags;
  1226. void *k;
  1227. cic->ioc = ioc;
  1228. cic->key = cfqd;
  1229. restart:
  1230. parent = NULL;
  1231. p = &ioc->cic_root.rb_node;
  1232. while (*p) {
  1233. parent = *p;
  1234. __cic = rb_entry(parent, struct cfq_io_context, rb_node);
  1235. /* ->key must be copied to avoid race with cfq_exit_queue() */
  1236. k = __cic->key;
  1237. if (unlikely(!k)) {
  1238. cfq_drop_dead_cic(ioc, __cic);
  1239. goto restart;
  1240. }
  1241. if (cic->key < k)
  1242. p = &(*p)->rb_left;
  1243. else if (cic->key > k)
  1244. p = &(*p)->rb_right;
  1245. else
  1246. BUG();
  1247. }
  1248. rb_link_node(&cic->rb_node, parent, p);
  1249. rb_insert_color(&cic->rb_node, &ioc->cic_root);
  1250. spin_lock_irqsave(cfqd->queue->queue_lock, flags);
  1251. list_add(&cic->queue_list, &cfqd->cic_list);
  1252. spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
  1253. }
  1254. /*
  1255. * Setup general io context and cfq io context. There can be several cfq
  1256. * io contexts per general io context, if this process is doing io to more
  1257. * than one device managed by cfq.
  1258. */
  1259. static struct cfq_io_context *
  1260. cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
  1261. {
  1262. struct io_context *ioc = NULL;
  1263. struct cfq_io_context *cic;
  1264. might_sleep_if(gfp_mask & __GFP_WAIT);
  1265. ioc = get_io_context(gfp_mask, cfqd->queue->node);
  1266. if (!ioc)
  1267. return NULL;
  1268. cic = cfq_cic_rb_lookup(cfqd, ioc);
  1269. if (cic)
  1270. goto out;
  1271. cic = cfq_alloc_io_context(cfqd, gfp_mask);
  1272. if (cic == NULL)
  1273. goto err;
  1274. cfq_cic_link(cfqd, ioc, cic);
  1275. out:
  1276. smp_read_barrier_depends();
  1277. if (unlikely(ioc->ioprio_changed))
  1278. cfq_ioc_set_ioprio(ioc);
  1279. return cic;
  1280. err:
  1281. put_io_context(ioc);
  1282. return NULL;
  1283. }
  1284. static void
  1285. cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
  1286. {
  1287. unsigned long elapsed = jiffies - cic->last_end_request;
  1288. unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
  1289. cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
  1290. cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
  1291. cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
  1292. }
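/*
 * These are fixed-point exponentially weighted averages: each new
 * sample carries 1/8 of the weight and is scaled by 256, so
 * ttime_samples converges towards 256 and ttime_mean towards the
 * recent average gap between the last completion and the next issue,
 * capped at twice cfq_slice_idle.
 */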
  1293. static void
  1294. cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
  1295. struct request *rq)
  1296. {
  1297. sector_t sdist;
  1298. u64 total;
  1299. if (cic->last_request_pos < rq->sector)
  1300. sdist = rq->sector - cic->last_request_pos;
  1301. else
  1302. sdist = cic->last_request_pos - rq->sector;
  1303. if (!cic->seek_samples) {
  1304. cfqd->new_seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
  1305. cfqd->new_seek_mean = cfqd->new_seek_total / 256;
  1306. }
  1307. /*
  1308. * Don't allow the seek distance to get too large from the
  1309. * odd fragment, pagein, etc
  1310. */
  1311. if (cic->seek_samples <= 60) /* second&third seek */
  1312. sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
  1313. else
  1314. sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);
  1315. cic->seek_samples = (7*cic->seek_samples + 256) / 8;
  1316. cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
  1317. total = cic->seek_total + (cic->seek_samples/2);
  1318. do_div(total, cic->seek_samples);
  1319. cic->seek_mean = (sector_t)total;
  1320. }
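/*
 * Seek distance uses the same 1/8-weight fixed-point average as the
 * think time above. Each sample is clamped to about four times the
 * running mean (with a larger allowance while only a few samples have
 * been taken) so that one stray large seek does not immediately make
 * the process look seeky; CIC_SEEKY() then flags the context once
 * seek_mean exceeds 8*1024.
 */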
  1321. /*
  1322. * Disable idle window if the process thinks too long or seeks so much that
  1323. * it doesn't matter
  1324. */
  1325. static void
  1326. cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  1327. struct cfq_io_context *cic)
  1328. {
  1329. int enable_idle = cfq_cfqq_idle_window(cfqq);
  1330. if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
  1331. (cfqd->hw_tag && CIC_SEEKY(cic)))
  1332. enable_idle = 0;
  1333. else if (sample_valid(cic->ttime_samples)) {
  1334. if (cic->ttime_mean > cfqd->cfq_slice_idle)
  1335. enable_idle = 0;
  1336. else
  1337. enable_idle = 1;
  1338. }
  1339. if (enable_idle)
  1340. cfq_mark_cfqq_idle_window(cfqq);
  1341. else
  1342. cfq_clear_cfqq_idle_window(cfqq);
  1343. }
  1344. /*
  1345. * Check if new_cfqq should preempt the currently active queue. Return 0 for
  1346. * no, or if we aren't sure; a 1 will cause a preempt.
  1347. */
  1348. static int
  1349. cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
  1350. struct request *rq)
  1351. {
  1352. struct cfq_queue *cfqq;
  1353. cfqq = cfqd->active_queue;
  1354. if (!cfqq)
  1355. return 0;
  1356. if (cfq_slice_used(cfqq))
  1357. return 1;
  1358. if (cfq_class_idle(new_cfqq))
  1359. return 0;
  1360. if (cfq_class_idle(cfqq))
  1361. return 1;
  1362. /*
  1363. * if the new request is sync, but the currently running queue is
  1364. * not, let the sync request have priority.
  1365. */
  1366. if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
  1367. return 1;
  1368. /*
  1369. * So both queues are sync. Let the new request get disk time if
  1370. * it's a metadata request and the current queue is doing regular IO.
  1371. */
  1372. if (rq_is_meta(rq) && !cfqq->meta_pending)
  1373. return 1;
  1374. if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
  1375. return 0;
  1376. /*
  1377. * if this request is as-good as one we would expect from the
  1378. * current cfqq, let it preempt
  1379. */
  1380. if (cfq_rq_close(cfqd, rq))
  1381. return 1;
  1382. return 0;
  1383. }
  1384. /*
  1385. * cfqq preempts the active queue. if we allowed preempt with no slice left,
  1386. * let it have half of its nominal slice.
  1387. */
  1388. static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  1389. {
  1390. cfq_slice_expired(cfqd, 1, 1);
  1391. /*
  1392. * Put the new queue at the front of the current list,
  1393. * so we know that it will be selected next.
  1394. */
  1395. BUG_ON(!cfq_cfqq_on_rr(cfqq));
  1396. cfq_service_tree_add(cfqd, cfqq, 1);
  1397. cfqq->slice_end = 0;
  1398. cfq_mark_cfqq_slice_new(cfqq);
  1399. }

/*
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                struct request *rq)
{
        struct cfq_io_context *cic = RQ_CIC(rq);

        if (rq_is_meta(rq))
                cfqq->meta_pending++;

        cfq_update_io_thinktime(cfqd, cic);
        cfq_update_io_seektime(cfqd, cic, rq);
        cfq_update_idle_window(cfqd, cfqq, cic);

        cic->last_request_pos = rq->sector + rq->nr_sectors;
        cfqq->last_request_pos = cic->last_request_pos;

        if (cfqq == cfqd->active_queue) {
                /*
                 * if we are waiting for a request for this queue, let it rip
                 * immediately and flag that we must not expire this queue
                 * just now
                 */
                if (cfq_cfqq_wait_request(cfqq)) {
                        cfq_mark_cfqq_must_dispatch(cfqq);
                        del_timer(&cfqd->idle_slice_timer);
                        blk_start_queueing(cfqd->queue);
                }
        } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
                /*
                 * not the active queue - expire current slice if it is
                 * idle and has expired its mean thinktime or this new queue
                 * has some old slice time left and is of higher priority
                 */
                cfq_preempt_queue(cfqd, cfqq);
                cfq_mark_cfqq_must_dispatch(cfqq);
                blk_start_queueing(cfqd->queue);
        }
}
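
/*
 * add_req hook: add the new request to the rbtree and the FIFO of its cfqq,
 * then run the enqueue logic above to see if anything needs to happen now.
 */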
static void cfq_insert_request(request_queue_t *q, struct request *rq)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct cfq_queue *cfqq = RQ_CFQQ(rq);

        cfq_init_prio_data(cfqq);

        cfq_add_rq_rb(rq);

        list_add_tail(&rq->queuelist, &cfqq->fifo);

        cfq_rq_enqueued(cfqd, cfqq, rq);
}
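
/*
 * A request owned by this scheduler has completed in the driver. Account for
 * it and decide whether the active queue should be expired or allowed to idle.
 */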
static void cfq_completed_request(request_queue_t *q, struct request *rq)
{
        struct cfq_queue *cfqq = RQ_CFQQ(rq);
        struct cfq_data *cfqd = cfqq->cfqd;
        const int sync = rq_is_sync(rq);
        unsigned long now;

        now = jiffies;

        WARN_ON(!cfqd->rq_in_driver);
        WARN_ON(!cfqq->dispatched);
        cfqd->rq_in_driver--;
        cfqq->dispatched--;

        if (!cfq_class_idle(cfqq))
                cfqd->last_end_request = now;

        if (sync)
                RQ_CIC(rq)->last_end_request = now;

        /*
         * If this is the active queue, check if it needs to be expired,
         * or if we want to idle in case it has no pending requests.
         */
        if (cfqd->active_queue == cfqq) {
                if (cfq_cfqq_slice_new(cfqq)) {
                        cfq_set_prio_slice(cfqd, cfqq);
                        cfq_clear_cfqq_slice_new(cfqq);
                }
                if (cfq_slice_used(cfqq))
                        cfq_slice_expired(cfqd, 0, 1);
                else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
                        cfq_arm_slice_timer(cfqd);
        }

        if (!cfqd->rq_in_driver)
                cfq_schedule_dispatch(cfqd);
}

/*
 * we temporarily boost lower priority queues if they are holding fs exclusive
 * resources. they are boosted to normal prio (CLASS_BE/4)
 */
static void cfq_prio_boost(struct cfq_queue *cfqq)
{
        if (has_fs_excl()) {
                /*
                 * boost idle prio on transactions that would lock out other
                 * users of the filesystem
                 */
                if (cfq_class_idle(cfqq))
                        cfqq->ioprio_class = IOPRIO_CLASS_BE;
                if (cfqq->ioprio > IOPRIO_NORM)
                        cfqq->ioprio = IOPRIO_NORM;
        } else {
                /*
                 * check if we need to unboost the queue
                 */
                if (cfqq->ioprio_class != cfqq->org_ioprio_class)
                        cfqq->ioprio_class = cfqq->org_ioprio_class;
                if (cfqq->ioprio != cfqq->org_ioprio)
                        cfqq->ioprio = cfqq->org_ioprio;
        }
}
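
/*
 * Grant one ELV_MQUEUE_MUST pass to a queue that is waiting for a request or
 * flagged must_alloc; once must_alloc_slice is set, fall back to
 * ELV_MQUEUE_MAY.
 */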
static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
        if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
            !cfq_cfqq_must_alloc_slice(cfqq)) {
                cfq_mark_cfqq_must_alloc_slice(cfqq);
                return ELV_MQUEUE_MUST;
        }

        return ELV_MQUEUE_MAY;
}
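
/*
 * may_queue hook: decide whether the calling task may allocate another
 * request in the given direction.
 */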
static int cfq_may_queue(request_queue_t *q, int rw)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct task_struct *tsk = current;
        struct cfq_queue *cfqq;
        unsigned int key;

        key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);

        /*
         * don't force setup of a queue from here, as a call to may_queue
         * does not necessarily imply that a request actually will be queued.
         * so just lookup a possibly existing queue, or return 'may queue'
         * if that fails
         */
        cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
        if (cfqq) {
                cfq_init_prio_data(cfqq);
                cfq_prio_boost(cfqq);

                return __cfq_may_queue(cfqq);
        }

        return ELV_MQUEUE_MAY;
}

/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
        struct cfq_queue *cfqq = RQ_CFQQ(rq);

        if (cfqq) {
                const int rw = rq_data_dir(rq);

                BUG_ON(!cfqq->allocated[rw]);
                cfqq->allocated[rw]--;

                put_io_context(RQ_CIC(rq)->ioc);

                rq->elevator_private = NULL;
                rq->elevator_private2 = NULL;

                cfq_put_queue(cfqq);
        }
}

/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
{
        struct cfq_data *cfqd = q->elevator->elevator_data;
        struct task_struct *tsk = current;
        struct cfq_io_context *cic;
        const int rw = rq_data_dir(rq);
        const int is_sync = rq_is_sync(rq);
        pid_t key = cfq_queue_pid(tsk, rw, is_sync);
        struct cfq_queue *cfqq;
        unsigned long flags;

        might_sleep_if(gfp_mask & __GFP_WAIT);

        cic = cfq_get_io_context(cfqd, gfp_mask);

        spin_lock_irqsave(q->queue_lock, flags);

        if (!cic)
                goto queue_fail;

        if (!cic->cfqq[is_sync]) {
                cfqq = cfq_get_queue(cfqd, key, tsk, gfp_mask);
                if (!cfqq)
                        goto queue_fail;

                cic->cfqq[is_sync] = cfqq;
        } else
                cfqq = cic->cfqq[is_sync];

        cfqq->allocated[rw]++;
        cfq_clear_cfqq_must_alloc(cfqq);
        atomic_inc(&cfqq->ref);

        spin_unlock_irqrestore(q->queue_lock, flags);

        rq->elevator_private = cic;
        rq->elevator_private2 = cfqq;
        return 0;

queue_fail:
        if (cic)
                put_io_context(cic->ioc);

        cfq_schedule_dispatch(cfqd);
        spin_unlock_irqrestore(q->queue_lock, flags);
        return 1;
}
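
/*
 * Work handler for cfqd->unplug_work: restart the request queue under the
 * queue lock so pending requests get dispatched.
 */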
static void cfq_kick_queue(struct work_struct *work)
{
        struct cfq_data *cfqd =
                container_of(work, struct cfq_data, unplug_work);
        request_queue_t *q = cfqd->queue;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_start_queueing(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
        struct cfq_data *cfqd = (struct cfq_data *) data;
        struct cfq_queue *cfqq;
        unsigned long flags;
        int timed_out = 1;

        spin_lock_irqsave(cfqd->queue->queue_lock, flags);

        if ((cfqq = cfqd->active_queue) != NULL) {
                timed_out = 0;

                /*
                 * expired
                 */
                if (cfq_slice_used(cfqq))
                        goto expire;

                /*
                 * only expire and reinvoke request handler, if there are
                 * other queues with pending requests
                 */
                if (!cfqd->busy_queues)
                        goto out_cont;

                /*
                 * not expired and it has a request pending, let it dispatch
                 */
                if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
                        cfq_mark_cfqq_must_dispatch(cfqq);
                        goto out_kick;
                }
        }
expire:
        cfq_slice_expired(cfqd, 0, timed_out);
out_kick:
        cfq_schedule_dispatch(cfqd);
out_cont:
        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

/*
 * Timer running if an idle class queue is waiting for service
 */
static void cfq_idle_class_timer(unsigned long data)
{
        struct cfq_data *cfqd = (struct cfq_data *) data;
        unsigned long flags, end;

        spin_lock_irqsave(cfqd->queue->queue_lock, flags);

        /*
         * race with a non-idle queue, reset timer
         */
        end = cfqd->last_end_request + CFQ_IDLE_GRACE;
        if (!time_after_eq(jiffies, end))
                mod_timer(&cfqd->idle_class_timer, end);
        else
                cfq_schedule_dispatch(cfqd);

        spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
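
/*
 * Stop both idle timers and sync the block queue before teardown.
 */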
static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
        del_timer_sync(&cfqd->idle_slice_timer);
        del_timer_sync(&cfqd->idle_class_timer);
        blk_sync_queue(cfqd->queue);
}
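
/*
 * Elevator exit: expire the active queue, detach every io context that still
 * references this device, and free the per-device data.
 */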
static void cfq_exit_queue(elevator_t *e)
{
        struct cfq_data *cfqd = e->elevator_data;
        request_queue_t *q = cfqd->queue;

        cfq_shutdown_timer_wq(cfqd);

        spin_lock_irq(q->queue_lock);

        if (cfqd->active_queue)
                __cfq_slice_expired(cfqd, cfqd->active_queue, 0, 0);

        while (!list_empty(&cfqd->cic_list)) {
                struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
                                                        struct cfq_io_context,
                                                        queue_list);

                __cfq_exit_single_io_context(cfqd, cic);
        }

        spin_unlock_irq(q->queue_lock);

        cfq_shutdown_timer_wq(cfqd);

        kfree(cfqd->cfq_hash);
        kfree(cfqd);
}
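
/*
 * Elevator init: allocate and zero the per-device cfq_data, then set up the
 * service tree, queue hash, timers, unplug work and default tunables.
 */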
static void *cfq_init_queue(request_queue_t *q)
{
        struct cfq_data *cfqd;
        int i;

        cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
        if (!cfqd)
                return NULL;

        memset(cfqd, 0, sizeof(*cfqd));

        cfqd->service_tree = CFQ_RB_ROOT;
        INIT_LIST_HEAD(&cfqd->cic_list);

        cfqd->cfq_hash = kmalloc_node(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL, q->node);
        if (!cfqd->cfq_hash)
                goto out_free;

        for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
                INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);

        cfqd->queue = q;

        init_timer(&cfqd->idle_slice_timer);
        cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
        cfqd->idle_slice_timer.data = (unsigned long) cfqd;

        init_timer(&cfqd->idle_class_timer);
        cfqd->idle_class_timer.function = cfq_idle_class_timer;
        cfqd->idle_class_timer.data = (unsigned long) cfqd;

        INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

        cfqd->cfq_quantum = cfq_quantum;
        cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
        cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
        cfqd->cfq_back_max = cfq_back_max;
        cfqd->cfq_back_penalty = cfq_back_penalty;
        cfqd->cfq_slice[0] = cfq_slice_async;
        cfqd->cfq_slice[1] = cfq_slice_sync;
        cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
        cfqd->cfq_slice_idle = cfq_slice_idle;

        return cfqd;
out_free:
        kfree(cfqd);
        return NULL;
}
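
/*
 * Slab caches backing struct cfq_queue and struct cfq_io_context allocations.
 */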
static void cfq_slab_kill(void)
{
        if (cfq_pool)
                kmem_cache_destroy(cfq_pool);
        if (cfq_ioc_pool)
                kmem_cache_destroy(cfq_ioc_pool);
}

static int __init cfq_slab_setup(void)
{
        cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
                                        NULL, NULL);
        if (!cfq_pool)
                goto fail;

        cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
                        sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
        if (!cfq_ioc_pool)
                goto fail;

        return 0;
fail:
        cfq_slab_kill();
        return -ENOMEM;
}

/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
        return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
        char *p = (char *) page;

        *var = simple_strtoul(p, &p, 10);
        return count;
}
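
/*
 * Generate the sysfs show handlers; __CONV selects a jiffies-to-msecs
 * conversion for the time-based tunables.
 */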
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
static ssize_t __FUNC(elevator_t *e, char *page) \
{ \
        struct cfq_data *cfqd = e->elevator_data; \
        unsigned int __data = __VAR; \
        if (__CONV) \
                __data = jiffies_to_msecs(__data); \
        return cfq_var_show(__data, (page)); \
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
#undef SHOW_FUNCTION
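
/*
 * Generate the sysfs store handlers; values are clamped to [MIN, MAX] and
 * optionally converted from msecs to jiffies before being stored.
 */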
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \
{ \
        struct cfq_data *cfqd = e->elevator_data; \
        unsigned int __data; \
        int ret = cfq_var_store(&__data, (page), count); \
        if (__data < (MIN)) \
                __data = (MIN); \
        else if (__data > (MAX)) \
                __data = (MAX); \
        if (__CONV) \
                *(__PTR) = msecs_to_jiffies(__data); \
        else \
                *(__PTR) = __data; \
        return ret; \
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
#undef STORE_FUNCTION
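
/*
 * Tunables exposed through the elevator sysfs directory (queue/iosched).
 */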
#define CFQ_ATTR(name) \
        __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
        CFQ_ATTR(quantum),
        CFQ_ATTR(fifo_expire_sync),
        CFQ_ATTR(fifo_expire_async),
        CFQ_ATTR(back_seek_max),
        CFQ_ATTR(back_seek_penalty),
        CFQ_ATTR(slice_sync),
        CFQ_ATTR(slice_async),
        CFQ_ATTR(slice_async_rq),
        CFQ_ATTR(slice_idle),
        __ATTR_NULL
};

static struct elevator_type iosched_cfq = {
        .ops = {
                .elevator_merge_fn = cfq_merge,
                .elevator_merged_fn = cfq_merged_request,
                .elevator_merge_req_fn = cfq_merged_requests,
                .elevator_allow_merge_fn = cfq_allow_merge,
                .elevator_dispatch_fn = cfq_dispatch_requests,
                .elevator_add_req_fn = cfq_insert_request,
                .elevator_activate_req_fn = cfq_activate_request,
                .elevator_deactivate_req_fn = cfq_deactivate_request,
                .elevator_queue_empty_fn = cfq_queue_empty,
                .elevator_completed_req_fn = cfq_completed_request,
                .elevator_former_req_fn = elv_rb_former_request,
                .elevator_latter_req_fn = elv_rb_latter_request,
                .elevator_set_req_fn = cfq_set_request,
                .elevator_put_req_fn = cfq_put_request,
                .elevator_may_queue_fn = cfq_may_queue,
                .elevator_init_fn = cfq_init_queue,
                .elevator_exit_fn = cfq_exit_queue,
                .trim = cfq_free_io_context,
        },
        .elevator_attrs = cfq_attrs,
        .elevator_name = "cfq",
        .elevator_owner = THIS_MODULE,
};
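
/*
 * Module init: make sure the jiffies-based defaults are non-zero, set up the
 * slab caches and register the elevator.
 */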
static int __init cfq_init(void)
{
        int ret;

        /*
         * could be 0 on HZ < 1000 setups
         */
        if (!cfq_slice_async)
                cfq_slice_async = 1;
        if (!cfq_slice_idle)
                cfq_slice_idle = 1;

        if (cfq_slab_setup())
                return -ENOMEM;

        ret = elv_register(&iosched_cfq);
        if (ret)
                cfq_slab_kill();

        return ret;
}
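
/*
 * Module exit: unregister the elevator, then wait until every outstanding
 * cfq_io_context has been freed before destroying the slab caches.
 */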
static void __exit cfq_exit(void)
{
        DECLARE_COMPLETION_ONSTACK(all_gone);
        elv_unregister(&iosched_cfq);
        ioc_gone = &all_gone;
        /* ioc_gone's update must be visible before reading ioc_count */
        smp_wmb();
        if (elv_ioc_count_read(ioc_count))
                wait_for_completion(ioc_gone);
        synchronize_rcu();
        cfq_slab_kill();
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");