cfq-iosched.c
/*
 * CFQ, or complete fairness queueing, disk scheduler.
 *
 * Based on ideas from a previously unfinished io
 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/hash.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>

/*
 * tunables
 */
static const int cfq_quantum = 4;		/* max queue in one round of service */
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
static const int cfq_back_max = 16 * 1024;	/* maximum backwards seek, in KiB */
static const int cfq_back_penalty = 2;		/* penalty of a backwards seek */
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;

/*
 * grace period before allowing idle class to get disk access
 */
#define CFQ_IDLE_GRACE		(HZ / 10)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_KEY_ASYNC		(0)

/*
 * for the hash of cfqq inside the cfqd
 */
#define CFQ_QHASH_SHIFT		6
#define CFQ_QHASH_ENTRIES	(1 << CFQ_QHASH_SHIFT)

#define RQ_CIC(rq)		((struct cfq_io_context *)(rq)->elevator_private)
#define RQ_CFQQ(rq)		((rq)->elevator_private2)

static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;

static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define ASYNC			(0)
#define SYNC			(1)

#define cfq_cfqq_sync(cfqq)	((cfqq)->key != CFQ_KEY_ASYNC)

#define sample_valid(samples)	((samples) > 80)
/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { RB_ROOT, NULL, }

/*
 * Per block device queue structure
 */
struct cfq_data {
	request_queue_t *queue;

	/*
	 * rr list of queues with requests and the count of them
	 */
	struct cfq_rb_root service_tree;
	unsigned int busy_queues;

	/*
	 * cfqq lookup hash
	 */
	struct hlist_head *cfq_hash;

	int rq_in_driver;
	int sync_flight;
	int hw_tag;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_context *active_cic;
	struct timer_list idle_class_timer;

	sector_t last_position;
	unsigned long last_end_request;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;

	struct list_head cic_list;

	sector_t new_seek_mean;
	u64 new_seek_total;
};
/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	atomic_t ref;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* cfqq lookup hash */
	struct hlist_node cfq_hash;
	/* hash key */
	unsigned int key;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* pending metadata requests */
	int meta_pending;
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	unsigned long slice_end;
	long slice_resid;

	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	/* various state flags, see below */
	unsigned int flags;

	sector_t last_request_pos;
};
enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must dispatch, even if expired */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_queue_new,	/* queue never been serviced */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
};

#define CFQ_CFQQ_FNS(name) \
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
{ \
	cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
} \
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
{ \
	cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
} \
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
{ \
	return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_alloc);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(queue_new);
CFQ_CFQQ_FNS(slice_new);
#undef CFQ_CFQQ_FNS
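/*
 * Illustrative usage (not part of the original file): each
 * CFQ_CFQQ_FNS(name) line above expands the macro into a
 * mark/clear/test triple, so flag handling at call sites reads like:
 *
 *	cfq_mark_cfqq_on_rr(cfqq);
 *	if (cfq_cfqq_on_rr(cfqq))
 *		cfq_clear_cfqq_on_rr(cfqq);
 */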
static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
static void cfq_dispatch_insert(request_queue_t *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, unsigned int, struct task_struct *, gfp_t);

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues)
		kblockd_schedule_work(&cfqd->unplug_work);
}

static int cfq_queue_empty(request_queue_t *q)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	return !cfqd->busy_queues;
}

static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
{
	/*
	 * Use the per-process queue for read requests and synchronous writes
	 */
	if (!(rw & REQ_RW) || is_sync)
		return task->pid;

	return CFQ_KEY_ASYNC;
}
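/*
 * Example (illustrative): a process with pid 4242 has its reads and
 * synchronous writes keyed to per-process queue 4242, while all of its
 * async (buffered) writes share the single CFQ_KEY_ASYNC queue.
 */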
/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only should not get the full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, int sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}
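/*
 * Worked example (illustrative, assuming HZ=1000 so the sync base
 * slice is 100 jiffies): the default prio 4 gets
 * 100 + (100/5) * (4 - 4) = 100 jiffies, prio 0 gets
 * 100 + 20 * 4 = 180, and prio 7 gets 100 + 20 * (4 - 7) = 40. Each
 * priority step is therefore worth base_slice/CFQ_SLICE_SCALE jiffies.
 */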
static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline int cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return 0;
	if (time_before(jiffies, cfqq->slice_end))
		return 0;

	return 1;
}
/*
 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
{
	sector_t last, s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if (rq_is_meta(rq1) && !rq_is_meta(rq2))
		return rq1;
	else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
		return rq2;

	s1 = rq1->sector;
	s2 = rq2->sector;

	last = cfqd->last_position;

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
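/*
 * Worked example (illustrative): with last_position = 10000 and the
 * default cfq_back_max of 16384 KiB (32768 sectors), a request at
 * sector 10100 gets d = 100, a request at sector 9900 gets
 * d = 100 * cfq_back_penalty = 200, and a request more than 32768
 * sectors behind the head is flagged as wrapped and only wins if the
 * other request wrapped as well.
 */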
/*
 * The below is the leftmost-cache rbtree addon
 */
static struct rb_node *cfq_rb_first(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	return root->left;
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;

	rb_erase(n, &root->rb);
	RB_CLEAR_NODE(n);
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		 struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev);
}

static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
				      struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqd->busy_queues - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}
/*
 * The cfqd->service_tree holds all pending cfq_queue's that have
 * requests waiting to be processed. It is sorted in the order that
 * we will service the queues.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd,
				 struct cfq_queue *cfqq, int add_front)
{
	struct rb_node **p = &cfqd->service_tree.rb.rb_node;
	struct rb_node *parent = NULL;
	unsigned long rb_key;
	int left;

	if (!add_front) {
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
		rb_key += cfqq->slice_resid;
		cfqq->slice_resid = 0;
	} else
		rb_key = 0;

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		/*
		 * same position, nothing more to do
		 */
		if (rb_key == cfqq->rb_key)
			return;

		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);
	}

	left = 1;
	while (*p) {
		struct cfq_queue *__cfqq;
		struct rb_node **n;

		parent = *p;
		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);

		/*
		 * sort RT queues first, we always want to give
		 * preference to them. IDLE queues go to the back.
		 * After that, sort on the next service time.
		 */
		if (cfq_class_rt(cfqq) > cfq_class_rt(__cfqq))
			n = &(*p)->rb_left;
		else if (cfq_class_rt(cfqq) < cfq_class_rt(__cfqq))
			n = &(*p)->rb_right;
		else if (cfq_class_idle(cfqq) < cfq_class_idle(__cfqq))
			n = &(*p)->rb_left;
		else if (cfq_class_idle(cfqq) > cfq_class_idle(__cfqq))
			n = &(*p)->rb_right;
		else if (rb_key < __cfqq->rb_key)
			n = &(*p)->rb_left;
		else
			n = &(*p)->rb_right;

		if (n == &(*p)->rb_right)
			left = 0;

		p = n;
	}

	if (left)
		cfqd->service_tree.left = &cfqq->rb_node;

	cfqq->rb_key = rb_key;
	rb_link_node(&cfqq->rb_node, parent, p);
	rb_insert_color(&cfqq->rb_node, &cfqd->service_tree.rb);
}
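/*
 * Illustrative ordering (not in the original): with one RT, two BE and
 * one IDLE queue busy, the tree reads left to right as RT first, the
 * two BE queues ordered by rb_key (earlier expected service time
 * first), then IDLE. add_front forces rb_key = 0, so a preempting
 * queue sorts leftmost within its class.
 */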
/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (cfq_cfqq_on_rr(cfqq))
		cfq_service_tree_add(cfqd, cfqq, 0);
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static inline void
cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;

	cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static inline void
cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	if (!RB_EMPTY_NODE(&cfqq->rb_node))
		cfq_rb_erase(&cfqq->rb_node, &cfqd->service_tree);

	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
}

/*
 * rb tree support functions
 */
static inline void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
		cfq_del_cfqq_rr(cfqd, cfqq);
}

static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *__alias;

	cfqq->queued[rq_is_sync(rq)]++;

	/*
	 * looks a little odd, but the first insert might return an alias.
	 * if that happens, put the alias on the dispatch list
	 */
	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
		cfq_dispatch_insert(cfqd->queue, __alias);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq);
	BUG_ON(!cfqq->next_rq);
}

static inline void
cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfq_add_rq_rb(rq);
}
static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
	struct cfq_queue *cfqq;

	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}

	return NULL;
}

static void cfq_activate_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver++;

	/*
	 * If the depth is larger than 1, it really could be queueing. But
	 * let's make the mark a little higher - idling could still be good
	 * for low queueing, and a low queueing number could also just
	 * indicate SCSI mid layer-like behaviour where limit+1 is often seen.
	 */
	if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
		cfqd->hw_tag = 1;

	cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors;
}
static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
}

static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	if (rq_is_meta(rq)) {
		WARN_ON(!cfqq->meta_pending);
		cfqq->meta_pending--;
	}
}

static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void cfq_merged_request(request_queue_t *q, struct request *req,
			       int type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		struct cfq_queue *cfqq = RQ_CFQQ(req);

		cfq_reposition_rq_rb(cfqq, req);
	}
}

static void
cfq_merged_requests(request_queue_t *q, struct request *rq,
		    struct request *next)
{
	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(next->start_time, rq->start_time))
		list_move(&rq->queuelist, &next->queuelist);

	cfq_remove_request(next);
}

static int cfq_allow_merge(request_queue_t *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	const int rw = bio_data_dir(bio);
	struct cfq_queue *cfqq;
	pid_t key;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if ((bio_data_dir(bio) == READ || bio_sync(bio)) && !rq_is_sync(rq))
		return 0;

	/*
	 * Lookup the cfqq that this bio will be queued with. Allow
	 * merge only if rq is queued there.
	 */
	key = cfq_queue_pid(current, rw, bio_sync(bio));
	cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);

	if (cfqq == RQ_CFQQ(rq))
		return 1;

	return 0;
}
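/*
 * Example (illustrative): a sync read bio is never merged into an
 * async write request, and a bio may only merge into a request that
 * already belongs to the submitter's own cfqq; cross-queue merges
 * would blur the per-process fairness accounting.
 */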
static inline void
__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (cfqq) {
		/*
		 * stop potential idle class queues waiting service
		 */
		del_timer(&cfqd->idle_class_timer);

		cfqq->slice_end = 0;
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);
		cfq_clear_cfqq_queue_new(cfqq);
	}

	cfqd->active_queue = cfqq;
}

/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    int timed_out)
{
	if (cfq_cfqq_wait_request(cfqq))
		del_timer(&cfqd->idle_slice_timer);

	cfq_clear_cfqq_must_dispatch(cfqq);
	cfq_clear_cfqq_wait_request(cfqq);

	/*
	 * store what was left of this slice, if the queue idled/timed out
	 */
	if (timed_out && !cfq_cfqq_slice_new(cfqq))
		cfqq->slice_resid = cfqq->slice_end - jiffies;

	cfq_resort_rr_list(cfqd, cfqq);

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->ioc);
		cfqd->active_cic = NULL;
	}
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, timed_out);
}
/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;
	struct rb_node *n;

	if (RB_EMPTY_ROOT(&cfqd->service_tree.rb))
		return NULL;

	n = cfq_rb_first(&cfqd->service_tree);
	cfqq = rb_entry(n, struct cfq_queue, rb_node);

	if (cfq_class_idle(cfqq)) {
		unsigned long end;

		/*
		 * if we have idle queues and no rt or be queues had
		 * pending requests, either allow immediate service if
		 * the grace period has passed or arm the idle grace
		 * timer
		 */
		end = cfqd->last_end_request + CFQ_IDLE_GRACE;
		if (time_before(jiffies, end)) {
			mod_timer(&cfqd->idle_class_timer, end);
			cfqq = NULL;
		}
	}

	return cfqq;
}

/*
 * Get and set a new active queue for service.
 */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;

	cfqq = cfq_get_next_queue(cfqd);
	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}

static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
					  struct request *rq)
{
	if (rq->sector >= cfqd->last_position)
		return rq->sector - cfqd->last_position;
	else
		return cfqd->last_position - rq->sector;
}

static inline int cfq_rq_close(struct cfq_data *cfqd, struct request *rq)
{
	struct cfq_io_context *cic = cfqd->active_cic;

	if (!sample_valid(cic->seek_samples))
		return 0;

	return cfq_dist_from_last(cfqd, rq) <= cic->seek_mean;
}

static int cfq_close_cooperator(struct cfq_data *cfq_data,
				struct cfq_queue *cfqq)
{
	/*
	 * We should notice if some of the queues are cooperating, eg
	 * working closely on the same area of the disk. In that case,
	 * we can group them together and don't waste time idling.
	 */
	return 0;
}
#define CIC_SEEKY(cic) ((cic)->seek_mean > (8 * 1024))
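/*
 * Note (illustrative): seek_mean is kept in sectors, so this marks a
 * process as "seeky" once its mean seek distance exceeds 8192 sectors,
 * i.e. 4 MiB.
 */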
static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;
	struct cfq_io_context *cic;
	unsigned long sl;

	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
	WARN_ON(cfq_cfqq_slice_new(cfqq));

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfqd->cfq_slice_idle || !cfq_cfqq_idle_window(cfqq))
		return;

	/*
	 * task has exited, don't wait
	 */
	cic = cfqd->active_cic;
	if (!cic || !cic->ioc->task)
		return;

	/*
	 * See if this prio level has a good candidate
	 */
	if (cfq_close_cooperator(cfqd, cfqq) &&
	    (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
		return;

	cfq_mark_cfqq_must_dispatch(cfqq);
	cfq_mark_cfqq_wait_request(cfqq);

	/*
	 * we don't want to idle for seeks, but we do want to allow
	 * fair distribution of slice time for a process doing back-to-back
	 * seeks. so allow a little bit of time for it to submit a new rq
	 */
	sl = cfqd->cfq_slice_idle;
	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
		sl = min(sl, msecs_to_jiffies(CFQ_MIN_TT));

	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
}
/*
 * Move request from internal lists to the request queue dispatch list.
 */
static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_remove_request(rq);
	cfqq->dispatched++;
	elv_dispatch_sort(q, rq);

	if (cfq_cfqq_sync(cfqq))
		cfqd->sync_flight++;
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *rq;
	int fifo;

	if (cfq_cfqq_fifo_expire(cfqq))
		return NULL;

	cfq_mark_cfqq_fifo_expire(cfqq);

	if (list_empty(&cfqq->fifo))
		return NULL;

	fifo = cfq_cfqq_sync(cfqq);
	rq = rq_entry_fifo(cfqq->fifo.next);

	if (time_before(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo]))
		return NULL;

	return rq;
}

static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
}
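/*
 * Worked example (illustrative): with the default
 * cfq_slice_async_rq = 2 and CFQ_PRIO_LISTS = 8, a prio 4 async queue
 * may dispatch up to 2 * (2 + 2 * (8 - 1 - 4)) = 16 requests per
 * slice, prio 0 gets 2 * (2 + 2 * 7) = 32 and prio 7 gets
 * 2 * (2 + 2 * 0) = 4.
 */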
/*
 * Select a queue for service. If we have a current active queue,
 * check whether to continue servicing it, or retrieve and set a new one.
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	/*
	 * The active queue has run out of time, expire it and select new.
	 */
	if (cfq_slice_used(cfqq))
		goto expire;

	/*
	 * The active queue has requests and isn't expired, allow it to
	 * dispatch.
	 */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		goto keep_queue;

	/*
	 * No requests pending. If the active queue still has requests in
	 * flight or is idling for a new request, allow either of these
	 * conditions to happen (or time out) before selecting a new queue.
	 */
	if (timer_pending(&cfqd->idle_slice_timer) ||
	    (cfqq->dispatched && cfq_cfqq_idle_window(cfqq))) {
		cfqq = NULL;
		goto keep_queue;
	}

expire:
	cfq_slice_expired(cfqd, 0);
new_queue:
	cfqq = cfq_set_active_queue(cfqd);
keep_queue:
	return cfqq;
}
/*
 * Dispatch some requests from cfqq, moving them to the request queue
 * dispatch list.
 */
static int
__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			int max_dispatch)
{
	int dispatched = 0;

	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

	do {
		struct request *rq;

		/*
		 * follow expired path, else get first next available
		 */
		if ((rq = cfq_check_fifo(cfqq)) == NULL)
			rq = cfqq->next_rq;

		/*
		 * finally, insert request into driver dispatch list
		 */
		cfq_dispatch_insert(cfqd->queue, rq);

		dispatched++;

		if (!cfqd->active_cic) {
			atomic_inc(&RQ_CIC(rq)->ioc->refcount);
			cfqd->active_cic = RQ_CIC(rq);
		}

		if (RB_EMPTY_ROOT(&cfqq->sort_list))
			break;

	} while (dispatched < max_dispatch);

	/*
	 * expire an async queue immediately if it has used up its slice. idle
	 * queues always expire after 1 dispatch round.
	 */
	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
	    dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
	    cfq_class_idle(cfqq))) {
		cfqq->slice_end = jiffies + 1;
		cfq_slice_expired(cfqd, 0);
	}

	return dispatched;
}
static inline int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
	int dispatched = 0;

	while (cfqq->next_rq) {
		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
		dispatched++;
	}

	BUG_ON(!list_empty(&cfqq->fifo));
	return dispatched;
}

/*
 * Drain our current requests. Used for barriers and when switching
 * io schedulers on-the-fly.
 */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
	int dispatched = 0;
	struct rb_node *n;

	while ((n = cfq_rb_first(&cfqd->service_tree)) != NULL) {
		struct cfq_queue *cfqq = rb_entry(n, struct cfq_queue, rb_node);

		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
	}

	cfq_slice_expired(cfqd, 0);

	BUG_ON(cfqd->busy_queues);

	return dispatched;
}

static int cfq_dispatch_requests(request_queue_t *q, int force)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq;
	int dispatched;

	if (!cfqd->busy_queues)
		return 0;

	if (unlikely(force))
		return cfq_forced_dispatch(cfqd);

	dispatched = 0;
	while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
		int max_dispatch;

		max_dispatch = cfqd->cfq_quantum;
		if (cfq_class_idle(cfqq))
			max_dispatch = 1;

		if (cfqq->dispatched >= max_dispatch) {
			if (cfqd->busy_queues > 1)
				break;
			if (cfqq->dispatched >= 4 * max_dispatch)
				break;
		}

		if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
			break;

		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_wait_request(cfqq);
		del_timer(&cfqd->idle_slice_timer);

		dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
	}

	return dispatched;
}
/*
 * task holds one reference to the queue, dropped when task exits. each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
 *
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;

	BUG_ON(atomic_read(&cfqq->ref) <= 0);

	if (!atomic_dec_and_test(&cfqq->ref))
		return;

	BUG_ON(rb_first(&cfqq->sort_list));
	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
	BUG_ON(cfq_cfqq_on_rr(cfqq));

	if (unlikely(cfqd->active_queue == cfqq)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	/*
	 * it's on the empty list and still hashed
	 */
	hlist_del(&cfqq->cfq_hash);
	kmem_cache_free(cfq_pool, cfqq);
}

static struct cfq_queue *
__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
		    const int hashval)
{
	struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
	struct hlist_node *entry;
	struct cfq_queue *__cfqq;

	hlist_for_each_entry(__cfqq, entry, hash_list, cfq_hash) {
		const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio);

		if (__cfqq->key == key && (__p == prio || !prio))
			return __cfqq;
	}

	return NULL;
}

static struct cfq_queue *
cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio)
{
	return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
}

static void cfq_free_io_context(struct io_context *ioc)
{
	struct cfq_io_context *__cic;
	struct rb_node *n;
	int freed = 0;

	while ((n = rb_first(&ioc->cic_root)) != NULL) {
		__cic = rb_entry(n, struct cfq_io_context, rb_node);
		rb_erase(&__cic->rb_node, &ioc->cic_root);
		kmem_cache_free(cfq_ioc_pool, __cic);
		freed++;
	}

	elv_ioc_count_mod(ioc_count, -freed);

	if (ioc_gone && !elv_ioc_count_read(ioc_count))
		complete(ioc_gone);
}
static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (unlikely(cfqq == cfqd->active_queue)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	cfq_put_queue(cfqq);
}

static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
					 struct cfq_io_context *cic)
{
	list_del_init(&cic->queue_list);
	smp_wmb();
	cic->key = NULL;

	if (cic->cfqq[ASYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[ASYNC]);
		cic->cfqq[ASYNC] = NULL;
	}

	if (cic->cfqq[SYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[SYNC]);
		cic->cfqq[SYNC] = NULL;
	}
}

static void cfq_exit_single_io_context(struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;

	if (cfqd) {
		request_queue_t *q = cfqd->queue;

		spin_lock_irq(q->queue_lock);
		__cfq_exit_single_io_context(cfqd, cic);
		spin_unlock_irq(q->queue_lock);
	}
}

/*
 * The process that ioc belongs to has exited, we need to clean up
 * and put the internal structures we have that belong to that process.
 */
static void cfq_exit_io_context(struct io_context *ioc)
{
	struct cfq_io_context *__cic;
	struct rb_node *n;

	/*
	 * put the reference this task is holding to the various queues
	 */
	n = rb_first(&ioc->cic_root);
	while (n != NULL) {
		__cic = rb_entry(n, struct cfq_io_context, rb_node);

		cfq_exit_single_io_context(__cic);
		n = rb_next(n);
	}
}
static struct cfq_io_context *
cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct cfq_io_context *cic;

	cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask, cfqd->queue->node);
	if (cic) {
		memset(cic, 0, sizeof(*cic));
		cic->last_end_request = jiffies;
		INIT_LIST_HEAD(&cic->queue_list);
		cic->dtor = cfq_free_io_context;
		cic->exit = cfq_exit_io_context;
		elv_ioc_count_inc(ioc_count);
	}

	return cic;
}

static void cfq_init_prio_data(struct cfq_queue *cfqq)
{
	struct task_struct *tsk = current;
	int ioprio_class;

	if (!cfq_cfqq_prio_changed(cfqq))
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
	switch (ioprio_class) {
	default:
		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
	case IOPRIO_CLASS_NONE:
		/*
		 * no prio set, place us in the middle of the BE classes
		 */
		cfqq->ioprio = task_nice_ioprio(tsk);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_RT:
		cfqq->ioprio = task_ioprio(tsk);
		cfqq->ioprio_class = IOPRIO_CLASS_RT;
		break;
	case IOPRIO_CLASS_BE:
		cfqq->ioprio = task_ioprio(tsk);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_IDLE:
		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
		cfqq->ioprio = 7;
		cfq_clear_cfqq_idle_window(cfqq);
		break;
	}

	/*
	 * keep track of original prio settings in case we have to temporarily
	 * elevate the priority of this queue
	 */
	cfqq->org_ioprio = cfqq->ioprio;
	cfqq->org_ioprio_class = cfqq->ioprio_class;

	cfq_clear_cfqq_prio_changed(cfqq);
}
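/*
 * Illustrative mapping: a best-effort task at level 3 (e.g. started
 * with "ionice -c 2 -n 3") lands in IOPRIO_CLASS_BE with ioprio = 3; a
 * task that never called ioprio_set() hits IOPRIO_CLASS_NONE and gets
 * a BE level derived from its nice value via task_nice_ioprio().
 */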
static inline void changed_ioprio(struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->key;
	struct cfq_queue *cfqq;
	unsigned long flags;

	if (unlikely(!cfqd))
		return;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cic->cfqq[ASYNC];
	if (cfqq) {
		struct cfq_queue *new_cfqq;
		new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC, cic->ioc->task,
					 GFP_ATOMIC);
		if (new_cfqq) {
			cic->cfqq[ASYNC] = new_cfqq;
			cfq_put_queue(cfqq);
		}
	}

	cfqq = cic->cfqq[SYNC];
	if (cfqq)
		cfq_mark_cfqq_prio_changed(cfqq);

	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_ioc_set_ioprio(struct io_context *ioc)
{
	struct cfq_io_context *cic;
	struct rb_node *n;

	ioc->ioprio_changed = 0;

	n = rb_first(&ioc->cic_root);
	while (n != NULL) {
		cic = rb_entry(n, struct cfq_io_context, rb_node);

		changed_ioprio(cic);
		n = rb_next(n);
	}
}

static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk,
	      gfp_t gfp_mask)
{
	const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
	struct cfq_queue *cfqq, *new_cfqq = NULL;
	unsigned short ioprio;

retry:
	ioprio = tsk->ioprio;
	cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);

	if (!cfqq) {
		if (new_cfqq) {
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
			/*
			 * Inform the allocator of the fact that we will
			 * just repeat this allocation if it fails, to allow
			 * the allocator to do whatever it needs to attempt to
			 * free memory.
			 */
			spin_unlock_irq(cfqd->queue->queue_lock);
			new_cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask|__GFP_NOFAIL, cfqd->queue->node);
			spin_lock_irq(cfqd->queue->queue_lock);
			goto retry;
		} else {
			cfqq = kmem_cache_alloc_node(cfq_pool, gfp_mask, cfqd->queue->node);
			if (!cfqq)
				goto out;
		}

		memset(cfqq, 0, sizeof(*cfqq));

		INIT_HLIST_NODE(&cfqq->cfq_hash);
		RB_CLEAR_NODE(&cfqq->rb_node);
		INIT_LIST_HEAD(&cfqq->fifo);

		cfqq->key = key;
		hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
		atomic_set(&cfqq->ref, 0);
		cfqq->cfqd = cfqd;

		if (key != CFQ_KEY_ASYNC)
			cfq_mark_cfqq_idle_window(cfqq);

		cfq_mark_cfqq_prio_changed(cfqq);
		cfq_mark_cfqq_queue_new(cfqq);
		cfq_init_prio_data(cfqq);
	}

	if (new_cfqq)
		kmem_cache_free(cfq_pool, new_cfqq);

	atomic_inc(&cfqq->ref);
out:
	WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
	return cfqq;
}
/*
 * We drop cfq io contexts lazily, so we may find a dead one.
 */
static void
cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
{
	WARN_ON(!list_empty(&cic->queue_list));

	rb_erase(&cic->rb_node, &ioc->cic_root);
	kmem_cache_free(cfq_ioc_pool, cic);
	elv_ioc_count_dec(ioc_count);
}

static struct cfq_io_context *
cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
{
	struct rb_node *n;
	struct cfq_io_context *cic;
	void *k, *key = cfqd;

restart:
	n = ioc->cic_root.rb_node;
	while (n) {
		cic = rb_entry(n, struct cfq_io_context, rb_node);
		/* ->key must be copied to avoid race with cfq_exit_queue() */
		k = cic->key;
		if (unlikely(!k)) {
			cfq_drop_dead_cic(ioc, cic);
			goto restart;
		}

		if (key < k)
			n = n->rb_left;
		else if (key > k)
			n = n->rb_right;
		else
			return cic;
	}

	return NULL;
}

static inline void
cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
	     struct cfq_io_context *cic)
{
	struct rb_node **p;
	struct rb_node *parent;
	struct cfq_io_context *__cic;
	unsigned long flags;
	void *k;

	cic->ioc = ioc;
	cic->key = cfqd;

restart:
	parent = NULL;
	p = &ioc->cic_root.rb_node;
	while (*p) {
		parent = *p;
		__cic = rb_entry(parent, struct cfq_io_context, rb_node);
		/* ->key must be copied to avoid race with cfq_exit_queue() */
		k = __cic->key;
		if (unlikely(!k)) {
			cfq_drop_dead_cic(ioc, __cic);
			goto restart;
		}

		if (cic->key < k)
			p = &(*p)->rb_left;
		else if (cic->key > k)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&cic->rb_node, parent, p);
	rb_insert_color(&cic->rb_node, &ioc->cic_root);

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
	list_add(&cic->queue_list, &cfqd->cic_list);
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
/*
 * Setup general io context and cfq io context. There can be several cfq
 * io contexts per general io context, if this process is doing io to more
 * than one device managed by cfq.
 */
static struct cfq_io_context *
cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
	struct io_context *ioc = NULL;
	struct cfq_io_context *cic;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	ioc = get_io_context(gfp_mask, cfqd->queue->node);
	if (!ioc)
		return NULL;

	cic = cfq_cic_rb_lookup(cfqd, ioc);
	if (cic)
		goto out;

	cic = cfq_alloc_io_context(cfqd, gfp_mask);
	if (cic == NULL)
		goto err;

	cfq_cic_link(cfqd, ioc, cic);
out:
	smp_read_barrier_depends();
	if (unlikely(ioc->ioprio_changed))
		cfq_ioc_set_ioprio(ioc);

	return cic;
err:
	put_io_context(ioc);
	return NULL;
}
static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
{
	unsigned long elapsed = jiffies - cic->last_end_request;
	unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);

	cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
	cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
	cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
}
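/*
 * Note (illustrative): the ttime fields form a fixed-point exponential
 * average with the sample count scaled by 256 and a 7/8 decay. From
 * zero, successive samples give ttime_samples = 32, 60, 84, ... so
 * sample_valid() (> 80) holds only after a few completions.
 */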
static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_io_context *cic,
		       struct request *rq)
{
	sector_t sdist;
	u64 total;

	if (cic->last_request_pos < rq->sector)
		sdist = rq->sector - cic->last_request_pos;
	else
		sdist = cic->last_request_pos - rq->sector;

	if (!cic->seek_samples) {
		cfqd->new_seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
		cfqd->new_seek_mean = cfqd->new_seek_total / 256;
	}

	/*
	 * Don't allow the seek distance to get too large from the
	 * odd fragment, pagein, etc
	 */
	if (cic->seek_samples <= 60) /* second&third seek */
		sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*1024);
	else
		sdist = min(sdist, (cic->seek_mean * 4) + 2*1024*64);

	cic->seek_samples = (7*cic->seek_samples + 256) / 8;
	cic->seek_total = (7*cic->seek_total + (u64)256*sdist) / 8;
	total = cic->seek_total + (cic->seek_samples/2);
	do_div(total, cic->seek_samples);
	cic->seek_mean = (sector_t)total;
}
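/*
 * Note (illustrative): seek_total carries the same 256x fixed-point
 * scaling, so the mean is seek_total / seek_samples, with half the
 * divisor added first to round; do_div() is used because seek_total is
 * a u64 and 64-by-32-bit division must go through the helper on 32-bit
 * architectures.
 */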
/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_context *cic)
{
	int enable_idle;

	if (!cfq_cfqq_sync(cfqq))
		return;

	enable_idle = cfq_cfqq_idle_window(cfqq);

	if (!cic->ioc->task || !cfqd->cfq_slice_idle ||
	    (cfqd->hw_tag && CIC_SEEKY(cic)))
		enable_idle = 0;
	else if (sample_valid(cic->ttime_samples)) {
		if (cic->ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
	}

	if (enable_idle)
		cfq_mark_cfqq_idle_window(cfqq);
	else
		cfq_clear_cfqq_idle_window(cfqq);
}

/*
 * Check if new_cfqq should preempt the currently active queue. Return 0 for
 * no or if we aren't sure, a 1 will cause a preempt.
 */
static int
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
		   struct request *rq)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		return 0;

	if (cfq_slice_used(cfqq))
		return 1;

	if (cfq_class_idle(new_cfqq))
		return 0;

	if (cfq_class_idle(cfqq))
		return 1;

	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
		return 1;

	/*
	 * So both queues are sync. Let the new request get disk time if
	 * it's a metadata request and the current queue is doing regular IO.
	 */
	if (rq_is_meta(rq) && !cfqq->meta_pending)
		return 1;

	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
		return 0;

	/*
	 * if this request is as-good as one we would expect from the
	 * current cfqq, let it preempt
	 */
	if (cfq_rq_close(cfqd, rq))
		return 1;

	return 0;
}
/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_slice_expired(cfqd, 1);

	/*
	 * Put the new queue at the front of the current list,
	 * so we know that it will be selected next.
	 */
	BUG_ON(!cfq_cfqq_on_rr(cfqq));

	cfq_service_tree_add(cfqd, cfqq, 1);

	cfqq->slice_end = 0;
	cfq_mark_cfqq_slice_new(cfqq);
}

/*
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
{
	struct cfq_io_context *cic = RQ_CIC(rq);

	if (rq_is_meta(rq))
		cfqq->meta_pending++;

	cfq_update_io_thinktime(cfqd, cic);
	cfq_update_io_seektime(cfqd, cic, rq);
	cfq_update_idle_window(cfqd, cfqq, cic);

	cic->last_request_pos = rq->sector + rq->nr_sectors;
	cfqq->last_request_pos = cic->last_request_pos;

	if (cfqq == cfqd->active_queue) {
		/*
		 * if we are waiting for a request for this queue, let it rip
		 * immediately and flag that we must not expire this queue
		 * just now
		 */
		if (cfq_cfqq_wait_request(cfqq)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
			del_timer(&cfqd->idle_slice_timer);
			blk_start_queueing(cfqd->queue);
		}
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
		/*
		 * not the active queue - expire the current slice if it is
		 * idle and has expired its mean thinktime, or if this new
		 * queue has some old slice time left and is of higher priority
		 */
		cfq_preempt_queue(cfqd, cfqq);
		cfq_mark_cfqq_must_dispatch(cfqq);
		blk_start_queueing(cfqd->queue);
	}
}
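
/*
 * A new request enters the elevator: set up the queue's priority data if
 * needed, add the request to the sorted rb tree and the time-ordered FIFO,
 * then run the enqueue checks above.
 */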
static void cfq_insert_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_init_prio_data(cfqq);

	cfq_add_rq_rb(rq);

	list_add_tail(&rq->queuelist, &cfqq->fifo);

	cfq_rq_enqueued(cfqd, cfqq, rq);
}
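
/*
 * A request completed in the driver: drop the in-driver/dispatched counts,
 * record the completion time for future idling decisions, and expire or
 * idle the active queue as appropriate.
 */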
static void cfq_completed_request(request_queue_t *q, struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);
	unsigned long now;

	now = jiffies;

	WARN_ON(!cfqd->rq_in_driver);
	WARN_ON(!cfqq->dispatched);
	cfqd->rq_in_driver--;
	cfqq->dispatched--;

	if (cfq_cfqq_sync(cfqq))
		cfqd->sync_flight--;

	if (!cfq_class_idle(cfqq))
		cfqd->last_end_request = now;

	if (sync)
		RQ_CIC(rq)->last_end_request = now;

	/*
	 * If this is the active queue, check if it needs to be expired,
	 * or if we want to idle in case it has no pending requests.
	 */
	if (cfqd->active_queue == cfqq) {
		if (cfq_cfqq_slice_new(cfqq)) {
			cfq_set_prio_slice(cfqd, cfqq);
			cfq_clear_cfqq_slice_new(cfqq);
		}
		if (cfq_slice_used(cfqq))
			cfq_slice_expired(cfqd, 1);
		else if (sync && RB_EMPTY_ROOT(&cfqq->sort_list))
			cfq_arm_slice_timer(cfqd);
	}

	if (!cfqd->rq_in_driver)
		cfq_schedule_dispatch(cfqd);
}

/*
 * we temporarily boost lower priority queues if they are holding fs exclusive
 * resources. they are boosted to normal prio (CLASS_BE/4)
 */
static void cfq_prio_boost(struct cfq_queue *cfqq)
{
	if (has_fs_excl()) {
		/*
		 * boost idle prio on transactions that would lock out other
		 * users of the filesystem
		 */
		if (cfq_class_idle(cfqq))
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
		if (cfqq->ioprio > IOPRIO_NORM)
			cfqq->ioprio = IOPRIO_NORM;
	} else {
		/*
		 * check if we need to unboost the queue
		 */
		if (cfqq->ioprio_class != cfqq->org_ioprio_class)
			cfqq->ioprio_class = cfqq->org_ioprio_class;
		if (cfqq->ioprio != cfqq->org_ioprio)
			cfqq->ioprio = cfqq->org_ioprio;
	}
}
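
/*
 * Grant a guaranteed allocation (ELV_MQUEUE_MUST) if the queue is waiting
 * for a request or is flagged must_alloc, but only once per slice: the
 * must_alloc_slice flag records that the guarantee has been used.
 */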
static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
	if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
	    !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
}

static int cfq_may_queue(request_queue_t *q, int rw)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_queue *cfqq;
	unsigned int key;

	key = cfq_queue_pid(tsk, rw, rw & REQ_RW_SYNC);

	/*
	 * don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * so just look up a possibly existing queue, or return 'may queue'
	 * if that fails
	 */
	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
	if (cfqq) {
		cfq_init_prio_data(cfqq);
		cfq_prio_boost(cfqq);

		return __cfq_may_queue(cfqq);
	}

	return ELV_MQUEUE_MAY;
}

/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq) {
		const int rw = rq_data_dir(rq);

		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;

		put_io_context(RQ_CIC(rq)->ioc);

		rq->elevator_private = NULL;
		rq->elevator_private2 = NULL;

		cfq_put_queue(cfqq);
	}
}

/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(request_queue_t *q, struct request *rq, gfp_t gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_context *cic;
	const int rw = rq_data_dir(rq);
	const int is_sync = rq_is_sync(rq);
	pid_t key = cfq_queue_pid(tsk, rw, is_sync);
	struct cfq_queue *cfqq;
	unsigned long flags;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	cic = cfq_get_io_context(cfqd, gfp_mask);

	spin_lock_irqsave(q->queue_lock, flags);

	if (!cic)
		goto queue_fail;

	if (!cic->cfqq[is_sync]) {
		cfqq = cfq_get_queue(cfqd, key, tsk, gfp_mask);
		if (!cfqq)
			goto queue_fail;

		cic->cfqq[is_sync] = cfqq;
	} else
		cfqq = cic->cfqq[is_sync];

	cfqq->allocated[rw]++;
	cfq_clear_cfqq_must_alloc(cfqq);
	atomic_inc(&cfqq->ref);

	spin_unlock_irqrestore(q->queue_lock, flags);

	rq->elevator_private = cic;
	rq->elevator_private2 = cfqq;
	return 0;

queue_fail:
	if (cic)
		put_io_context(cic->ioc);

	cfq_schedule_dispatch(cfqd);
	spin_unlock_irqrestore(q->queue_lock, flags);
	return 1;
}
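
/*
 * Deferred-work handler (cfqd->unplug_work): restart dispatching from
 * process context with the queue lock held.
 */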
static void cfq_kick_queue(struct work_struct *work)
{
	struct cfq_data *cfqd =
		container_of(work, struct cfq_data, unplug_work);
	request_queue_t *q = cfqd->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queueing(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;
	int timed_out = 1;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	if ((cfqq = cfqd->active_queue) != NULL) {
		timed_out = 0;

		/*
		 * expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * only expire and reinvoke request handler, if there are
		 * other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
			goto out_kick;
		}
	}
expire:
	cfq_slice_expired(cfqd, timed_out);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

/*
 * Timer running if an idle class queue is waiting for service
 */
static void cfq_idle_class_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	unsigned long flags, end;

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	/*
	 * race with a non-idle queue, reset timer
	 */
	end = cfqd->last_end_request + CFQ_IDLE_GRACE;
	if (!time_after_eq(jiffies, end))
		mod_timer(&cfqd->idle_class_timer, end);
	else
		cfq_schedule_dispatch(cfqd);

	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}
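
/*
 * Kill both idle timers and synchronize against any unplug work still
 * pending on the queue.
 */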
static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	del_timer_sync(&cfqd->idle_class_timer);
	blk_sync_queue(cfqd->queue);
}
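
/*
 * Elevator teardown: expire any active queue, detach every cfq_io_context
 * still linked to this device, then free the queue hash and cfq_data.
 */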
static void cfq_exit_queue(elevator_t *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	request_queue_t *q = cfqd->queue;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	while (!list_empty(&cfqd->cic_list)) {
		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
							struct cfq_io_context,
							queue_list);

		__cfq_exit_single_io_context(cfqd, cic);
	}

	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);

	kfree(cfqd->cfq_hash);
	kfree(cfqd);
}
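
/*
 * Per-device setup: allocate cfq_data on the queue's node, initialize the
 * service tree, cfqq hash, idle timers and unplug work, and seed the
 * tunables from the module-wide defaults.
 */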
static void *cfq_init_queue(request_queue_t *q)
{
	struct cfq_data *cfqd;
	int i;

	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL, q->node);
	if (!cfqd)
		return NULL;

	memset(cfqd, 0, sizeof(*cfqd));

	cfqd->service_tree = CFQ_RB_ROOT;
	INIT_LIST_HEAD(&cfqd->cic_list);

	cfqd->cfq_hash = kmalloc_node(sizeof(struct hlist_head) * CFQ_QHASH_ENTRIES, GFP_KERNEL, q->node);
	if (!cfqd->cfq_hash)
		goto out_free;

	for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
		INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);

	cfqd->queue = q;

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	init_timer(&cfqd->idle_class_timer);
	cfqd->idle_class_timer.function = cfq_idle_class_timer;
	cfqd->idle_class_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;

	return cfqd;
out_free:
	kfree(cfqd);
	return NULL;
}
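
/*
 * Slab caches backing struct cfq_queue and struct cfq_io_context
 * allocations; torn down again on module exit or failed setup.
 */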
static void cfq_slab_kill(void)
{
	if (cfq_pool)
		kmem_cache_destroy(cfq_pool);
	if (cfq_ioc_pool)
		kmem_cache_destroy(cfq_ioc_pool);
}

static int __init cfq_slab_setup(void)
{
	cfq_pool = kmem_cache_create("cfq_pool", sizeof(struct cfq_queue), 0, 0,
					NULL, NULL);
	if (!cfq_pool)
		goto fail;

	cfq_ioc_pool = kmem_cache_create("cfq_ioc_pool",
			sizeof(struct cfq_io_context), 0, 0, NULL, NULL);
	if (!cfq_ioc_pool)
		goto fail;

	return 0;
fail:
	cfq_slab_kill();
	return -ENOMEM;
}

/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%u\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}
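
/*
 * Generate one sysfs show handler per tunable; when __CONV is set the
 * internal jiffies value is reported in milliseconds. While cfq is the
 * active elevator, the files appear in the queue's iosched directory,
 * e.g. (device name is just an example):
 *
 *	cat /sys/block/sda/queue/iosched/slice_idle
 */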
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(elevator_t *e, char *page)			\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
#undef SHOW_FUNCTION
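
/*
 * Generate the matching store handlers: parse the user value, clamp it to
 * [MIN, MAX], and convert milliseconds back to jiffies when __CONV is set.
 * Out-of-range input is silently clamped rather than rejected.
 */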
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0);
#undef STORE_FUNCTION

#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	__ATTR_NULL
};
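
/*
 * Hook table registered with the elevator core: merging, dispatch,
 * request accounting and setup/teardown entry points for "cfq".
 */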
static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn =		cfq_merge,
		.elevator_merged_fn =		cfq_merged_request,
		.elevator_merge_req_fn =	cfq_merged_requests,
		.elevator_allow_merge_fn =	cfq_allow_merge,
		.elevator_dispatch_fn =		cfq_dispatch_requests,
		.elevator_add_req_fn =		cfq_insert_request,
		.elevator_activate_req_fn =	cfq_activate_request,
		.elevator_deactivate_req_fn =	cfq_deactivate_request,
		.elevator_queue_empty_fn =	cfq_queue_empty,
		.elevator_completed_req_fn =	cfq_completed_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_set_req_fn =		cfq_set_request,
		.elevator_put_req_fn =		cfq_put_request,
		.elevator_may_queue_fn =	cfq_may_queue,
		.elevator_init_fn =		cfq_init_queue,
		.elevator_exit_fn =		cfq_exit_queue,
		.trim =				cfq_free_io_context,
	},
	.elevator_attrs =	cfq_attrs,
	.elevator_name =	"cfq",
	.elevator_owner =	THIS_MODULE,
};

static int __init cfq_init(void)
{
	int ret;

	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

	if (cfq_slab_setup())
		return -ENOMEM;

	ret = elv_register(&iosched_cfq);
	if (ret)
		cfq_slab_kill();

	return ret;
}

static void __exit cfq_exit(void)
{
	DECLARE_COMPLETION_ONSTACK(all_gone);

	elv_unregister(&iosched_cfq);
	ioc_gone = &all_gone;
	/* ioc_gone's update must be visible before reading ioc_count */
	smp_wmb();
	if (elv_ioc_count_read(ioc_count))
		wait_for_completion(ioc_gone);
	synchronize_rcu();
	cfq_slab_kill();
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");