cfq-iosched.c
/*
 * CFQ, or complete fairness queueing, disk scheduler.
 *
 * Based on ideas from a previously unfinished io
 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include "blk.h"
#include "cfq.h"

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 8;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
static int cfq_group_idle = HZ / 125;
static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY		(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)
#define CFQ_SERVICE_SHIFT	12

#define CFQQ_SEEK_THR		(sector_t)(8 * 100)
#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
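/*
 * Illustrative reading of the macro above: ->seek_history acts as a
 * 32-bit shift register of per-request "distance exceeded CFQQ_SEEK_THR"
 * samples, so CFQQ_SEEKY() flags a queue as seeky once more than
 * 32/8 = 4 of its last 32 requests were that far from the previous one.
 */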
#define RQ_CIC(rq)		icq_to_cic((rq)->elv.icq)
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elv.priv[0])
#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])

static struct kmem_cache *cfq_pool;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)	((samples) > 80)
#define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)

struct cfq_ttime {
	unsigned long last_end_request;
	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
};

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned count;
	unsigned total_weight;
	u64 min_vdisktime;
	struct cfq_ttime ttime;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, \
			.ttime = {.last_end_request = jiffies,},}

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	int ref;
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* prio tree member */
	struct rb_node p_node;
	/* prio tree root we belong to, if any */
	struct rb_root *p_root;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	/* time when queue got scheduled in to dispatch first request. */
	unsigned long dispatch_start;
	unsigned int allocated_slice;
	unsigned int slice_dispatch;
	/* time when first request from queue completed and slice started. */
	unsigned long slice_start;
	unsigned long slice_end;
	long slice_resid;

	/* pending priority requests */
	int prio_pending;
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class;

	pid_t pid;

	u32 seek_history;
	sector_t last_request_pos;

	struct cfq_rb_root *service_tree;
	struct cfq_queue *new_cfqq;
	struct cfq_group *cfqg;
	/* Number of sectors dispatched from queue in single dispatch round */
	unsigned long nr_sectors;
};

/*
 * First index in the service_trees.
 * IDLE is handled separately, so it has negative index
 */
enum wl_prio_t {
	BE_WORKLOAD = 0,
	RT_WORKLOAD = 1,
	IDLE_WORKLOAD = 2,
	CFQ_PRIO_NR,
};

/*
 * Second index in the service_trees.
 */
enum wl_type_t {
	ASYNC_WORKLOAD = 0,
	SYNC_NOIDLE_WORKLOAD = 1,
	SYNC_WORKLOAD = 2
};

/* This is per cgroup per device grouping structure */
struct cfq_group {
	/* group service_tree member */
	struct rb_node rb_node;

	/* group service_tree key */
	u64 vdisktime;
	unsigned int weight;
	unsigned int new_weight;
	bool needs_update;

	/* number of cfqq currently on this group */
	int nr_cfqq;

	/*
	 * Per group busy queues average. Useful for workload slice calc. We
	 * create the array for each prio class but at run time it is used
	 * only for RT and BE class and slot for IDLE class remains unused.
	 * This is primarily done to avoid confusion and a gcc warning.
	 */
	unsigned int busy_queues_avg[CFQ_PRIO_NR];
	/*
	 * rr lists of queues with requests. We maintain service trees for
	 * RT and BE classes. These trees are subdivided in subclasses
	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
	 * class there is no subclassification and all the cfq queues go on
	 * a single tree service_tree_idle.
	 * Counts are embedded in the cfq_rb_root
	 */
	struct cfq_rb_root service_trees[2][3];
	struct cfq_rb_root service_tree_idle;

	unsigned long saved_workload_slice;
	enum wl_type_t saved_workload;
	enum wl_prio_t saved_serving_prio;
	struct blkio_group blkg;
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	struct hlist_node cfqd_node;
	int ref;
#endif
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;
	struct cfq_ttime ttime;
};

struct cfq_io_cq {
	struct io_cq		icq;		/* must be the first member */
	struct cfq_queue	*cfqq[2];
	struct cfq_ttime	ttime;
};

/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;
	/* Root service tree for cfq_groups */
	struct cfq_rb_root grp_service_tree;
	struct cfq_group *root_group;

	/*
	 * The priority currently being served
	 */
	enum wl_prio_t serving_prio;
	enum wl_type_t serving_type;
	unsigned long workload_expires;
	struct cfq_group *serving_group;

	/*
	 * Each priority tree is sorted by next_request position. These
	 * trees are used when determining if two or more queues are
	 * interleaving requests (see cfq_close_cooperator).
	 */
	struct rb_root prio_trees[CFQ_PRIO_LISTS];

	unsigned int busy_queues;
	unsigned int busy_sync_queues;

	int rq_in_driver;
	int rq_in_flight[2];

	/*
	 * queue-depth detection
	 */
	int rq_queued;
	int hw_tag;
	/*
	 * hw_tag can be
	 * -1 => indeterminate, (cfq will behave as if NCQ is present, to allow better detection)
	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
	 *  0 => no NCQ
	 */
	int hw_tag_est_depth;
	unsigned int hw_tag_samples;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_cq *active_cic;

	/*
	 * async queue for each priority case
	 */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

	sector_t last_position;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;
	unsigned int cfq_group_idle;
	unsigned int cfq_latency;

	/*
	 * Fallback dummy cfqq for extreme OOM conditions
	 */
	struct cfq_queue oom_cfqq;

	unsigned long last_delayed_sync;

	/* List of cfq groups being managed on this device */
	struct hlist_head cfqg_list;

	/* Number of groups which are on blkcg->blkg_list */
	unsigned int nr_blkcg_linked_grps;
};

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);

static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
					    enum wl_prio_t prio,
					    enum wl_type_t type)
{
	if (!cfqg)
		return NULL;

	if (prio == IDLE_WORKLOAD)
		return &cfqg->service_tree_idle;

	return &cfqg->service_trees[prio][type];
}

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be split */
	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
CFQ_CFQQ_FNS(split_coop);
CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS
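/*
 * For example, CFQ_CFQQ_FNS(on_rr) above expands to the three helpers
 * cfq_mark_cfqq_on_rr(), cfq_clear_cfqq_on_rr() and cfq_cfqq_on_rr(),
 * which set, clear and test the CFQ_CFQQ_FLAG_on_rr bit in cfqq->flags.
 */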
#ifdef CONFIG_CFQ_GROUP_IOSCHED
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
			cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
			blkg_path(&(cfqq)->cfqg->blkg), ##args)

#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) \
	blk_add_trace_msg((cfqd)->queue, "%s " fmt, \
			blkg_path(&(cfqg)->blkg), ##args)

#else
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)
#endif
#define cfq_log(cfqd, fmt, args...) \
	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

/* Traverses through cfq group service trees */
#define for_each_cfqg_st(cfqg, i, j, st) \
	for (i = 0; i <= IDLE_WORKLOAD; i++) \
		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j] \
			: &cfqg->service_tree_idle; \
			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
			(i == IDLE_WORKLOAD && j == 0); \
			j++, st = i < IDLE_WORKLOAD ? \
			&cfqg->service_trees[i][j] : NULL)
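/*
 * The iterator visits (BE, ASYNC..SYNC), then (RT, ASYNC..SYNC), then the
 * single idle tree. Typical use, as in cfq_init_cfqg_base() below:
 *
 *	struct cfq_rb_root *st;
 *	int i, j;
 *
 *	for_each_cfqg_st(cfqg, i, j, st)
 *		*st = CFQ_RB_ROOT;
 */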
static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
	struct cfq_ttime *ttime, bool group_idle)
{
	unsigned long slice;
	if (!sample_valid(ttime->ttime_samples))
		return false;
	if (group_idle)
		slice = cfqd->cfq_group_idle;
	else
		slice = cfqd->cfq_slice_idle;
	return ttime->ttime_mean > slice;
}

static inline bool iops_mode(struct cfq_data *cfqd)
{
	/*
	 * If we are not idling on queues and it is an NCQ drive, parallel
	 * execution of requests is on and measuring time is not possible
	 * in most cases unless we drive shallower queue depths, which in
	 * turn becomes a performance bottleneck. In such cases switch to
	 * providing fairness in terms of number of IOs.
	 */
	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
		return true;
	else
		return false;
}

static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
{
	if (cfq_class_idle(cfqq))
		return IDLE_WORKLOAD;
	if (cfq_class_rt(cfqq))
		return RT_WORKLOAD;
	return BE_WORKLOAD;
}

static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
{
	if (!cfq_cfqq_sync(cfqq))
		return ASYNC_WORKLOAD;
	if (!cfq_cfqq_idle_window(cfqq))
		return SYNC_NOIDLE_WORKLOAD;
	return SYNC_WORKLOAD;
}

static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
					   struct cfq_data *cfqd,
					   struct cfq_group *cfqg)
{
	if (wl == IDLE_WORKLOAD)
		return cfqg->service_tree_idle.count;

	return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
		+ cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
		+ cfqg->service_trees[wl][SYNC_WORKLOAD].count;
}

static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
					 struct cfq_group *cfqg)
{
	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
		+ cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
}

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
				       struct io_context *, gfp_t);

static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
	/* cic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct cfq_io_cq, icq);
}

static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
					       struct io_context *ioc)
{
	if (ioc)
		return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
	return NULL;
}

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
{
	return cic->cfqq[is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
				bool is_sync)
{
	cic->cfqq[is_sync] = cfqq;
}

static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
{
	return cic->icq.q->elevator->elevator_data;
}

/*
 * We regard a request as SYNC if it's either a read or has the SYNC bit
 * set (in which case it could also be a direct WRITE).
 */
static inline bool cfq_bio_sync(struct bio *bio)
{
	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
}

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues) {
		cfq_log(cfqd, "schedule dispatch");
		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
	}
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only should not get the full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}
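/*
 * Worked example, assuming HZ=1000 so cfq_slice[1] = HZ/10 = 100 jiffies:
 * each prio step is worth base_slice/CFQ_SLICE_SCALE = 20 jiffies, so a
 * sync queue gets 180 jiffies at prio 0, 100 at the default prio 4, and
 * 40 at prio 7.
 */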
static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
{
	u64 d = delta << CFQ_SERVICE_SHIFT;

	d = d * BLKIO_WEIGHT_DEFAULT;
	do_div(d, cfqg->weight);
	return d;
}
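/*
 * Example: with the default blkio weight of 500, a group at weight 250
 * accrues vdisktime twice as fast as a default-weight group for the same
 * charge, so higher-weighted groups drift right more slowly and get
 * picked from the service tree more often.
 */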
static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta > 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);
	if (delta < 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}
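/*
 * Both helpers compare through a signed delta rather than "a < b" so the
 * result stays correct even once the u64 vdisktime values wrap, the same
 * trick the CPU scheduler uses for vruntime comparisons.
 */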
static void update_min_vdisktime(struct cfq_rb_root *st)
{
	struct cfq_group *cfqg;

	if (st->left) {
		cfqg = rb_entry_cfqg(st->left);
		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
						  cfqg->vdisktime);
	}
}

/*
 * get averaged number of queues of RT/BE priority.
 * average is updated with a formula that gives more weight to higher numbers,
 * so that it quickly follows sudden increases and decays slowly.
 */
static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
					struct cfq_group *cfqg, bool rt)
{
	unsigned min_q, max_q;
	unsigned mult = cfq_hist_divisor - 1;
	unsigned round = cfq_hist_divisor / 2;
	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);

	min_q = min(cfqg->busy_queues_avg[rt], busy);
	max_q = max(cfqg->busy_queues_avg[rt], busy);
	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
		cfq_hist_divisor;
	return cfqg->busy_queues_avg[rt];
}
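/*
 * Example, with cfq_hist_divisor = 4 the update is
 * avg = (3 * max(avg, busy) + min(avg, busy) + 2) / 4: if avg = 1 and
 * busy jumps to 9, the first update already yields (27 + 1 + 2) / 4 = 7,
 * while a drop from avg = 9 to busy = 1 decays one step per update:
 * 7, 6, 5, ...
 */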
static inline unsigned
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;

	return cfq_target_latency * cfqg->weight / st->total_weight;
}

static inline unsigned
cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
	if (cfqd->cfq_latency) {
		/*
		 * interested queues (we consider only the ones with the same
		 * priority class in the cfq group)
		 */
		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
						cfq_class_rt(cfqq));
		unsigned sync_slice = cfqd->cfq_slice[1];
		unsigned expect_latency = sync_slice * iq;
		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);

		if (expect_latency > group_slice) {
			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
			/* scale low_slice according to IO priority
			 * and sync vs async */
			unsigned low_slice =
				min(slice, base_low_slice * slice / sync_slice);
			/* the adapted slice value is scaled to fit all iqs
			 * into the target latency */
			slice = max(slice * group_slice / expect_latency,
				    low_slice);
		}
	}
	return slice;
}
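/*
 * Example, assuming HZ=1000: five busy same-class queues in a group whose
 * share of the 300 ms target latency is 150 ms give an expected latency of
 * 5 * 100 ms, so a 100 ms prio-4 slice is shrunk to 100 * 150/500 = 30 ms,
 * bounded from below by the low_slice floor derived from cfq_slice_idle.
 */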
static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);

	cfqq->slice_start = jiffies;
	cfqq->slice_end = jiffies + slice;
	cfqq->allocated_slice = slice;
	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline bool cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return false;
	if (time_before(jiffies, cfqq->slice_end))
		return false;

	return true;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
{
	sector_t s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) != rq_is_sync(rq2))
		return rq_is_sync(rq1) ? rq1 : rq2;

	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
		return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
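/*
 * Example: with the head at sector 1000 and the default back penalty of 2,
 * a request at 1100 (d1 = 100) ties with one at 950 (d2 = 50 * 2 = 100)
 * and the tie goes to the higher sector, while a request more than
 * cfq_back_max KiB behind the head counts as wrapped and loses to any
 * non-wrapped request.
 */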
/*
 * The below is leftmost cache rbtree addon
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry(root->left, struct cfq_queue, rb_node);

	return NULL;
}

static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_cfqg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		 struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
}

static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
				      struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}

static inline s64
cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	return cfqg->vdisktime - st->min_vdisktime;
}

static void
__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct cfq_group *__cfqg;
	s64 key = cfqg_key(st, cfqg);
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__cfqg = rb_entry_cfqg(parent);

		if (key < cfqg_key(st, __cfqg))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &cfqg->rb_node;

	rb_link_node(&cfqg->rb_node, parent, node);
	rb_insert_color(&cfqg->rb_node, &st->rb);
}

static void
cfq_update_group_weight(struct cfq_group *cfqg)
{
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
	if (cfqg->needs_update) {
		cfqg->weight = cfqg->new_weight;
		cfqg->needs_update = false;
	}
}

static void
cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

	cfq_update_group_weight(cfqg);
	__cfq_group_service_tree_add(st, cfqg);
	st->total_weight += cfqg->weight;
}

static void
cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	struct cfq_group *__cfqg;
	struct rb_node *n;

	cfqg->nr_cfqq++;
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		return;

	/*
	 * Currently put the group at the end. Later implement something
	 * so that groups get lesser vtime based on their weights, so that
	 * a group does not lose its entire share if it was not
	 * continuously backlogged.
	 */
	n = rb_last(&st->rb);
	if (n) {
		__cfqg = rb_entry_cfqg(n);
		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
	} else
		cfqg->vdisktime = st->min_vdisktime;
	cfq_group_service_tree_add(st, cfqg);
}

static void
cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	st->total_weight -= cfqg->weight;
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		cfq_rb_erase(&cfqg->rb_node, st);
}

static void
cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;

	BUG_ON(cfqg->nr_cfqq < 1);
	cfqg->nr_cfqq--;

	/* If there are other cfq queues under this group, don't delete it */
	if (cfqg->nr_cfqq)
		return;

	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
	cfq_group_service_tree_del(st, cfqg);
	cfqg->saved_workload_slice = 0;
	cfq_blkiocg_update_dequeue_stats(&cfqg->blkg, 1);
}

static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
						unsigned int *unaccounted_time)
{
	unsigned int slice_used;

	/*
	 * Queue got expired before even a single request completed or
	 * got expired immediately after first request completion.
	 */
	if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
		/*
		 * Also charge the seek time incurred to the group, otherwise
		 * if there are multiple queues in the group, each can dispatch
		 * a single request on seeky media and cause lots of seek time
		 * and group will never know it.
		 */
		slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
					1);
	} else {
		slice_used = jiffies - cfqq->slice_start;
		if (slice_used > cfqq->allocated_slice) {
			*unaccounted_time = slice_used - cfqq->allocated_slice;
			slice_used = cfqq->allocated_slice;
		}
		if (time_after(cfqq->slice_start, cfqq->dispatch_start))
			*unaccounted_time += cfqq->slice_start -
					cfqq->dispatch_start;
	}

	return slice_used;
}
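/*
 * Example: a queue scheduled in at t=100 that is expired at t=130 without
 * completing a request is charged the whole 30 jiffies it held the drive;
 * one whose slice ran 30 jiffies against a 20-jiffy allocated_slice is
 * charged 20, with the remaining 10 reported as unaccounted_time.
 */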
static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
			     struct cfq_queue *cfqq)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	unsigned int used_sl, charge, unaccounted_sl = 0;
	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
			- cfqg->service_tree_idle.count;

	BUG_ON(nr_sync < 0);
	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);

	if (iops_mode(cfqd))
		charge = cfqq->slice_dispatch;
	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
		charge = cfqq->allocated_slice;

	/* Can't update vdisktime while group is on service tree */
	cfq_group_service_tree_del(st, cfqg);
	cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
	/* If a new weight was requested, update now, off tree */
	cfq_group_service_tree_add(st, cfqg);

	/* This group is being expired. Save the context */
	if (time_after(cfqd->workload_expires, jiffies)) {
		cfqg->saved_workload_slice = cfqd->workload_expires
						- jiffies;
		cfqg->saved_workload = cfqd->serving_type;
		cfqg->saved_serving_prio = cfqd->serving_prio;
	} else
		cfqg->saved_workload_slice = 0;

	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
			st->min_vdisktime);
	cfq_log_cfqq(cfqq->cfqd, cfqq,
		     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
		     used_sl, cfqq->slice_dispatch, charge,
		     iops_mode(cfqd), cfqq->nr_sectors);
	cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl,
					  unaccounted_sl);
	cfq_blkiocg_set_start_empty_time(&cfqg->blkg);
}

/**
 * cfq_init_cfqg_base - initialize base part of a cfq_group
 * @cfqg: cfq_group to initialize
 *
 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
 * is enabled or not.
 */
static void cfq_init_cfqg_base(struct cfq_group *cfqg)
{
	struct cfq_rb_root *st;
	int i, j;

	for_each_cfqg_st(cfqg, i, j, st)
		*st = CFQ_RB_ROOT;
	RB_CLEAR_NODE(&cfqg->rb_node);

	cfqg->ttime.last_end_request = jiffies;
}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
{
	if (blkg)
		return container_of(blkg, struct cfq_group, blkg);
	return NULL;
}

static void cfq_update_blkio_group_weight(struct request_queue *q,
					  struct blkio_group *blkg,
					  unsigned int weight)
{
	struct cfq_group *cfqg = cfqg_of_blkg(blkg);
	cfqg->new_weight = weight;
	cfqg->needs_update = true;
}

static void cfq_link_blkio_group(struct request_queue *q,
				 struct blkio_group *blkg)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct backing_dev_info *bdi = &q->backing_dev_info;
	struct cfq_group *cfqg = cfqg_of_blkg(blkg);
	unsigned int major, minor;

	/*
	 * Add group onto cgroup list. It might happen that bdi->dev is
	 * not initialized yet. Initialize this new group without major
	 * and minor info and this info will be filled in once a new thread
	 * comes for IO.
	 */
	if (bdi->dev) {
		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
		blkg->dev = MKDEV(major, minor);
	}

	cfqd->nr_blkcg_linked_grps++;

	/* Add group on cfqd list */
	hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
}

static struct blkio_group *cfq_alloc_blkio_group(struct request_queue *q,
						 struct blkio_cgroup *blkcg)
{
	struct cfq_group *cfqg;

	cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, q->node);
	if (!cfqg)
		return NULL;

	cfq_init_cfqg_base(cfqg);
	cfqg->weight = blkcg_get_weight(blkcg, cfqg->blkg.dev);

	/*
	 * Take the initial reference that will be released on destroy.
	 * This can be thought of as a joint reference by cgroup and
	 * elevator which will be dropped by either elevator exit
	 * or cgroup deletion path depending on who is exiting first.
	 */
	cfqg->ref = 1;

	return &cfqg->blkg;
}

/*
 * Search for the cfq group current task belongs to. request_queue lock must
 * be held.
 */
static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
						struct blkio_cgroup *blkcg)
{
	struct request_queue *q = cfqd->queue;
	struct backing_dev_info *bdi = &q->backing_dev_info;
	struct cfq_group *cfqg = NULL;

	/* avoid lookup for the common case where there's no blkio cgroup */
	if (blkcg == &blkio_root_cgroup) {
		cfqg = cfqd->root_group;
	} else {
		struct blkio_group *blkg;

		blkg = blkg_lookup_create(blkcg, q, BLKIO_POLICY_PROP, false);
		if (!IS_ERR(blkg))
			cfqg = cfqg_of_blkg(blkg);
	}

	if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
		unsigned int major, minor;

		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
		cfqg->blkg.dev = MKDEV(major, minor);
	}

	return cfqg;
}

static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
{
	cfqg->ref++;
	return cfqg;
}

static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
{
	/* Currently, all async queues are mapped to root group */
	if (!cfq_cfqq_sync(cfqq))
		cfqg = cfqq->cfqd->root_group;

	cfqq->cfqg = cfqg;
	/* cfqq reference on cfqg */
	cfqq->cfqg->ref++;
}

static void cfq_put_cfqg(struct cfq_group *cfqg)
{
	struct cfq_rb_root *st;
	int i, j;

	BUG_ON(cfqg->ref <= 0);
	cfqg->ref--;
	if (cfqg->ref)
		return;
	for_each_cfqg_st(cfqg, i, j, st)
		BUG_ON(!RB_EMPTY_ROOT(&st->rb));
	free_percpu(cfqg->blkg.stats_cpu);
	kfree(cfqg);
}

static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	/* Something wrong if we are trying to remove same group twice */
	BUG_ON(hlist_unhashed(&cfqg->cfqd_node));

	hlist_del_init(&cfqg->cfqd_node);

	BUG_ON(cfqd->nr_blkcg_linked_grps <= 0);
	cfqd->nr_blkcg_linked_grps--;

	/*
	 * Put the reference taken at the time of creation so that when all
	 * queues are gone, group can be destroyed.
	 */
	cfq_put_cfqg(cfqg);
}

static bool cfq_release_cfq_groups(struct cfq_data *cfqd)
{
	struct hlist_node *pos, *n;
	struct cfq_group *cfqg;
	bool empty = true;

	hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
		/*
		 * If cgroup removal path got to blk_group first and removed
		 * it from cgroup list, then it will take care of destroying
		 * cfqg also.
		 */
		if (!cfq_blkiocg_del_blkio_group(&cfqg->blkg))
			cfq_destroy_cfqg(cfqd, cfqg);
		else
			empty = false;
	}
	return empty;
}

/*
 * Blk cgroup controller notification saying that blkio_group object is being
 * delinked as associated cgroup object is going away. That also means that
 * no new IO will come in this group. So get rid of this group as soon as
 * any pending IO in the group is finished.
 *
 * This function is called under rcu_read_lock(). key is the rcu protected
 * pointer. That means @q is a valid request_queue pointer as long as we
 * hold the rcu read lock.
 *
 * @q was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL as even if elevator was exiting, cgroup deletion
 * path got to it first.
 */
static void cfq_unlink_blkio_group(struct request_queue *q,
				   struct blkio_group *blkg)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static struct elevator_type iosched_cfq;

static bool cfq_clear_queue(struct request_queue *q)
{
	lockdep_assert_held(q->queue_lock);

	/* shoot down blkgs iff the current elevator is cfq */
	if (!q->elevator || q->elevator->type != &iosched_cfq)
		return true;

	return cfq_release_cfq_groups(q->elevator->elevator_data);
}

#else /* GROUP_IOSCHED */
static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
						struct blkio_cgroup *blkcg)
{
	return cfqd->root_group;
}

static inline struct cfq_group *cfq_ref_get_cfqg(struct cfq_group *cfqg)
{
	return cfqg;
}

static inline void
cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
{
	cfqq->cfqg = cfqg;
}

static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}

#endif /* GROUP_IOSCHED */

/*
 * The cfqd->service_trees holds all pending cfq_queue's that have
 * requests waiting to be processed. It is sorted in the order that
 * we will service the queues.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
				 bool add_front)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;
	unsigned long rb_key;
	struct cfq_rb_root *service_tree;
	int left;
	int new_cfqq = 1;

	service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
						cfqq_type(cfqq));
	if (cfq_class_idle(cfqq)) {
		rb_key = CFQ_IDLE_DELAY;
		parent = rb_last(&service_tree->rb);
		if (parent && parent != &cfqq->rb_node) {
			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
			rb_key += __cfqq->rb_key;
		} else
			rb_key += jiffies;
	} else if (!add_front) {
		/*
		 * Get our rb key offset. Subtract any residual slice
		 * value carried from last service. A negative resid
		 * count indicates slice overrun, and this should position
		 * the next service time further away in the tree.
		 */
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
		rb_key -= cfqq->slice_resid;
		cfqq->slice_resid = 0;
	} else {
		rb_key = -HZ;
		__cfqq = cfq_rb_first(service_tree);
		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
	}

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		new_cfqq = 0;
		/*
		 * same position, nothing more to do
		 */
		if (rb_key == cfqq->rb_key &&
		    cfqq->service_tree == service_tree)
			return;

		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
		cfqq->service_tree = NULL;
	}

	left = 1;
	parent = NULL;
	cfqq->service_tree = service_tree;
	p = &service_tree->rb.rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);

		/*
		 * sort by key, that represents service time.
		 */
		if (time_before(rb_key, __cfqq->rb_key))
			n = &(*p)->rb_left;
		else {
			n = &(*p)->rb_right;
			left = 0;
		}

		p = n;
	}

	if (left)
		service_tree->left = &cfqq->rb_node;

	cfqq->rb_key = rb_key;
	rb_link_node(&cfqq->rb_node, parent, p);
	rb_insert_color(&cfqq->rb_node, &service_tree->rb);
	service_tree->count++;
	if (add_front || !new_cfqq)
		return;
	cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
}
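/*
 * Note the rb_key arithmetic above: a queue that overran its previous
 * slice carries a negative slice_resid, so "rb_key -= cfqq->slice_resid"
 * pushes its next service further right in the tree, while add_front
 * builds a key just below the current leftmost so the queue is served
 * next.
 */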
static struct cfq_queue *
cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
		     sector_t sector, struct rb_node **ret_parent,
		     struct rb_node ***rb_link)
{
	struct rb_node **p, *parent;
	struct cfq_queue *cfqq = NULL;

	parent = NULL;
	p = &root->rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		cfqq = rb_entry(parent, struct cfq_queue, p_node);

		/*
		 * Sort strictly based on sector. Smallest to the left,
		 * largest to the right.
		 */
		if (sector > blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_right;
		else if (sector < blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_left;
		else
			break;
		p = n;
		cfqq = NULL;
	}

	*ret_parent = parent;
	if (rb_link)
		*rb_link = p;
	return cfqq;
}

static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;

	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	if (cfq_class_idle(cfqq))
		return;
	if (!cfqq->next_rq)
		return;

	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
				      blk_rq_pos(cfqq->next_rq), &parent, &p);
	if (!__cfqq) {
		rb_link_node(&cfqq->p_node, parent, p);
		rb_insert_color(&cfqq->p_node, cfqq->p_root);
	} else
		cfqq->p_root = NULL;
}

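/*
 * Note on the lookup above: cfq_prio_tree_lookup() only returns a
 * queue on an exact sector match.  In that case another queue already
 * occupies this position in the tree, so this cfqq is simply left out
 * of the priority tree (p_root stays NULL) rather than inserted as a
 * duplicate key.
 */
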
/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (cfq_cfqq_on_rr(cfqq)) {
		cfq_service_tree_add(cfqd, cfqq, 0);
		cfq_prio_tree_add(cfqd, cfqq);
	}
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;
	if (cfq_cfqq_sync(cfqq))
		cfqd->busy_sync_queues++;

	cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
		cfqq->service_tree = NULL;
	}
	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
	if (cfq_cfqq_sync(cfqq))
		cfqd->busy_sync_queues--;
}

/*
 * rb tree support functions
 */
static void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
		/*
		 * Queue will be deleted from service tree when we actually
		 * expire it later. Right now just remove it from prio tree
		 * as it is empty.
		 */
		if (cfqq->p_root) {
			rb_erase(&cfqq->p_node, cfqq->p_root);
			cfqq->p_root = NULL;
		}
	}
}

static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *prev;

	cfqq->queued[rq_is_sync(rq)]++;
	elv_rb_add(&cfqq->sort_list, rq);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	prev = cfqq->next_rq;
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);

	/*
	 * adjust priority tree position, if ->next_rq changes
	 */
	if (prev != cfqq->next_rq)
		cfq_prio_tree_add(cfqd, cfqq);

	BUG_ON(!cfqq->next_rq);
}

static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
					   rq_data_dir(rq), rq_is_sync(rq));
	cfq_add_rq_rb(rq);
	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
					&cfqq->cfqd->serving_group->blkg,
					rq_data_dir(rq), rq_is_sync(rq));
}

static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	struct cfq_io_cq *cic;
	struct cfq_queue *cfqq;

	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return NULL;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}

	return NULL;
}

static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver++;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
		     cfqd->rq_in_driver);

	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
}

static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
		     cfqd->rq_in_driver);
}

static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	cfqq->cfqd->rq_queued--;
	cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg,
					   rq_data_dir(rq), rq_is_sync(rq));
	if (rq->cmd_flags & REQ_PRIO) {
		WARN_ON(!cfqq->prio_pending);
		cfqq->prio_pending--;
	}
}

static int cfq_merge(struct request_queue *q, struct request **req,
		     struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

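/*
 * Front-merge lookup sketch: for a bio covering sectors [s, s + n),
 * cfq_find_rq_fmerge() probes the queue's sort_list for a request
 * that starts at sector s + n.  If one exists and elv_rq_merge_ok()
 * agrees, the bio can be glued onto the front of that request.
 */
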
static void cfq_merged_request(struct request_queue *q, struct request *req,
			       int type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		struct cfq_queue *cfqq = RQ_CFQQ(req);

		cfq_reposition_rq_rb(cfqq, req);
	}
}

static void cfq_bio_merged(struct request_queue *q, struct request *req,
			   struct bio *bio)
{
	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(req))->blkg,
					   bio_data_dir(bio), cfq_bio_sync(bio));
}

static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
		    struct request *next)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = q->elevator->elevator_data;

	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
		list_move(&rq->queuelist, &next->queuelist);
		rq_set_fifo_time(rq, rq_fifo_time(next));
	}

	if (cfqq->next_rq == next)
		cfqq->next_rq = rq;
	cfq_remove_request(next);
	cfq_blkiocg_update_io_merged_stats(&(RQ_CFQG(rq))->blkg,
					   rq_data_dir(next), rq_is_sync(next));

	cfqq = RQ_CFQQ(next);
	/*
	 * all requests of this queue are merged to other queues, delete it
	 * from the service tree. If it's the active_queue,
	 * cfq_dispatch_requests() will choose to expire it or do idle
	 */
	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
	    cfqq != cfqd->active_queue)
		cfq_del_cfqq_rr(cfqd, cfqq);
}

static int cfq_allow_merge(struct request_queue *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_cq *cic;
	struct cfq_queue *cfqq;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
		return false;

	/*
	 * Lookup the cfqq that this bio will be queued with and allow
	 * merge only if rq is queued there.
	 */
	cic = cfq_cic_lookup(cfqd, current->io_context);
	if (!cic)
		return false;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	return cfqq == RQ_CFQQ(rq);
}

static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	del_timer(&cfqd->idle_slice_timer);
	cfq_blkiocg_update_idle_time_stats(&cfqq->cfqg->blkg);
}

static void __cfq_set_active_queue(struct cfq_data *cfqd,
				   struct cfq_queue *cfqq)
{
	if (cfqq) {
		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
			     cfqd->serving_prio, cfqd->serving_type);
		cfq_blkiocg_update_avg_queue_size_stats(&cfqq->cfqg->blkg);
		cfqq->slice_start = 0;
		cfqq->dispatch_start = jiffies;
		cfqq->allocated_slice = 0;
		cfqq->slice_end = 0;
		cfqq->slice_dispatch = 0;
		cfqq->nr_sectors = 0;

		cfq_clear_cfqq_wait_request(cfqq);
		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);

		cfq_del_timer(cfqd, cfqq);
	}

	cfqd->active_queue = cfqq;
}

/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    bool timed_out)
{
	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);

	if (cfq_cfqq_wait_request(cfqq))
		cfq_del_timer(cfqd, cfqq);

	cfq_clear_cfqq_wait_request(cfqq);
	cfq_clear_cfqq_wait_busy(cfqq);

	/*
	 * If this cfqq is shared between multiple processes, check to
	 * make sure that those processes are still issuing I/Os within
	 * the mean seek distance. If not, it may be time to break the
	 * queues apart again.
	 */
	if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
		cfq_mark_cfqq_split_coop(cfqq);

	/*
	 * store what was left of this slice, if the queue idled/timed out
	 */
	if (timed_out) {
		if (cfq_cfqq_slice_new(cfqq))
			cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
		else
			cfqq->slice_resid = cfqq->slice_end - jiffies;
		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
	}

	cfq_group_served(cfqd, cfqq->cfqg, cfqq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
		cfq_del_cfqq_rr(cfqd, cfqq);

	cfq_resort_rr_list(cfqd, cfqq);

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->icq.ioc);
		cfqd->active_cic = NULL;
	}
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, timed_out);
}

/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
	struct cfq_rb_root *service_tree =
		service_tree_for(cfqd->serving_group, cfqd->serving_prio,
				 cfqd->serving_type);

	if (!cfqd->rq_queued)
		return NULL;

	/* There is nothing to dispatch */
	if (!service_tree)
		return NULL;
	if (RB_EMPTY_ROOT(&service_tree->rb))
		return NULL;
	return cfq_rb_first(service_tree);
}

static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
{
	struct cfq_group *cfqg;
	struct cfq_queue *cfqq;
	int i, j;
	struct cfq_rb_root *st;

	if (!cfqd->rq_queued)
		return NULL;

	cfqg = cfq_get_next_cfqg(cfqd);
	if (!cfqg)
		return NULL;

	for_each_cfqg_st(cfqg, i, j, st)
		if ((cfqq = cfq_rb_first(st)) != NULL)
			return cfqq;
	return NULL;
}

/*
 * Get and set a new active queue for service.
 */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
					      struct cfq_queue *cfqq)
{
	if (!cfqq)
		cfqq = cfq_get_next_queue(cfqd);

	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}

static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
					  struct request *rq)
{
	if (blk_rq_pos(rq) >= cfqd->last_position)
		return blk_rq_pos(rq) - cfqd->last_position;
	else
		return cfqd->last_position - blk_rq_pos(rq);
}

static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			       struct request *rq)
{
	return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
}

static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
				    struct cfq_queue *cur_cfqq)
{
	struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
	struct rb_node *parent, *node;
	struct cfq_queue *__cfqq;
	sector_t sector = cfqd->last_position;

	if (RB_EMPTY_ROOT(root))
		return NULL;

	/*
	 * First, if we find a request starting at the end of the last
	 * request, choose it.
	 */
	__cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
	if (__cfqq)
		return __cfqq;

	/*
	 * If the exact sector wasn't found, the parent of the NULL leaf
	 * will contain the closest sector.
	 */
	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
		return __cfqq;

	if (blk_rq_pos(__cfqq->next_rq) < sector)
		node = rb_next(&__cfqq->p_node);
	else
		node = rb_prev(&__cfqq->p_node);
	if (!node)
		return NULL;

	__cfqq = rb_entry(node, struct cfq_queue, p_node);
	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
		return __cfqq;

	return NULL;
}

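/*
 * The neighbour probing above leans on the prio tree being keyed by
 * sector: on a failed exact lookup, the parent of the NULL leaf is
 * the nearest key on one side, and a single rb_next()/rb_prev() step
 * toward the target sector reaches the nearest key on the other.
 * Either candidate is accepted only if it lies within CFQQ_CLOSE_THR
 * of the last dispatch position.
 */
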
/*
 * cfqd - obvious
 * cur_cfqq - passed in so that we don't decide that the current queue is
 *            closely cooperating with itself.
 *
 * So, basically we're assuming that cur_cfqq has dispatched at least
 * one request, and that cfqd->last_position reflects a position on the disk
 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
 * assumption.
 */
static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
					      struct cfq_queue *cur_cfqq)
{
	struct cfq_queue *cfqq;

	if (cfq_class_idle(cur_cfqq))
		return NULL;
	if (!cfq_cfqq_sync(cur_cfqq))
		return NULL;
	if (CFQQ_SEEKY(cur_cfqq))
		return NULL;

	/*
	 * Don't search priority tree if it's the only queue in the group.
	 */
	if (cur_cfqq->cfqg->nr_cfqq == 1)
		return NULL;

	/*
	 * We should notice if some of the queues are cooperating, eg
	 * working closely on the same area of the disk. In that case,
	 * we can group them together and don't waste time idling.
	 */
	cfqq = cfqq_close(cfqd, cur_cfqq);
	if (!cfqq)
		return NULL;

	/* If new queue belongs to different cfq_group, don't choose it */
	if (cur_cfqq->cfqg != cfqq->cfqg)
		return NULL;

	/*
	 * It only makes sense to merge sync queues.
	 */
	if (!cfq_cfqq_sync(cfqq))
		return NULL;
	if (CFQQ_SEEKY(cfqq))
		return NULL;

	/*
	 * Do not merge queues of different priority classes
	 */
	if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
		return NULL;

	return cfqq;
}

/*
 * Determine whether we should enforce idle window for this queue.
 */
static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	enum wl_prio_t prio = cfqq_prio(cfqq);
	struct cfq_rb_root *service_tree = cfqq->service_tree;

	BUG_ON(!service_tree);
	BUG_ON(!service_tree->count);

	if (!cfqd->cfq_slice_idle)
		return false;

	/* We never do for idle class queues. */
	if (prio == IDLE_WORKLOAD)
		return false;

	/* We do for queues that were marked with idle window flag. */
	if (cfq_cfqq_idle_window(cfqq) &&
	    !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
		return true;

	/*
	 * Otherwise, we do only if they are the last ones
	 * in their service tree.
	 */
	if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
	    !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
		return true;
	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
		     service_tree->count);
	return false;
}

static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;
	struct cfq_io_cq *cic;
	unsigned long sl, group_idle = 0;

	/*
	 * SSD device without seek penalty, disable idling. But only do so
	 * for devices that support queuing, otherwise we still have a problem
	 * with sync vs async workloads.
	 */
	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
		return;

	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
	WARN_ON(cfq_cfqq_slice_new(cfqq));

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfq_should_idle(cfqd, cfqq)) {
		/* no queue idling. Check for group idling */
		if (cfqd->cfq_group_idle)
			group_idle = cfqd->cfq_group_idle;
		else
			return;
	}

	/*
	 * still active requests from this queue, don't idle
	 */
	if (cfqq->dispatched)
		return;

	/*
	 * task has exited, don't wait
	 */
	cic = cfqd->active_cic;
	if (!cic || !atomic_read(&cic->icq.ioc->nr_tasks))
		return;

	/*
	 * If our average think time is larger than the remaining time
	 * slice, then don't idle. This avoids overrunning the allotted
	 * time slice.
	 */
	if (sample_valid(cic->ttime.ttime_samples) &&
	    (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
		cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
			     cic->ttime.ttime_mean);
		return;
	}

	/* There are other queues in the group, don't do group idle */
	if (group_idle && cfqq->cfqg->nr_cfqq > 1)
		return;

	cfq_mark_cfqq_wait_request(cfqq);

	if (group_idle)
		sl = cfqd->cfq_group_idle;
	else
		sl = cfqd->cfq_slice_idle;

	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
	cfq_blkiocg_update_set_idle_time_stats(&cfqq->cfqg->blkg);
	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
		     group_idle ? 1 : 0);
}

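/*
 * Illustrative numbers for the think-time gate above: if 10 ms of
 * slice remain but the task's mean think time is 12 ms, arming the
 * timer would likely overrun the slice, so we bail out.  When we do
 * idle, the timer length is cfq_group_idle or cfq_slice_idle; both
 * default to 8 ms (HZ / 125) unless changed via the iosched sysfs
 * tunables.
 */
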
/*
 * Move request from internal lists to the request queue dispatch list.
 */
static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");

	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
	cfq_remove_request(rq);
	cfqq->dispatched++;
	(RQ_CFQG(rq))->dispatched++;
	elv_dispatch_sort(q, rq);

	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
	cfqq->nr_sectors += blk_rq_sectors(rq);
	cfq_blkiocg_update_dispatch_stats(&cfqq->cfqg->blkg, blk_rq_bytes(rq),
					  rq_data_dir(rq), rq_is_sync(rq));
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct request *rq = NULL;

	if (cfq_cfqq_fifo_expire(cfqq))
		return NULL;

	cfq_mark_cfqq_fifo_expire(cfqq);

	if (list_empty(&cfqq->fifo))
		return NULL;

	rq = rq_entry_fifo(cfqq->fifo.next);
	if (time_before(jiffies, rq_fifo_time(rq)))
		rq = NULL;

	cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
	return rq;
}

static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
}

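/*
 * Example (assuming the default cfq_slice_async_rq of 2, and
 * IOPRIO_BE_NR = 8): an ioprio-4 best-effort queue may dispatch up to
 * 2 * 2 * (8 - 4) = 16 requests per slice round, while ioprio 7 gets
 * only 2 * 2 * 1 = 4.
 */
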
/*
 * Must be called with the queue_lock held.
 */
static int cfqq_process_refs(struct cfq_queue *cfqq)
{
	int process_refs, io_refs;

	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
	process_refs = cfqq->ref - io_refs;
	BUG_ON(process_refs < 0);
	return process_refs;
}

static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
{
	int process_refs, new_process_refs;
	struct cfq_queue *__cfqq;

	/*
	 * If there are no process references on the new_cfqq, then it is
	 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
	 * chain may have dropped their last reference (not just their
	 * last process reference).
	 */
	if (!cfqq_process_refs(new_cfqq))
		return;

	/* Avoid a circular list and skip interim queue merges */
	while ((__cfqq = new_cfqq->new_cfqq)) {
		if (__cfqq == cfqq)
			return;
		new_cfqq = __cfqq;
	}

	process_refs = cfqq_process_refs(cfqq);
	new_process_refs = cfqq_process_refs(new_cfqq);
	/*
	 * If the process for the cfqq has gone away, there is no
	 * sense in merging the queues.
	 */
	if (process_refs == 0 || new_process_refs == 0)
		return;

	/*
	 * Merge in the direction of the lesser amount of work.
	 */
	if (new_process_refs >= process_refs) {
		cfqq->new_cfqq = new_cfqq;
		new_cfqq->ref += process_refs;
	} else {
		new_cfqq->new_cfqq = cfqq;
		cfqq->ref += new_process_refs;
	}
}

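/*
 * Direction example: if cfqq holds 3 process references and new_cfqq
 * holds 1, the chain is pointed the other way (new_cfqq->new_cfqq =
 * cfqq) so that the lightly referenced queue migrates into the busier
 * one, and cfqq->ref absorbs new_cfqq's single process reference.
 */
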
static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
				    struct cfq_group *cfqg, enum wl_prio_t prio)
{
	struct cfq_queue *queue;
	int i;
	bool key_valid = false;
	unsigned long lowest_key = 0;
	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;

	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
		/* select the one with lowest rb_key */
		queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
		if (queue &&
		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
			lowest_key = queue->rb_key;
			cur_best = i;
			key_valid = true;
		}
	}

	return cur_best;
}

static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	unsigned slice;
	unsigned count;
	struct cfq_rb_root *st;
	unsigned group_slice;
	enum wl_prio_t original_prio = cfqd->serving_prio;

	/* Choose next priority. RT > BE > IDLE */
	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
		cfqd->serving_prio = RT_WORKLOAD;
	else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
		cfqd->serving_prio = BE_WORKLOAD;
	else {
		cfqd->serving_prio = IDLE_WORKLOAD;
		cfqd->workload_expires = jiffies + 1;
		return;
	}

	if (original_prio != cfqd->serving_prio)
		goto new_workload;

	/*
	 * For RT and BE, we have to choose also the type
	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
	 * expiration time
	 */
	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
	count = st->count;

	/*
	 * check workload expiration, and that we still have other queues ready
	 */
	if (count && !time_after(jiffies, cfqd->workload_expires))
		return;

new_workload:
	/* otherwise select new workload type */
	cfqd->serving_type =
		cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
	count = st->count;

	/*
	 * the workload slice is computed as a fraction of target latency
	 * proportional to the number of queues in that workload, over
	 * all the queues in the same priority class
	 */
	group_slice = cfq_group_slice(cfqd, cfqg);

	slice = group_slice * count /
		max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
		      cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));

	if (cfqd->serving_type == ASYNC_WORKLOAD) {
		unsigned int tmp;

		/*
		 * Async queues are currently system wide. Just taking
		 * proportion of queues within the same group will lead to
		 * a higher async ratio system wide, as generally the root
		 * group is going to have higher weight. A more accurate
		 * thing would be to calculate the system wide async/sync
		 * ratio.
		 */
		tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
		tmp = tmp/cfqd->busy_queues;
		slice = min_t(unsigned, slice, tmp);

		/* async workload slice is scaled down according to
		 * the sync/async slice ratio. */
		slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
	} else
		/* sync workload slice is at least 2 * cfq_slice_idle */
		slice = max(slice, 2 * cfqd->cfq_slice_idle);

	slice = max_t(unsigned, slice, CFQ_MIN_TT);
	cfq_log(cfqd, "workload slice:%d", slice);
	cfqd->workload_expires = jiffies + slice;
}

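/*
 * Slice sizing example (numbers illustrative, assuming the default
 * 300 ms cfq_target_latency): a group slice of 300 ms with 2 of the
 * 6 busy queues of this prio class on the chosen workload tree yields
 * a 300 * 2 / 6 = 100 ms workload slice, which is then floored at
 * 2 * cfq_slice_idle for sync workloads and at CFQ_MIN_TT in any case.
 */
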
static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	struct cfq_group *cfqg;

	if (RB_EMPTY_ROOT(&st->rb))
		return NULL;
	cfqg = cfq_rb_first_group(st);
	update_min_vdisktime(st);
	return cfqg;
}

static void cfq_choose_cfqg(struct cfq_data *cfqd)
{
	struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);

	cfqd->serving_group = cfqg;

	/* Restore the workload type data */
	if (cfqg->saved_workload_slice) {
		cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
		cfqd->serving_type = cfqg->saved_workload;
		cfqd->serving_prio = cfqg->saved_serving_prio;
	} else
		cfqd->workload_expires = jiffies - 1;

	choose_service_tree(cfqd, cfqg);
}

/*
 * Select a queue for service. If we have a current active queue,
 * check whether to continue servicing it, or retrieve and set a new one.
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq, *new_cfqq = NULL;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	if (!cfqd->rq_queued)
		return NULL;

	/*
	 * We were waiting for group to get backlogged. Expire the queue
	 */
	if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
		goto expire;

	/*
	 * The active queue has run out of time, expire it and select new.
	 */
	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
		/*
		 * If slice had not expired at the completion of last request
		 * we might not have turned on wait_busy flag. Don't expire
		 * the queue yet. Allow the group to get backlogged.
		 *
		 * The very fact that we have used the slice, that means we
		 * have been idling all along on this queue and it should be
		 * ok to wait for this request to complete.
		 */
		if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
		    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
			cfqq = NULL;
			goto keep_queue;
		} else
			goto check_group_idle;
	}

	/*
	 * The active queue has requests and isn't expired, allow it to
	 * dispatch.
	 */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		goto keep_queue;

	/*
	 * If another queue has a request waiting within our mean seek
	 * distance, let it run. The expire code will check for close
	 * cooperators and put the close queue at the front of the service
	 * tree. If possible, merge the expiring queue with the new cfqq.
	 */
	new_cfqq = cfq_close_cooperator(cfqd, cfqq);
	if (new_cfqq) {
		if (!cfqq->new_cfqq)
			cfq_setup_merge(cfqq, new_cfqq);
		goto expire;
	}

	/*
	 * No requests pending. If the active queue still has requests in
	 * flight or is idling for a new request, allow either of these
	 * conditions to happen (or time out) before selecting a new queue.
	 */
	if (timer_pending(&cfqd->idle_slice_timer)) {
		cfqq = NULL;
		goto keep_queue;
	}

	/*
	 * This is a deep seek queue, but the device is much faster than
	 * the queue can deliver, don't idle
	 */
	if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
	    (cfq_cfqq_slice_new(cfqq) ||
	     (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
		cfq_clear_cfqq_deep(cfqq);
		cfq_clear_cfqq_idle_window(cfqq);
	}

	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
		cfqq = NULL;
		goto keep_queue;
	}

	/*
	 * If group idle is enabled and there are requests dispatched from
	 * this group, wait for requests to complete.
	 */
check_group_idle:
	if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
	    cfqq->cfqg->dispatched &&
	    !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
		cfqq = NULL;
		goto keep_queue;
	}

expire:
	cfq_slice_expired(cfqd, 0);
new_queue:
	/*
	 * Current queue expired. Check if we have to switch to a new
	 * service tree
	 */
	if (!new_cfqq)
		cfq_choose_cfqg(cfqd);

	cfqq = cfq_set_active_queue(cfqd, new_cfqq);
keep_queue:
	return cfqq;
}

static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
	int dispatched = 0;

	while (cfqq->next_rq) {
		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
		dispatched++;
	}

	BUG_ON(!list_empty(&cfqq->fifo));

	/* By default cfqq is not expired if it is empty. Do it explicitly */
	__cfq_slice_expired(cfqq->cfqd, cfqq, 0);
	return dispatched;
}

/*
 * Drain our current requests. Used for barriers and when switching
 * io schedulers on-the-fly.
 */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;
	int dispatched = 0;

	/* Expire the timeslice of the current active queue first */
	cfq_slice_expired(cfqd, 0);
	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
		__cfq_set_active_queue(cfqd, cfqq);
		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
	}

	BUG_ON(cfqd->busy_queues);

	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
	return dispatched;
}

static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
				       struct cfq_queue *cfqq)
{
	/* the queue hasn't finished any request, can't estimate */
	if (cfq_cfqq_slice_new(cfqq))
		return true;
	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
		       cfqq->slice_end))
		return true;

	return false;
}

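/*
 * The check above extrapolates service time: with the default 8 ms
 * slice_idle and 4 requests already dispatched, the queue is charged
 * 32 ms of anticipated completion time; if jiffies plus that charge
 * runs past slice_end, the slice is treated as nearly used up.
 */
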
static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned int max_dispatch;

	/*
	 * Drain async requests before we start sync IO
	 */
	if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
		return false;

	/*
	 * If this is an async queue and we have sync IO in flight, let it wait
	 */
	if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
		return false;

	max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
	if (cfq_class_idle(cfqq))
		max_dispatch = 1;

	/*
	 * Does this cfqq already have too much IO in flight?
	 */
	if (cfqq->dispatched >= max_dispatch) {
		bool promote_sync = false;
		/*
		 * idle queue must always only have a single IO in flight
		 */
		if (cfq_class_idle(cfqq))
			return false;

		/*
		 * If there is only one sync queue
		 * we can ignore async queue here and give the sync
		 * queue no dispatch limit. The reason is a sync queue can
		 * preempt async queue, limiting the sync queue doesn't make
		 * sense. This is useful for aiostress test.
		 */
		if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
			promote_sync = true;

		/*
		 * We have other queues, don't allow more IO from this one
		 */
		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
		    !promote_sync)
			return false;

		/*
		 * Sole queue user, no limit
		 */
		if (cfqd->busy_queues == 1 || promote_sync)
			max_dispatch = -1;
		else
			/*
			 * Normally we start throttling cfqq when cfq_quantum/2
			 * requests have been dispatched. But we can drive
			 * deeper queue depths at the beginning of slice
			 * subjected to upper limit of cfq_quantum.
			 */
			max_dispatch = cfqd->cfq_quantum;
	}

	/*
	 * Async queues must wait a bit before being allowed dispatch.
	 * We also ramp up the dispatch depth gradually for async IO,
	 * based on the last sync IO we serviced
	 */
	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
		unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
		unsigned int depth;

		depth = last_sync / cfqd->cfq_slice[1];
		if (!depth && !cfqq->dispatched)
			depth = 1;
		if (depth < max_dispatch)
			max_dispatch = depth;
	}

	/*
	 * If we're below the current max, allow a dispatch
	 */
	return cfqq->dispatched < max_dispatch;
}

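/*
 * Async ramp-up sketch for the cfq_latency branch above (assuming the
 * default 100 ms sync slice in cfq_slice[1]): an async queue earns one
 * slot of dispatch depth per 100 ms elapsed since the last delayed
 * sync IO, so right after sync activity it crawls at depth 1 and only
 * approaches the max_dispatch computed above once the sync side has
 * been quiet for a while.
 */
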
/*
 * Dispatch a request from cfqq, moving them to the request queue
 * dispatch list.
 */
static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct request *rq;

	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

	if (!cfq_may_dispatch(cfqd, cfqq))
		return false;

	/*
	 * follow expired path, else get first next available
	 */
	rq = cfq_check_fifo(cfqq);
	if (!rq)
		rq = cfqq->next_rq;

	/*
	 * insert request into driver dispatch list
	 */
	cfq_dispatch_insert(cfqd->queue, rq);

	if (!cfqd->active_cic) {
		struct cfq_io_cq *cic = RQ_CIC(rq);

		atomic_long_inc(&cic->icq.ioc->refcount);
		cfqd->active_cic = cic;
	}

	return true;
}

/*
 * Find the cfqq that we need to service and move a request from that to the
 * dispatch list
 */
static int cfq_dispatch_requests(struct request_queue *q, int force)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq;

	if (!cfqd->busy_queues)
		return 0;

	if (unlikely(force))
		return cfq_forced_dispatch(cfqd);

	cfqq = cfq_select_queue(cfqd);
	if (!cfqq)
		return 0;

	/*
	 * Dispatch a request from this cfqq, if it is allowed
	 */
	if (!cfq_dispatch_request(cfqd, cfqq))
		return 0;

	cfqq->slice_dispatch++;
	cfq_clear_cfqq_must_dispatch(cfqq);

	/*
	 * expire an async queue immediately if it has used up its slice. idle
	 * queue always expire after 1 dispatch round.
	 */
	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
	    cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
	    cfq_class_idle(cfqq))) {
		cfqq->slice_end = jiffies + 1;
		cfq_slice_expired(cfqd, 0);
	}

	cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
	return 1;
}

/*
 * task holds one reference to the queue, dropped when task exits. each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
 *
 * Each cfq queue took a reference on the parent group. Drop it now.
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct cfq_group *cfqg;

	BUG_ON(cfqq->ref <= 0);

	cfqq->ref--;
	if (cfqq->ref)
		return;

	cfq_log_cfqq(cfqd, cfqq, "put_queue");
	BUG_ON(rb_first(&cfqq->sort_list));
	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
	cfqg = cfqq->cfqg;

	if (unlikely(cfqd->active_queue == cfqq)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	BUG_ON(cfq_cfqq_on_rr(cfqq));
	kmem_cache_free(cfq_pool, cfqq);
	cfq_put_cfqg(cfqg);
}

static void cfq_put_cooperator(struct cfq_queue *cfqq)
{
	struct cfq_queue *__cfqq, *next;

	/*
	 * If this queue was scheduled to merge with another queue, be
	 * sure to drop the reference taken on that queue (and others in
	 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
	 */
	__cfqq = cfqq->new_cfqq;
	while (__cfqq) {
		if (__cfqq == cfqq) {
			WARN(1, "cfqq->new_cfqq loop detected\n");
			break;
		}
		next = __cfqq->new_cfqq;
		cfq_put_queue(__cfqq);
		__cfqq = next;
	}
}

static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (unlikely(cfqq == cfqd->active_queue)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	cfq_put_cooperator(cfqq);

	cfq_put_queue(cfqq);
}

static void cfq_init_icq(struct io_cq *icq)
{
	struct cfq_io_cq *cic = icq_to_cic(icq);

	cic->ttime.last_end_request = jiffies;
}

static void cfq_exit_icq(struct io_cq *icq)
{
	struct cfq_io_cq *cic = icq_to_cic(icq);
	struct cfq_data *cfqd = cic_to_cfqd(cic);

	if (cic->cfqq[BLK_RW_ASYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
		cic->cfqq[BLK_RW_ASYNC] = NULL;
	}

	if (cic->cfqq[BLK_RW_SYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
		cic->cfqq[BLK_RW_SYNC] = NULL;
	}
}

static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
{
	struct task_struct *tsk = current;
	int ioprio_class;

	if (!cfq_cfqq_prio_changed(cfqq))
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
	switch (ioprio_class) {
	default:
		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
	case IOPRIO_CLASS_NONE:
		/*
		 * no prio set, inherit CPU scheduling settings
		 */
		cfqq->ioprio = task_nice_ioprio(tsk);
		cfqq->ioprio_class = task_nice_ioclass(tsk);
		break;
	case IOPRIO_CLASS_RT:
		cfqq->ioprio = task_ioprio(ioc);
		cfqq->ioprio_class = IOPRIO_CLASS_RT;
		break;
	case IOPRIO_CLASS_BE:
		cfqq->ioprio = task_ioprio(ioc);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_IDLE:
		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
		cfqq->ioprio = 7;
		cfq_clear_cfqq_idle_window(cfqq);
		break;
	}

	/*
	 * keep track of original prio settings in case we have to temporarily
	 * elevate the priority of this queue
	 */
	cfqq->org_ioprio = cfqq->ioprio;
	cfq_clear_cfqq_prio_changed(cfqq);
}

static void changed_ioprio(struct cfq_io_cq *cic)
{
	struct cfq_data *cfqd = cic_to_cfqd(cic);
	struct cfq_queue *cfqq;

	if (unlikely(!cfqd))
		return;

	cfqq = cic->cfqq[BLK_RW_ASYNC];
	if (cfqq) {
		struct cfq_queue *new_cfqq;
		new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->icq.ioc,
					 GFP_ATOMIC);
		if (new_cfqq) {
			cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
			cfq_put_queue(cfqq);
		}
	}

	cfqq = cic->cfqq[BLK_RW_SYNC];
	if (cfqq)
		cfq_mark_cfqq_prio_changed(cfqq);
}

static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			  pid_t pid, bool is_sync)
{
	RB_CLEAR_NODE(&cfqq->rb_node);
	RB_CLEAR_NODE(&cfqq->p_node);
	INIT_LIST_HEAD(&cfqq->fifo);

	cfqq->ref = 0;
	cfqq->cfqd = cfqd;

	cfq_mark_cfqq_prio_changed(cfqq);

	if (is_sync) {
		if (!cfq_class_idle(cfqq))
			cfq_mark_cfqq_idle_window(cfqq);
		cfq_mark_cfqq_sync(cfqq);
	}
	cfqq->pid = pid;
}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static void changed_cgroup(struct cfq_io_cq *cic)
{
	struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
	struct cfq_data *cfqd = cic_to_cfqd(cic);
	struct request_queue *q;

	if (unlikely(!cfqd))
		return;

	q = cfqd->queue;

	if (sync_cfqq) {
		/*
		 * Drop reference to sync queue. A new sync queue will be
		 * assigned in new group upon arrival of a fresh request.
		 */
		cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
		cic_set_cfqq(cic, NULL, 1);
		cfq_put_queue(sync_cfqq);
	}
}
#endif /* CONFIG_CFQ_GROUP_IOSCHED */

static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
		     struct io_context *ioc, gfp_t gfp_mask)
{
	struct blkio_cgroup *blkcg;
	struct cfq_queue *cfqq, *new_cfqq = NULL;
	struct cfq_io_cq *cic;
	struct cfq_group *cfqg;

retry:
	rcu_read_lock();

	blkcg = task_blkio_cgroup(current);

	cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
	cic = cfq_cic_lookup(cfqd, ioc);
	/* cic always exists here */
	cfqq = cic_to_cfqq(cic, is_sync);

	/*
	 * Always try a new alloc if we fell back to the OOM cfqq
	 * originally, since it should just be a temporary situation.
	 */
	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
		cfqq = NULL;
		if (new_cfqq) {
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
			rcu_read_unlock();
			spin_unlock_irq(cfqd->queue->queue_lock);
			new_cfqq = kmem_cache_alloc_node(cfq_pool,
							 gfp_mask | __GFP_ZERO,
							 cfqd->queue->node);
			spin_lock_irq(cfqd->queue->queue_lock);
			if (new_cfqq)
				goto retry;
		} else {
			cfqq = kmem_cache_alloc_node(cfq_pool,
						     gfp_mask | __GFP_ZERO,
						     cfqd->queue->node);
		}

		if (cfqq) {
			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
			cfq_init_prio_data(cfqq, ioc);
			cfq_link_cfqq_cfqg(cfqq, cfqg);
			cfq_log_cfqq(cfqd, cfqq, "alloced");
		} else
			cfqq = &cfqd->oom_cfqq;
	}

	if (new_cfqq)
		kmem_cache_free(cfq_pool, new_cfqq);

	rcu_read_unlock();
	return cfqq;
}

static struct cfq_queue **
cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
{
	switch (ioprio_class) {
	case IOPRIO_CLASS_RT:
		return &cfqd->async_cfqq[0][ioprio];
	case IOPRIO_CLASS_BE:
		return &cfqd->async_cfqq[1][ioprio];
	case IOPRIO_CLASS_IDLE:
		return &cfqd->async_idle_cfqq;
	default:
		BUG();
	}
}

static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
	      gfp_t gfp_mask)
{
	const int ioprio = task_ioprio(ioc);
	const int ioprio_class = task_ioprio_class(ioc);
	struct cfq_queue **async_cfqq = NULL;
	struct cfq_queue *cfqq = NULL;

	if (!is_sync) {
		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
		cfqq = *async_cfqq;
	}

	if (!cfqq)
		cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);

	/*
	 * pin the queue now that it's allocated, scheduler exit will prune it
	 */
	if (!is_sync && !(*async_cfqq)) {
		cfqq->ref++;
		*async_cfqq = cfqq;
	}

	cfqq->ref++;
	return cfqq;
}

static void
__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
{
	unsigned long elapsed = jiffies - ttime->last_end_request;
	elapsed = min(elapsed, 2UL * slice_idle);

	ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
	ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
	ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
}

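/*
 * Fixed-point EWMA sketch: each update keeps 7/8 of the history, so
 * ttime_samples decays toward its fixed point of 256 (one "sample"
 * contributes 256/8 units) and ttime_total tracks 256 times the
 * decayed sum of samples; ttime_mean therefore converges on the true
 * mean think time, with the +128 only rounding to nearest.  Clamping
 * elapsed to 2 * slice_idle keeps one long pause from blowing up the
 * average.
 */
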
static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			struct cfq_io_cq *cic)
{
	if (cfq_cfqq_sync(cfqq)) {
		__cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
		__cfq_update_io_thinktime(&cfqq->service_tree->ttime,
					  cfqd->cfq_slice_idle);
	}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	__cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
#endif
}

static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct request *rq)
{
	sector_t sdist = 0;
	sector_t n_sec = blk_rq_sectors(rq);

	if (cfqq->last_request_pos) {
		if (cfqq->last_request_pos < blk_rq_pos(rq))
			sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
		else
			sdist = cfqq->last_request_pos - blk_rq_pos(rq);
	}

	cfqq->seek_history <<= 1;
	if (blk_queue_nonrot(cfqd->queue))
		cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
	else
		cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
}

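/*
 * seek_history acts as a sliding window of per-request "seeky"
 * verdicts, one bit per request: distance-based (sdist > CFQQ_SEEK_THR)
 * on rotational media, size-based (small requests) on non-rotational
 * ones.  CFQQ_SEEKY(), defined earlier in this file, declares the
 * queue seeky once enough of these bits are set.
 */
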
/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_cq *cic)
{
	int old_idle, enable_idle;

	/*
	 * Don't idle for async or idle io prio class
	 */
	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
		return;

	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);

	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
		cfq_mark_cfqq_deep(cfqq);

	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
		enable_idle = 0;
	else if (!atomic_read(&cic->icq.ioc->nr_tasks) ||
		 !cfqd->cfq_slice_idle ||
		 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
		enable_idle = 0;
	else if (sample_valid(cic->ttime.ttime_samples)) {
		if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
	}

	if (old_idle != enable_idle) {
		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
		if (enable_idle)
			cfq_mark_cfqq_idle_window(cfqq);
		else
			cfq_clear_cfqq_idle_window(cfqq);
	}
}

/*
 * Check if new_cfqq should preempt the currently active queue. Returns
 * false for no (or if we aren't sure); returning true causes a preempt.
 */
static bool
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
		   struct request *rq)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		return false;

	if (cfq_class_idle(new_cfqq))
		return false;

	if (cfq_class_idle(cfqq))
		return true;

	/*
	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
	 */
	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
		return false;

	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
		return true;

	if (new_cfqq->cfqg != cfqq->cfqg)
		return false;

	if (cfq_slice_used(cfqq))
		return true;

	/* Allow preemption only if we are idling on sync-noidle tree */
	if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
	    cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
	    new_cfqq->service_tree->count == 2 &&
	    RB_EMPTY_ROOT(&cfqq->sort_list))
		return true;

	/*
	 * So both queues are sync. Let the new request get disk time if
	 * it's a metadata request and the current queue is doing regular IO.
	 */
	if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
		return true;

	/*
	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
	 */
	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
		return true;

	/* An idle queue should not be idle now for some reason */
	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
		return true;

	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
		return false;

	/*
	 * if this request is as-good as one we would expect from the
	 * current cfqq, let it preempt
	 */
	if (cfq_rq_close(cfqd, cfqq, rq))
		return true;

	return false;
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	enum wl_type_t old_type = cfqq_type(cfqd->active_queue);

	cfq_log_cfqq(cfqd, cfqq, "preempt");
	cfq_slice_expired(cfqd, 1);

	/*
	 * workload type is changed, don't save slice, otherwise preempt
	 * doesn't happen
	 */
	if (old_type != cfqq_type(cfqq))
		cfqq->cfqg->saved_workload_slice = 0;

	/*
	 * Put the new queue at the front of the current list,
	 * so we know that it will be selected next.
	 */
	BUG_ON(!cfq_cfqq_on_rr(cfqq));

	cfq_service_tree_add(cfqd, cfqq, 1);

	cfqq->slice_end = 0;
	cfq_mark_cfqq_slice_new(cfqq);
}

/*
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
{
	struct cfq_io_cq *cic = RQ_CIC(rq);

	cfqd->rq_queued++;
	if (rq->cmd_flags & REQ_PRIO)
		cfqq->prio_pending++;

	cfq_update_io_thinktime(cfqd, cfqq, cic);
	cfq_update_io_seektime(cfqd, cfqq, rq);
	cfq_update_idle_window(cfqd, cfqq, cic);

	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);

	if (cfqq == cfqd->active_queue) {
		/*
		 * Remember that we saw a request from this process, but
		 * don't start queuing just yet. Otherwise we risk seeing lots
		 * of tiny requests, because we disrupt the normal plugging
		 * and merging. If the request is already larger than a single
		 * page, let it rip immediately. For that case we assume that
		 * merging is already done. Ditto for a busy system that
		 * has other work pending, don't risk delaying until the
		 * idle timer unplug to continue working.
		 */
		if (cfq_cfqq_wait_request(cfqq)) {
			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
			    cfqd->busy_queues > 1) {
				cfq_del_timer(cfqd, cfqq);
				cfq_clear_cfqq_wait_request(cfqq);
				__blk_run_queue(cfqd->queue);
			} else {
				cfq_blkiocg_update_idle_time_stats(
						&cfqq->cfqg->blkg);
				cfq_mark_cfqq_must_dispatch(cfqq);
			}
		}
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
		/*
		 * not the active queue - expire current slice if it is
		 * idle and has expired its mean thinktime or this new queue
		 * has some old slice time left and is of higher priority or
		 * this new queue is RT and the current one is BE
		 */
		cfq_preempt_queue(cfqd, cfqq);
		__blk_run_queue(cfqd->queue);
	}
}

static void cfq_insert_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "insert_request");
	cfq_init_prio_data(cfqq, RQ_CIC(rq)->icq.ioc);

	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
	list_add_tail(&rq->queuelist, &cfqq->fifo);
	cfq_add_rq_rb(rq);
	cfq_blkiocg_update_io_add_stats(&(RQ_CFQG(rq))->blkg,
					&cfqd->serving_group->blkg,
					rq_data_dir(rq), rq_is_sync(rq));
	cfq_rq_enqueued(cfqd, cfqq, rq);
}

/*
 * Update hw_tag based on peak queue depth over 50 samples under
 * sufficient load.
 */
static void cfq_update_hw_tag(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
		cfqd->hw_tag_est_depth = cfqd->rq_in_driver;

	if (cfqd->hw_tag == 1)
		return;

	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
		return;

	/*
	 * If the active queue doesn't have enough requests and can idle,
	 * cfq might not dispatch sufficient requests to hardware. Don't
	 * zero hw_tag in this case.
	 */
	if (cfqq && cfq_cfqq_idle_window(cfqq) &&
	    cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
	    CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
		return;

	if (cfqd->hw_tag_samples++ < 50)
		return;

	if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
		cfqd->hw_tag = 1;
	else
		cfqd->hw_tag = 0;
}

static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct cfq_io_cq *cic = cfqd->active_cic;

	/* If the queue already has requests, don't wait */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		return false;

	/* If there are other queues in the group, don't wait */
	if (cfqq->cfqg->nr_cfqq > 1)
		return false;

	/* the only queue in the group, but think time is big */
	if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
		return false;

	if (cfq_slice_used(cfqq))
		return true;

	/* if slice left is less than think time, wait busy */
	if (cic && sample_valid(cic->ttime.ttime_samples)
	    && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
		return true;

	/*
	 * If the think time is less than a jiffy, then ttime_mean is 0 and
	 * the check above will not be true. It might happen that the slice
	 * has not expired yet but will expire soon (4-5 ns) during
	 * select_queue(). To cover the case where the think time is less
	 * than a jiffy, mark the queue wait busy if only 1 jiffy is left in
	 * the slice.
	 */
	if (cfqq->slice_end - jiffies == 1)
		return true;

	return false;
}

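/*
 * elevator_completed_req_fn (see the ops table below): a request finished
 * at the driver level. Drop the in-driver accounting, record think-time
 * bookkeeping for sync requests, and decide whether the active queue
 * should be expired, marked "wait busy", or idled on in anticipation of a
 * close request.
 */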
static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);
	unsigned long now;

	now = jiffies;
	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
		     !!(rq->cmd_flags & REQ_NOIDLE));

	cfq_update_hw_tag(cfqd);

	WARN_ON(!cfqd->rq_in_driver);
	WARN_ON(!cfqq->dispatched);
	cfqd->rq_in_driver--;
	cfqq->dispatched--;
	(RQ_CFQG(rq))->dispatched--;
	cfq_blkiocg_update_completion_stats(&cfqq->cfqg->blkg,
			rq_start_time_ns(rq), rq_io_start_time_ns(rq),
			rq_data_dir(rq), rq_is_sync(rq));

	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;

	if (sync) {
		struct cfq_rb_root *service_tree;

		RQ_CIC(rq)->ttime.last_end_request = now;

		if (cfq_cfqq_on_rr(cfqq))
			service_tree = cfqq->service_tree;
		else
			service_tree = service_tree_for(cfqq->cfqg,
				cfqq_prio(cfqq), cfqq_type(cfqq));
		service_tree->ttime.last_end_request = now;
		if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
			cfqd->last_delayed_sync = now;
	}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	cfqq->cfqg->ttime.last_end_request = now;
#endif

	/*
	 * If this is the active queue, check if it needs to be expired,
	 * or if we want to idle in case it has no pending requests.
	 */
	if (cfqd->active_queue == cfqq) {
		const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);

		if (cfq_cfqq_slice_new(cfqq)) {
			cfq_set_prio_slice(cfqd, cfqq);
			cfq_clear_cfqq_slice_new(cfqq);
		}

		/*
		 * Should we wait for the next request to come in before we
		 * expire the queue?
		 */
		if (cfq_should_wait_busy(cfqd, cfqq)) {
			unsigned long extend_sl = cfqd->cfq_slice_idle;
			if (!cfqd->cfq_slice_idle)
				extend_sl = cfqd->cfq_group_idle;
			cfqq->slice_end = jiffies + extend_sl;
			cfq_mark_cfqq_wait_busy(cfqq);
			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
		}

		/*
		 * Idling is not enabled on:
		 * - expired queues
		 * - idle-priority queues
		 * - async queues
		 * - queues with still some requests queued
		 * - when there is a close cooperator
		 */
		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
			cfq_slice_expired(cfqd, 1);
		else if (sync && cfqq_empty &&
			 !cfq_close_cooperator(cfqd, cfqq)) {
			cfq_arm_slice_timer(cfqd);
		}
	}

	if (!cfqd->rq_in_driver)
		cfq_schedule_dispatch(cfqd);
}

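/*
 * Force allocation (ELV_MQUEUE_MUST) for a queue that is idling while it
 * waits for a request, so the anticipated request is not held back by
 * allocation limits; the must_alloc_slice flag restricts this to one
 * forced allocation per slice.
 */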
static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
}

static int cfq_may_queue(struct request_queue *q, int rw)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_cq *cic;
	struct cfq_queue *cfqq;

	/*
	 * Don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * So just look up a possibly existing queue, or return 'may queue'
	 * if that fails.
	 */
	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return ELV_MQUEUE_MAY;

	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
	if (cfqq) {
		cfq_init_prio_data(cfqq, cic->icq.ioc);

		return __cfq_may_queue(cfqq);
	}

	return ELV_MQUEUE_MAY;
}

/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq) {
		const int rw = rq_data_dir(rq);

		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;

		/* Put down rq reference on cfqg */
		cfq_put_cfqg(RQ_CFQG(rq));
		rq->elv.priv[0] = NULL;
		rq->elv.priv[1] = NULL;

		cfq_put_queue(cfqq);
	}
}

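/*
 * Switch the cic over to the queue we were scheduled to merge with, drop
 * our reference on the old queue, and hand the merged sync queue back to
 * the caller.
 */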
static struct cfq_queue *
cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
		struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
	cic_set_cfqq(cic, cfqq->new_cfqq, 1);
	cfq_mark_cfqq_coop(cfqq->new_cfqq);
	cfq_put_queue(cfqq);
	return cic_to_cfqq(cic, 1);
}

/*
 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
 * was the last process referring to said cfqq.
 */
static struct cfq_queue *
split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
{
	if (cfqq_process_refs(cfqq) == 1) {
		cfqq->pid = current->pid;
		cfq_clear_cfqq_coop(cfqq);
		cfq_clear_cfqq_split_coop(cfqq);
		return cfqq;
	}

	cic_set_cfqq(cic, NULL, 1);

	cfq_put_cooperator(cfqq);

	cfq_put_queue(cfqq);
	return NULL;
}

/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
	const int rw = rq_data_dir(rq);
	const bool is_sync = rq_is_sync(rq);
	struct cfq_queue *cfqq;
	unsigned int changed;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	spin_lock_irq(q->queue_lock);

	/* handle changed notifications */
	changed = icq_get_changed(&cic->icq);
	if (unlikely(changed & ICQ_IOPRIO_CHANGED))
		changed_ioprio(cic);
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	if (unlikely(changed & ICQ_CGROUP_CHANGED))
		changed_cgroup(cic);
#endif

new_queue:
	cfqq = cic_to_cfqq(cic, is_sync);
	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
		cfqq = cfq_get_queue(cfqd, is_sync, cic->icq.ioc, gfp_mask);
		cic_set_cfqq(cic, cfqq, is_sync);
	} else {
		/*
		 * If the queue was seeky for too long, break it apart.
		 */
		if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
			cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
			cfqq = split_cfqq(cic, cfqq);
			if (!cfqq)
				goto new_queue;
		}

		/*
		 * Check to see if this queue is scheduled to merge with
		 * another, closely cooperating queue. The merging of
		 * queues happens here as it must be done in process context.
		 * The reference on new_cfqq was taken in merge_cfqqs.
		 */
		if (cfqq->new_cfqq)
			cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
	}

	cfqq->allocated[rw]++;

	cfqq->ref++;
	rq->elv.priv[0] = cfqq;
	rq->elv.priv[1] = cfq_ref_get_cfqg(cfqq->cfqg);
	spin_unlock_irq(q->queue_lock);
	return 0;
}

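/*
 * Work function behind cfq_schedule_dispatch(): re-run the request queue
 * from process context with the queue lock held.
 */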
static void cfq_kick_queue(struct work_struct *work)
{
	struct cfq_data *cfqd =
		container_of(work, struct cfq_data, unplug_work);
	struct request_queue *q = cfqd->queue;

	spin_lock_irq(q->queue_lock);
	__blk_run_queue(cfqd->queue);
	spin_unlock_irq(q->queue_lock);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;
	int timed_out = 1;

	cfq_log(cfqd, "idle timer fired");

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cfqd->active_queue;
	if (cfqq) {
		timed_out = 0;

		/*
		 * We saw a request before the queue expired, let it through
		 */
		if (cfq_cfqq_must_dispatch(cfqq))
			goto out_kick;

		/*
		 * expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * only expire and reinvoke request handler, if there are
		 * other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
			goto out_kick;

		/*
		 * Queue depth flag is reset only when the idle didn't succeed
		 */
		cfq_clear_cfqq_deep(cfqq);
	}
expire:
	cfq_slice_expired(cfqd, timed_out);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	cancel_work_sync(&cfqd->unplug_work);
}

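/*
 * Drop the references held on the per-priority async queues (and the
 * idle-class one) at elevator teardown.
 */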
static void cfq_put_async_queues(struct cfq_data *cfqd)
{
	int i;

	for (i = 0; i < IOPRIO_BE_NR; i++) {
		if (cfqd->async_cfqq[0][i])
			cfq_put_queue(cfqd->async_cfqq[0][i]);
		if (cfqd->async_cfqq[1][i])
			cfq_put_queue(cfqd->async_cfqq[1][i]);
	}

	if (cfqd->async_idle_cfqq)
		cfq_put_queue(cfqd->async_idle_cfqq);
}

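/*
 * elevator_exit_fn (see the ops table below): expire whatever queue is
 * active, release the async queues and cfq groups, and wait out RCU
 * readers before freeing the cfq_data.
 */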
static void cfq_exit_queue(struct elevator_queue *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	struct request_queue *q = cfqd->queue;
	bool wait = false;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	cfq_put_async_queues(cfqd);
	cfq_release_cfq_groups(cfqd);

	/*
	 * If there are groups which we could not unlink from the blkcg list,
	 * wait an RCU grace period for them to be freed.
	 */
	if (cfqd->nr_blkcg_linked_grps)
		wait = true;

	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);

	/*
	 * Wait for cfqg->blkg->key accessors to exit their grace periods.
	 * Do this wait only if there are other unlinked groups out
	 * there. This can happen if the cgroup deletion path claimed the
	 * responsibility for cleaning up a group before the queue cleanup
	 * code got to the group.
	 *
	 * Do not call synchronize_rcu() unconditionally as there are drivers
	 * which create/delete request queues hundreds of times during
	 * scan/boot, and synchronize_rcu() can take significant time and
	 * slow down boot.
	 */
	if (wait)
		synchronize_rcu();

#ifndef CONFIG_CFQ_GROUP_IOSCHED
	kfree(cfqd->root_group);
#endif
	kfree(cfqd);
}

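/*
 * elevator_init_fn (see the ops table below): allocate and initialize the
 * cfq_data for this queue, set up the root group, the fallback oom_cfqq,
 * the idle-slice timer and the unplug work, and seed the tunables with
 * their default values.
 */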
static int cfq_init_queue(struct request_queue *q)
{
	struct cfq_data *cfqd;
	struct blkio_group *blkg __maybe_unused;
	int i;

	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!cfqd)
		return -ENOMEM;

	cfqd->queue = q;
	q->elevator->elevator_data = cfqd;

	/* Init root service tree */
	cfqd->grp_service_tree = CFQ_RB_ROOT;

	/* Init root group and prefer root group over other groups by default */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	blkg = blkg_lookup_create(&blkio_root_cgroup, q, BLKIO_POLICY_PROP,
				  true);
	if (!IS_ERR(blkg))
		cfqd->root_group = cfqg_of_blkg(blkg);

	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();
#else
	cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
					GFP_KERNEL, cfqd->queue->node);
	if (cfqd->root_group)
		cfq_init_cfqg_base(cfqd->root_group);
#endif
	if (!cfqd->root_group) {
		kfree(cfqd);
		return -ENOMEM;
	}

	cfqd->root_group->weight = 2 * BLKIO_WEIGHT_DEFAULT;

	/*
	 * Not strictly needed (since RB_ROOT just clears the node and we
	 * zeroed cfqd on alloc), but better be safe in case someone decides
	 * to add magic to the rb code
	 */
	for (i = 0; i < CFQ_PRIO_LISTS; i++)
		cfqd->prio_trees[i] = RB_ROOT;

	/*
	 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
	 * Grab a permanent reference to it, so that the normal code flow
	 * will not attempt to free it. oom_cfqq is linked to root_group
	 * but shouldn't hold a reference as it'll never be unlinked. Lose
	 * the reference from linking right away.
	 */
	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
	cfqd->oom_cfqq.ref++;
	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
	cfq_put_cfqg(cfqd->root_group);

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;
	cfqd->cfq_group_idle = cfq_group_idle;
	cfqd->cfq_latency = 1;
	cfqd->hw_tag = -1;
	/*
	 * We optimistically start assuming sync ops weren't delayed in the
	 * last second, in order to have larger depth for async operations.
	 */
	cfqd->last_delayed_sync = jiffies - HZ;
	return 0;
}

/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

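/*
 * The SHOW_FUNCTION/STORE_FUNCTION macros below stamp out one sysfs
 * handler per tunable; __CONV selects conversion between jiffies
 * (internal representation) and milliseconds (user visible).
 */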
#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
#undef STORE_FUNCTION

#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	CFQ_ATTR(group_idle),
	CFQ_ATTR(low_latency),
	__ATTR_NULL
};

static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn =		cfq_merge,
		.elevator_merged_fn =		cfq_merged_request,
		.elevator_merge_req_fn =	cfq_merged_requests,
		.elevator_allow_merge_fn =	cfq_allow_merge,
		.elevator_bio_merged_fn =	cfq_bio_merged,
		.elevator_dispatch_fn =		cfq_dispatch_requests,
		.elevator_add_req_fn =		cfq_insert_request,
		.elevator_activate_req_fn =	cfq_activate_request,
		.elevator_deactivate_req_fn =	cfq_deactivate_request,
		.elevator_completed_req_fn =	cfq_completed_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_init_icq_fn =		cfq_init_icq,
		.elevator_exit_icq_fn =		cfq_exit_icq,
		.elevator_set_req_fn =		cfq_set_request,
		.elevator_put_req_fn =		cfq_put_request,
		.elevator_may_queue_fn =	cfq_may_queue,
		.elevator_init_fn =		cfq_init_queue,
		.elevator_exit_fn =		cfq_exit_queue,
	},
	.icq_size	=	sizeof(struct cfq_io_cq),
	.icq_align	=	__alignof__(struct cfq_io_cq),
	.elevator_attrs =	cfq_attrs,
	.elevator_name	=	"cfq",
	.elevator_owner =	THIS_MODULE,
};

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static struct blkio_policy_type blkio_policy_cfq = {
	.ops = {
		.blkio_alloc_group_fn =		cfq_alloc_blkio_group,
		.blkio_link_group_fn =		cfq_link_blkio_group,
		.blkio_unlink_group_fn =	cfq_unlink_blkio_group,
		.blkio_clear_queue_fn =		cfq_clear_queue,
		.blkio_update_group_weight_fn =	cfq_update_blkio_group_weight,
	},
	.plid = BLKIO_POLICY_PROP,
};
#endif

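/*
 * Module init: fix up jiffies-based defaults that can round down to 0
 * when HZ < 1000, create the cfq_queue slab cache, and register the
 * elevator (and, when group scheduling is configured, the blkio policy).
 */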
static int __init cfq_init(void)
{
	int ret;

	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	if (!cfq_group_idle)
		cfq_group_idle = 1;
#else
	cfq_group_idle = 0;
#endif
	cfq_pool = KMEM_CACHE(cfq_queue, 0);
	if (!cfq_pool)
		return -ENOMEM;

	ret = elv_register(&iosched_cfq);
	if (ret) {
		kmem_cache_destroy(cfq_pool);
		return ret;
	}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkio_policy_register(&blkio_policy_cfq);
#endif
	return 0;
}

static void __exit cfq_exit(void)
{
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkio_policy_unregister(&blkio_policy_cfq);
#endif
	elv_unregister(&iosched_cfq);
	kmem_cache_destroy(cfq_pool);
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");