cfq-iosched.c

/*
 * CFQ, or complete fairness queueing, disk scheduler.
 *
 * Based on ideas from a previously unfinished io
 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include "blk.h"
#include "blk-cgroup.h"
/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 8;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
static int cfq_group_idle = HZ / 125;
static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;
/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY (HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT (2)

#define CFQ_SLICE_SCALE (5)
#define CFQ_HW_QUEUE_MIN (5)
#define CFQ_SERVICE_SHIFT 12

#define CFQQ_SEEK_THR (sector_t)(8 * 100)
#define CFQQ_CLOSE_THR (sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT (sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq) (hweight32(cfqq->seek_history) > 32/8)
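/*
 * Note on CFQQ_SEEKY: seek_history is a 32-bit sliding window with one
 * bit per recently issued request; the update path shifts in a set bit
 * whenever a request lands far (beyond CFQQ_SEEK_THR, i.e. 800 sectors)
 * from its predecessor. hweight32() counts the set bits, so a queue is
 * classified as seeky once more than 4 of its last 32 requests were
 * long-distance seeks.
 */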
#define RQ_CIC(rq) icq_to_cic((rq)->elv.icq)
#define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elv.priv[0])
#define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elv.priv[1])

static struct kmem_cache *cfq_pool;

#define CFQ_PRIO_LISTS IOPRIO_BE_NR
#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples) ((samples) > 80)
#define rb_entry_cfqg(node) rb_entry((node), struct cfq_group, rb_node)
struct cfq_ttime {
	unsigned long last_end_request;

	unsigned long ttime_total;
	unsigned long ttime_samples;
	unsigned long ttime_mean;
};
/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
	struct rb_root rb;
	struct rb_node *left;
	unsigned count;
	unsigned total_weight;
	u64 min_vdisktime;
	struct cfq_ttime ttime;
};
#define CFQ_RB_ROOT (struct cfq_rb_root) { .rb = RB_ROOT, \
			.ttime = {.last_end_request = jiffies,},}
/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	int ref;
	/* various state flags, see below */
	unsigned int flags;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* service_tree member */
	struct rb_node rb_node;
	/* service_tree key */
	unsigned long rb_key;
	/* prio tree member */
	struct rb_node p_node;
	/* prio tree root we belong to, if any */
	struct rb_root *p_root;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	/* time when queue got scheduled in to dispatch first request. */
	unsigned long dispatch_start;
	unsigned int allocated_slice;
	unsigned int slice_dispatch;
	/* time when first request from queue completed and slice started. */
	unsigned long slice_start;
	unsigned long slice_end;
	long slice_resid;

	/* pending priority requests */
	int prio_pending;
	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class;

	pid_t pid;

	u32 seek_history;
	sector_t last_request_pos;

	struct cfq_rb_root *service_tree;
	struct cfq_queue *new_cfqq;
	struct cfq_group *cfqg;

	/* Number of sectors dispatched from queue in single dispatch round */
	unsigned long nr_sectors;
};
/*
 * First index in the service_trees.
 * IDLE is handled separately, so it has negative index
 */
enum wl_class_t {
	BE_WORKLOAD = 0,
	RT_WORKLOAD = 1,
	IDLE_WORKLOAD = 2,
	CFQ_PRIO_NR,
};

/*
 * Second index in the service_trees.
 */
enum wl_type_t {
	ASYNC_WORKLOAD = 0,
	SYNC_NOIDLE_WORKLOAD = 1,
	SYNC_WORKLOAD = 2
};
struct cfqg_stats {
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	/* total bytes transferred */
	struct blkg_rwstat service_bytes;
	/* total IOs serviced, post merge */
	struct blkg_rwstat serviced;
	/* number of ios merged */
	struct blkg_rwstat merged;
	/* total time spent on device in ns, may not be accurate w/ queueing */
	struct blkg_rwstat service_time;
	/* total time spent waiting in scheduler queue in ns */
	struct blkg_rwstat wait_time;
	/* number of IOs queued up */
	struct blkg_rwstat queued;
	/* total sectors transferred */
	struct blkg_stat sectors;
	/* total disk time and nr sectors dispatched by this group */
	struct blkg_stat time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	/* time not charged to this cgroup */
	struct blkg_stat unaccounted_time;
	/* sum of number of ios queued across all samples */
	struct blkg_stat avg_queue_size_sum;
	/* count of samples taken for average */
	struct blkg_stat avg_queue_size_samples;
	/* how many times this group has been removed from service tree */
	struct blkg_stat dequeue;
	/* total time spent waiting for it to be assigned a timeslice. */
	struct blkg_stat group_wait_time;
	/* time spent idling for this blkcg_gq */
	struct blkg_stat idle_time;
	/* total time with empty current active q with other requests queued */
	struct blkg_stat empty_time;
	/* fields after this shouldn't be cleared on stat reset */
	uint64_t start_group_wait_time;
	uint64_t start_idle_time;
	uint64_t start_empty_time;
	uint16_t flags;
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
};
/* This is per cgroup per device grouping structure */
struct cfq_group {
	/* must be the first member */
	struct blkg_policy_data pd;

	/* group service_tree member */
	struct rb_node rb_node;

	/* group service_tree key */
	u64 vdisktime;
	unsigned int weight;
	unsigned int new_weight;
	unsigned int dev_weight;

	/* number of cfqq currently on this group */
	int nr_cfqq;

	/*
	 * Per group busy queues average. Useful for workload slice calc. We
	 * create the array for each prio class but at run time it is used
	 * only for RT and BE class and slot for IDLE class remains unused.
	 * This is primarily done to avoid confusion and a gcc warning.
	 */
	unsigned int busy_queues_avg[CFQ_PRIO_NR];
	/*
	 * rr lists of queues with requests. We maintain service trees for
	 * RT and BE classes. These trees are subdivided in subclasses
	 * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
	 * class there is no subclassification and all the cfq queues go on
	 * a single tree service_tree_idle.
	 * Counts are embedded in the cfq_rb_root
	 */
	struct cfq_rb_root service_trees[2][3];
	struct cfq_rb_root service_tree_idle;

	unsigned long saved_wl_slice;
	enum wl_type_t saved_wl_type;
	enum wl_class_t saved_wl_class;

	/* number of requests that are on the dispatch list or inside driver */
	int dispatched;
	struct cfq_ttime ttime;
	struct cfqg_stats stats;
};
struct cfq_io_cq {
	struct io_cq icq;	/* must be the first member */
	struct cfq_queue *cfqq[2];
	struct cfq_ttime ttime;
	int ioprio;		/* the current ioprio */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	uint64_t blkcg_id;	/* the current blkcg ID */
#endif
};
/*
 * Per block device queue structure
 */
struct cfq_data {
	struct request_queue *queue;
	/* Root service tree for cfq_groups */
	struct cfq_rb_root grp_service_tree;
	struct cfq_group *root_group;

	/*
	 * The priority currently being served
	 */
	enum wl_class_t serving_wl_class;
	enum wl_type_t serving_wl_type;
	unsigned long workload_expires;
	struct cfq_group *serving_group;

	/*
	 * Each priority tree is sorted by next_request position. These
	 * trees are used when determining if two or more queues are
	 * interleaving requests (see cfq_close_cooperator).
	 */
	struct rb_root prio_trees[CFQ_PRIO_LISTS];

	unsigned int busy_queues;
	unsigned int busy_sync_queues;

	int rq_in_driver;
	int rq_in_flight[2];

	/*
	 * queue-depth detection
	 */
	int rq_queued;
	int hw_tag;
	/*
	 * hw_tag can be
	 * -1 => indeterminate (cfq will behave as if NCQ is present, to
	 *       allow better detection)
	 *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
	 *  0 => no NCQ
	 */
	int hw_tag_est_depth;
	unsigned int hw_tag_samples;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_cq *active_cic;

	/*
	 * async queue for each priority case
	 */
	struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
	struct cfq_queue *async_idle_cfqq;

	sector_t last_position;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;
	unsigned int cfq_group_idle;
	unsigned int cfq_latency;
	unsigned int cfq_target_latency;

	/*
	 * Fallback dummy cfqq for extreme OOM conditions
	 */
	struct cfq_queue oom_cfqq;

	unsigned long last_delayed_sync;
};
static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);

static struct cfq_rb_root *st_for(struct cfq_group *cfqg,
				  enum wl_class_t class,
				  enum wl_type_t type)
{
	if (!cfqg)
		return NULL;

	if (class == IDLE_WORKLOAD)
		return &cfqg->service_tree_idle;

	return &cfqg->service_trees[class][type];
}
enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be split */
	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
};
#define CFQ_CFQQ_FNS(name) \
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
{ \
	(cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
} \
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
{ \
	(cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
} \
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
{ \
	return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
CFQ_CFQQ_FNS(split_coop);
CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS
static inline struct cfq_group *pd_to_cfqg(struct blkg_policy_data *pd)
{
	return pd ? container_of(pd, struct cfq_group, pd) : NULL;
}

static inline struct blkcg_gq *cfqg_to_blkg(struct cfq_group *cfqg)
{
	return pd_to_blkg(&cfqg->pd);
}
#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)

/* cfqg stats flags */
enum cfqg_stats_flags {
	CFQG_stats_waiting = 0,
	CFQG_stats_idling,
	CFQG_stats_empty,
};

#define CFQG_FLAG_FNS(name) \
static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats) \
{ \
	stats->flags |= (1 << CFQG_stats_##name); \
} \
static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats) \
{ \
	stats->flags &= ~(1 << CFQG_stats_##name); \
} \
static inline int cfqg_stats_##name(struct cfqg_stats *stats) \
{ \
	return (stats->flags & (1 << CFQG_stats_##name)) != 0; \
}

CFQG_FLAG_FNS(waiting)
CFQG_FLAG_FNS(idling)
CFQG_FLAG_FNS(empty)
#undef CFQG_FLAG_FNS
/* This should be called with the queue_lock held. */
static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
{
	unsigned long long now;

	if (!cfqg_stats_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		blkg_stat_add(&stats->group_wait_time,
			      now - stats->start_group_wait_time);
	cfqg_stats_clear_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
						 struct cfq_group *curr_cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (cfqg_stats_waiting(stats))
		return;
	if (cfqg == curr_cfqg)
		return;
	stats->start_group_wait_time = sched_clock();
	cfqg_stats_mark_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
{
	unsigned long long now;

	if (!cfqg_stats_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		blkg_stat_add(&stats->empty_time,
			      now - stats->start_empty_time);
	cfqg_stats_clear_empty(stats);
}

static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
{
	blkg_stat_add(&cfqg->stats.dequeue, 1);
}

static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (blkg_rwstat_sum(&stats->queued))
		return;

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (cfqg_stats_empty(stats))
		return;

	stats->start_empty_time = sched_clock();
	cfqg_stats_mark_empty(stats);
}

static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	if (cfqg_stats_idling(stats)) {
		unsigned long long now = sched_clock();

		if (time_after64(now, stats->start_idle_time))
			blkg_stat_add(&stats->idle_time,
				      now - stats->start_idle_time);
		cfqg_stats_clear_idling(stats);
	}
}

static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	BUG_ON(cfqg_stats_idling(stats));

	stats->start_idle_time = sched_clock();
	cfqg_stats_mark_idling(stats);
}

static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
{
	struct cfqg_stats *stats = &cfqg->stats;

	blkg_stat_add(&stats->avg_queue_size_sum,
		      blkg_rwstat_sum(&stats->queued));
	blkg_stat_add(&stats->avg_queue_size_samples, 1);
	cfqg_stats_update_group_wait_time(stats);
}
#else	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
			struct cfq_group *curr_cfqg) { }
static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
#ifdef CONFIG_CFQ_GROUP_IOSCHED

static struct blkcg_policy blkcg_policy_cfq;

static inline struct cfq_group *blkg_to_cfqg(struct blkcg_gq *blkg)
{
	return pd_to_cfqg(blkg_to_pd(blkg, &blkcg_policy_cfq));
}

static inline void cfqg_get(struct cfq_group *cfqg)
{
	return blkg_get(cfqg_to_blkg(cfqg));
}

static inline void cfqg_put(struct cfq_group *cfqg)
{
	return blkg_put(cfqg_to_blkg(cfqg));
}

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) do { \
	char __pbuf[128]; \
	\
	blkg_path(cfqg_to_blkg((cfqq)->cfqg), __pbuf, sizeof(__pbuf)); \
	blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
			  cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
			  __pbuf, ##args); \
} while (0)

#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do { \
	char __pbuf[128]; \
	\
	blkg_path(cfqg_to_blkg(cfqg), __pbuf, sizeof(__pbuf)); \
	blk_add_trace_msg((cfqd)->queue, "%s " fmt, __pbuf, ##args); \
} while (0)
static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
					    struct cfq_group *curr_cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
	cfqg_stats_end_empty_time(&cfqg->stats);
	cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
}

static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			unsigned long time, unsigned long unaccounted_time)
{
	blkg_stat_add(&cfqg->stats.time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
#endif
}

static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
}

static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
{
	blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
}

static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
					      uint64_t bytes, int rw)
{
	blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
	blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
	blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
}

static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time, int rw)
{
	struct cfqg_stats *stats = &cfqg->stats;
	unsigned long long now = sched_clock();

	if (time_after64(now, io_start_time))
		blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
	if (time_after64(io_start_time, start_time))
		blkg_rwstat_add(&stats->wait_time, rw,
				io_start_time - start_time);
}
static void cfq_pd_reset_stats(struct blkcg_gq *blkg)
{
	struct cfq_group *cfqg = blkg_to_cfqg(blkg);
	struct cfqg_stats *stats = &cfqg->stats;

	/* queued stats shouldn't be cleared */
	blkg_rwstat_reset(&stats->service_bytes);
	blkg_rwstat_reset(&stats->serviced);
	blkg_rwstat_reset(&stats->merged);
	blkg_rwstat_reset(&stats->service_time);
	blkg_rwstat_reset(&stats->wait_time);
	blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg_stat_reset(&stats->unaccounted_time);
	blkg_stat_reset(&stats->avg_queue_size_sum);
	blkg_stat_reset(&stats->avg_queue_size_samples);
	blkg_stat_reset(&stats->dequeue);
	blkg_stat_reset(&stats->group_wait_time);
	blkg_stat_reset(&stats->idle_time);
	blkg_stat_reset(&stats->empty_time);
#endif
}
#else	/* CONFIG_CFQ_GROUP_IOSCHED */

static inline void cfqg_get(struct cfq_group *cfqg) { }
static inline void cfqg_put(struct cfq_group *cfqg) { }

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
			struct cfq_group *curr_cfqg, int rw) { }
static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
			unsigned long time, unsigned long unaccounted_time) { }
static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
			uint64_t bytes, int rw) { }
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
			uint64_t start_time, uint64_t io_start_time, int rw) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
#define cfq_log(cfqd, fmt, args...) \
	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

/* Traverses through cfq group service trees */
#define for_each_cfqg_st(cfqg, i, j, st) \
	for (i = 0; i <= IDLE_WORKLOAD; i++) \
		for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j] \
			: &cfqg->service_tree_idle; \
			(i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
			(i == IDLE_WORKLOAD && j == 0); \
			j++, st = i < IDLE_WORKLOAD ? \
			&cfqg->service_trees[i][j] : NULL)
static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
					struct cfq_ttime *ttime, bool group_idle)
{
	unsigned long slice;

	if (!sample_valid(ttime->ttime_samples))
		return false;
	if (group_idle)
		slice = cfqd->cfq_group_idle;
	else
		slice = cfqd->cfq_slice_idle;
	return ttime->ttime_mean > slice;
}
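/*
 * Example with HZ=1000: cfq_slice_idle defaults to HZ/125 = 8 jiffies,
 * so once a queue has a valid sample set (more than 80 samples, per
 * sample_valid()), a mean thinktime above ~8ms marks it as "thinktime
 * big": idling on such a queue would waste more time than the seek it
 * is meant to avoid.
 */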
static inline bool iops_mode(struct cfq_data *cfqd)
{
	/*
	 * If we are not idling on queues and it is a NCQ drive, parallel
	 * execution of requests is on and measuring time is not possible
	 * in most cases unless we drive shallower queue depths, and that
	 * becomes a performance bottleneck. In such cases switch to
	 * providing fairness in terms of number of IOs.
	 */
	if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
		return true;
	else
		return false;
}
static inline enum wl_class_t cfqq_class(struct cfq_queue *cfqq)
{
	if (cfq_class_idle(cfqq))
		return IDLE_WORKLOAD;
	if (cfq_class_rt(cfqq))
		return RT_WORKLOAD;
	return BE_WORKLOAD;
}

static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
{
	if (!cfq_cfqq_sync(cfqq))
		return ASYNC_WORKLOAD;
	if (!cfq_cfqq_idle_window(cfqq))
		return SYNC_NOIDLE_WORKLOAD;
	return SYNC_WORKLOAD;
}

static inline int cfq_group_busy_queues_wl(enum wl_class_t wl_class,
					   struct cfq_data *cfqd,
					   struct cfq_group *cfqg)
{
	if (wl_class == IDLE_WORKLOAD)
		return cfqg->service_tree_idle.count;

	return cfqg->service_trees[wl_class][ASYNC_WORKLOAD].count +
	       cfqg->service_trees[wl_class][SYNC_NOIDLE_WORKLOAD].count +
	       cfqg->service_trees[wl_class][SYNC_WORKLOAD].count;
}

static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
					 struct cfq_group *cfqg)
{
	return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count +
	       cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
}
static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
				       struct cfq_io_cq *cic, struct bio *bio,
				       gfp_t gfp_mask);

static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
	/* cic->icq is the first member, %NULL will convert to %NULL */
	return container_of(icq, struct cfq_io_cq, icq);
}

static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
					       struct io_context *ioc)
{
	if (ioc)
		return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
	return NULL;
}

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
{
	return cic->cfqq[is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
				bool is_sync)
{
	cic->cfqq[is_sync] = cfqq;
}

static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
{
	return cic->icq.q->elevator->elevator_data;
}

/*
 * We regard a request as SYNC, if it's either a read or has the SYNC bit
 * set (in which case it could also be a direct WRITE).
 */
static inline bool cfq_bio_sync(struct bio *bio)
{
	return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
}
/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues) {
		cfq_log(cfqd, "schedule dispatch");
		kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
	}
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only should not get the full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
				 unsigned short prio)
{
	const int base_slice = cfqd->cfq_slice[sync];

	WARN_ON(prio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}
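/*
 * Worked example, assuming HZ=1000: the sync base slice is HZ/10 = 100
 * jiffies and CFQ_SLICE_SCALE is 5, so each priority step is worth
 * 100/5 = 20 jiffies around the default prio 4. Prio 0 (highest) gets
 * 100 + 20*4 = 180 jiffies, prio 4 gets exactly 100, and prio 7 gets
 * 100 + 20*(-3) = 40 jiffies.
 */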
static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}
static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
{
	u64 d = delta << CFQ_SERVICE_SHIFT;

	d = d * CFQ_WEIGHT_DEFAULT;
	do_div(d, cfqg->weight);
	return d;
}
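/*
 * This scaling is what makes vdisktime weight-fair: the service time
 * actually used (delta) is shifted up by CFQ_SERVICE_SHIFT for
 * precision, then multiplied by CFQ_WEIGHT_DEFAULT / weight. A group
 * with twice the default weight therefore accrues vdisktime at half
 * the rate, stays leftmost in the service tree longer, and ends up
 * with twice the share of disk time.
 */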
static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);

	if (delta > 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}

static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
	s64 delta = (s64)(vdisktime - min_vdisktime);

	if (delta < 0)
		min_vdisktime = vdisktime;

	return min_vdisktime;
}
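/*
 * Note that both helpers compare via a signed cast of the unsigned
 * difference rather than with '<' directly. This is the same trick
 * jiffies' time_after() uses: the comparison stays correct even when
 * the u64 vdisktime counters eventually wrap around.
 */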
static void update_min_vdisktime(struct cfq_rb_root *st)
{
	struct cfq_group *cfqg;

	if (st->left) {
		cfqg = rb_entry_cfqg(st->left);
		st->min_vdisktime = max_vdisktime(st->min_vdisktime,
						  cfqg->vdisktime);
	}
}
/*
 * get averaged number of queues of RT/BE priority.
 * average is updated, with a formula that gives more weight to higher numbers,
 * to quickly follow sudden increases and decrease slowly
 */
static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
						struct cfq_group *cfqg, bool rt)
{
	unsigned min_q, max_q;
	unsigned mult = cfq_hist_divisor - 1;
	unsigned round = cfq_hist_divisor / 2;
	unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);

	min_q = min(cfqg->busy_queues_avg[rt], busy);
	max_q = max(cfqg->busy_queues_avg[rt], busy);
	cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
		cfq_hist_divisor;
	return cfqg->busy_queues_avg[rt];
}
static inline unsigned
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;

	return cfqd->cfq_target_latency * cfqg->weight / st->total_weight;
}
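/*
 * Example: cfq_target_latency defaults to 300ms worth of jiffies, so a
 * group holding one quarter of the total weight on the service tree is
 * handed a 75ms workload slice, keeping a full rotation through all
 * busy groups near the target latency.
 */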
static inline unsigned
cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_prio_to_slice(cfqd, cfqq);

	if (cfqd->cfq_latency) {
		/*
		 * interested queues (we consider only the ones with the same
		 * priority class in the cfq group)
		 */
		unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
						       cfq_class_rt(cfqq));
		unsigned sync_slice = cfqd->cfq_slice[1];
		unsigned expect_latency = sync_slice * iq;
		unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);

		if (expect_latency > group_slice) {
			unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;

			/* scale low_slice according to IO priority
			 * and sync vs async */
			unsigned low_slice =
				min(slice, base_low_slice * slice / sync_slice);
			/* the adapted slice value is scaled to fit all iqs
			 * into the target latency */
			slice = max(slice * group_slice / expect_latency,
				    low_slice);
		}
	}
	return slice;
}
static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);

	cfqq->slice_start = jiffies;
	cfqq->slice_end = jiffies + slice;
	cfqq->allocated_slice = slice;
	cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}
/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline bool cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return false;
	if (time_before(jiffies, cfqq->slice_end))
		return false;

	return true;
}
/*
 * Lifted from AS - choose which of rq1 and rq2 that is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
{
	sector_t s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;

	if (rq_is_sync(rq1) != rq_is_sync(rq2))
		return rq_is_sync(rq1) ? rq1 : rq2;

	if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
		return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;

	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
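/*
 * Worked example of the distance logic: with the head at sector 1000
 * and cfq_back_penalty = 2, a forward candidate at sector 1100 scores
 * d = 100, while a candidate just behind at 980 scores
 * d = (1000 - 980) * 2 = 40. The backward request still wins here;
 * only when the penalized backward distance exceeds the forward one
 * (or the seek falls outside back_max) does the one-way elevator
 * behaviour take over.
 */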
/*
 * The below is leftmost cache rbtree addon
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
	/* Service tree is empty */
	if (!root->count)
		return NULL;

	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry(root->left, struct cfq_queue, rb_node);

	return NULL;
}

static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
{
	if (!root->left)
		root->left = rb_first(&root->rb);

	if (root->left)
		return rb_entry_cfqg(root->left);

	return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
	rb_erase(n, root);
	RB_CLEAR_NODE(n);
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
	if (root->left == n)
		root->left = NULL;
	rb_erase_init(n, &root->rb);
	--root->count;
}
/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		 struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
}
static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
				      struct cfq_queue *cfqq)
{
	/*
	 * just an approximation, should be ok.
	 */
	return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
		cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}

static inline s64
cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	return cfqg->vdisktime - st->min_vdisktime;
}
static void
__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	struct rb_node **node = &st->rb.rb_node;
	struct rb_node *parent = NULL;
	struct cfq_group *__cfqg;
	s64 key = cfqg_key(st, cfqg);
	int left = 1;

	while (*node != NULL) {
		parent = *node;
		__cfqg = rb_entry_cfqg(parent);

		if (key < cfqg_key(st, __cfqg))
			node = &parent->rb_left;
		else {
			node = &parent->rb_right;
			left = 0;
		}
	}

	if (left)
		st->left = &cfqg->rb_node;

	rb_link_node(&cfqg->rb_node, parent, node);
	rb_insert_color(&cfqg->rb_node, &st->rb);
}
static void
cfq_update_group_weight(struct cfq_group *cfqg)
{
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

	if (cfqg->new_weight) {
		cfqg->weight = cfqg->new_weight;
		cfqg->new_weight = 0;
	}
}

static void
cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

	cfq_update_group_weight(cfqg);
	__cfq_group_service_tree_add(st, cfqg);
	st->total_weight += cfqg->weight;
}
static void
cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	struct cfq_group *__cfqg;
	struct rb_node *n;

	cfqg->nr_cfqq++;
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		return;

	/*
	 * Currently put the group at the end. Later implement something
	 * so that groups get lesser vtime based on their weights, so that
	 * a group does not lose everything if it was not continuously
	 * backlogged.
	 */
	n = rb_last(&st->rb);
	if (n) {
		__cfqg = rb_entry_cfqg(n);
		cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
	} else
		cfqg->vdisktime = st->min_vdisktime;
	cfq_group_service_tree_add(st, cfqg);
}
static void
cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
	st->total_weight -= cfqg->weight;
	if (!RB_EMPTY_NODE(&cfqg->rb_node))
		cfq_rb_erase(&cfqg->rb_node, st);
}

static void
cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;

	BUG_ON(cfqg->nr_cfqq < 1);
	cfqg->nr_cfqq--;

	/* If there are other cfq queues under this group, don't delete it */
	if (cfqg->nr_cfqq)
		return;

	cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
	cfq_group_service_tree_del(st, cfqg);
	cfqg->saved_wl_slice = 0;
	cfqg_stats_update_dequeue(cfqg);
}
static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
						unsigned int *unaccounted_time)
{
	unsigned int slice_used;

	/*
	 * Queue got expired before even a single request completed or
	 * got expired immediately after first request completion.
	 */
	if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
		/*
		 * Also charge the seek time incurred to the group, otherwise
		 * if there are multiple queues in the group, each can dispatch
		 * a single request on seeky media and cause lots of seek time
		 * and group will never know it.
		 */
		slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
					1);
	} else {
		slice_used = jiffies - cfqq->slice_start;
		if (slice_used > cfqq->allocated_slice) {
			*unaccounted_time = slice_used - cfqq->allocated_slice;
			slice_used = cfqq->allocated_slice;
		}
		if (time_after(cfqq->slice_start, cfqq->dispatch_start))
			*unaccounted_time += cfqq->slice_start -
					cfqq->dispatch_start;
	}

	return slice_used;
}
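/*
 * Charge the group for the slice the queue just used: compute the charge
 * (time, dispatch count in IOPS mode, or the allocated slice for a lone
 * async queue), bump the group's vdisktime by the weight-scaled charge,
 * and save the workload context if the group expires before its workload
 * slice has run out.
 */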
static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
				struct cfq_queue *cfqq)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	unsigned int used_sl, charge, unaccounted_sl = 0;
	int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
			- cfqg->service_tree_idle.count;

	BUG_ON(nr_sync < 0);
	used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);

	if (iops_mode(cfqd))
		charge = cfqq->slice_dispatch;
	else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
		charge = cfqq->allocated_slice;

	/* Can't update vdisktime while group is on service tree */
	cfq_group_service_tree_del(st, cfqg);
	cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
	/* If a new weight was requested, update now, off tree */
	cfq_group_service_tree_add(st, cfqg);

	/* This group is being expired. Save the context */
	if (time_after(cfqd->workload_expires, jiffies)) {
		cfqg->saved_wl_slice = cfqd->workload_expires
						- jiffies;
		cfqg->saved_wl_type = cfqd->serving_wl_type;
		cfqg->saved_wl_class = cfqd->serving_wl_class;
	} else
		cfqg->saved_wl_slice = 0;

	cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
			st->min_vdisktime);
	cfq_log_cfqq(cfqq->cfqd, cfqq,
		     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
		     used_sl, cfqq->slice_dispatch, charge,
		     iops_mode(cfqd), cfqq->nr_sectors);
	cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
	cfqg_stats_set_start_empty_time(cfqg);
}
/**
 * cfq_init_cfqg_base - initialize base part of a cfq_group
 * @cfqg: cfq_group to initialize
 *
 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
 * is enabled or not.
 */
static void cfq_init_cfqg_base(struct cfq_group *cfqg)
{
	struct cfq_rb_root *st;
	int i, j;

	for_each_cfqg_st(cfqg, i, j, st)
		*st = CFQ_RB_ROOT;
	RB_CLEAR_NODE(&cfqg->rb_node);

	cfqg->ttime.last_end_request = jiffies;
}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
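/*
 * blkcg policy init callback, invoked when a new blkcg_gq is created for
 * this policy: set up the embedded cfq_group and inherit the blkcg's
 * configured weight.
 */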
static void cfq_pd_init(struct blkcg_gq *blkg)
{
	struct cfq_group *cfqg = blkg_to_cfqg(blkg);

	cfq_init_cfqg_base(cfqg);
	cfqg->weight = blkg->blkcg->cfq_weight;
}

/*
 * Search for the cfq group current task belongs to. request_queue lock must
 * be held.
 */
static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
						struct blkcg *blkcg)
{
	struct request_queue *q = cfqd->queue;
	struct cfq_group *cfqg = NULL;

	/* avoid lookup for the common case where there's no blkcg */
	if (blkcg == &blkcg_root) {
		cfqg = cfqd->root_group;
	} else {
		struct blkcg_gq *blkg;

		blkg = blkg_lookup_create(blkcg, q);
		if (!IS_ERR(blkg))
			cfqg = blkg_to_cfqg(blkg);
	}

	return cfqg;
}

static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
{
	/* Currently, all async queues are mapped to root group */
	if (!cfq_cfqq_sync(cfqq))
		cfqg = cfqq->cfqd->root_group;

	cfqq->cfqg = cfqg;
	/* cfqq reference on cfqg */
	cfqg_get(cfqg);
}
static u64 cfqg_prfill_weight_device(struct seq_file *sf,
				     struct blkg_policy_data *pd, int off)
{
	struct cfq_group *cfqg = pd_to_cfqg(pd);

	if (!cfqg->dev_weight)
		return 0;
	return __blkg_prfill_u64(sf, pd, cfqg->dev_weight);
}

static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
				    struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkcg(cgrp),
			  cfqg_prfill_weight_device, &blkcg_policy_cfq, 0,
			  false);
	return 0;
}

static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
			    struct seq_file *sf)
{
	seq_printf(sf, "%u\n", cgroup_to_blkcg(cgrp)->cfq_weight);
	return 0;
}

static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
				  const char *buf)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
	struct blkg_conf_ctx ctx;
	struct cfq_group *cfqg;
	int ret;

	ret = blkg_conf_prep(blkcg, &blkcg_policy_cfq, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	cfqg = blkg_to_cfqg(ctx.blkg);
	if (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN && ctx.v <= CFQ_WEIGHT_MAX)) {
		cfqg->dev_weight = ctx.v;
		cfqg->new_weight = cfqg->dev_weight ?: blkcg->cfq_weight;
		ret = 0;
	}

	blkg_conf_finish(&ctx);
	return ret;
}

static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
	struct blkcg_gq *blkg;
	struct hlist_node *n;

	if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
		return -EINVAL;

	spin_lock_irq(&blkcg->lock);
	blkcg->cfq_weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct cfq_group *cfqg = blkg_to_cfqg(blkg);

		if (cfqg && !cfqg->dev_weight)
			cfqg->new_weight = blkcg->cfq_weight;
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}
static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
			   struct seq_file *sf)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, &blkcg_policy_cfq,
			  cft->private, false);
	return 0;
}

static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *sf)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, &blkcg_policy_cfq,
			  cft->private, true);
	return 0;
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf,
				      struct blkg_policy_data *pd, int off)
{
	struct cfq_group *cfqg = pd_to_cfqg(pd);
	u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
		do_div(v, samples);
	}
	__blkg_prfill_u64(sf, pd, v);
	return 0;
}

/* print avg_queue_size */
static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
				     struct seq_file *sf)
{
	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);

	blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
			  &blkcg_policy_cfq, 0, false);
	return 0;
}
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
static struct cftype cfq_blkcg_files[] = {
	{
		.name = "weight_device",
		.read_seq_string = cfqg_print_weight_device,
		.write_string = cfqg_set_weight_device,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.read_seq_string = cfq_print_weight,
		.write_u64 = cfq_set_weight,
	},
	{
		.name = "time",
		.private = offsetof(struct cfq_group, stats.time),
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "sectors",
		.private = offsetof(struct cfq_group, stats.sectors),
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "io_service_bytes",
		.private = offsetof(struct cfq_group, stats.service_bytes),
		.read_seq_string = cfqg_print_rwstat,
	},
	{
		.name = "io_serviced",
		.private = offsetof(struct cfq_group, stats.serviced),
		.read_seq_string = cfqg_print_rwstat,
	},
	{
		.name = "io_service_time",
		.private = offsetof(struct cfq_group, stats.service_time),
		.read_seq_string = cfqg_print_rwstat,
	},
	{
		.name = "io_wait_time",
		.private = offsetof(struct cfq_group, stats.wait_time),
		.read_seq_string = cfqg_print_rwstat,
	},
	{
		.name = "io_merged",
		.private = offsetof(struct cfq_group, stats.merged),
		.read_seq_string = cfqg_print_rwstat,
	},
	{
		.name = "io_queued",
		.private = offsetof(struct cfq_group, stats.queued),
		.read_seq_string = cfqg_print_rwstat,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.read_seq_string = cfqg_print_avg_queue_size,
	},
	{
		.name = "group_wait_time",
		.private = offsetof(struct cfq_group, stats.group_wait_time),
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "idle_time",
		.private = offsetof(struct cfq_group, stats.idle_time),
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "empty_time",
		.private = offsetof(struct cfq_group, stats.empty_time),
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "dequeue",
		.private = offsetof(struct cfq_group, stats.dequeue),
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "unaccounted_time",
		.private = offsetof(struct cfq_group, stats.unaccounted_time),
		.read_seq_string = cfqg_print_stat,
	},
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
	{ }	/* terminate */
};
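/*
 * Usage sketch (illustrative, not taken from this file): with the blkio
 * controller mounted, the entries above appear as blkio.weight,
 * blkio.weight_device, blkio.time and so on. For example:
 *
 *	echo 500 > /sys/fs/cgroup/blkio/grp/blkio.weight
 *	echo "8:16 300" > /sys/fs/cgroup/blkio/grp/blkio.weight_device
 *
 * The mount point and the "8:16" major:minor pair are assumptions for the
 * example; weights must fall within [CFQ_WEIGHT_MIN, CFQ_WEIGHT_MAX].
 */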
#else /* GROUP_IOSCHED */
static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
						struct blkcg *blkcg)
{
	return cfqd->root_group;
}

static inline void
cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
	cfqq->cfqg = cfqg;
}

#endif /* GROUP_IOSCHED */
/*
 * The cfqd->service_trees holds all pending cfq_queue's that have
 * requests waiting to be processed. It is sorted in the order that
 * we will service the queues.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
				 bool add_front)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;
	unsigned long rb_key;
	struct cfq_rb_root *st;
	int left;
	int new_cfqq = 1;

	st = st_for(cfqq->cfqg, cfqq_class(cfqq), cfqq_type(cfqq));
	if (cfq_class_idle(cfqq)) {
		rb_key = CFQ_IDLE_DELAY;
		parent = rb_last(&st->rb);
		if (parent && parent != &cfqq->rb_node) {
			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
			rb_key += __cfqq->rb_key;
		} else
			rb_key += jiffies;
	} else if (!add_front) {
		/*
		 * Get our rb key offset. Subtract any residual slice
		 * value carried from last service. A negative resid
		 * count indicates slice overrun, and this should position
		 * the next service time further away in the tree.
		 */
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
		rb_key -= cfqq->slice_resid;
		cfqq->slice_resid = 0;
	} else {
		rb_key = -HZ;
		__cfqq = cfq_rb_first(st);
		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
	}

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		new_cfqq = 0;
		/*
		 * same position, nothing more to do
		 */
		if (rb_key == cfqq->rb_key && cfqq->service_tree == st)
			return;

		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
		cfqq->service_tree = NULL;
	}

	left = 1;
	parent = NULL;
	cfqq->service_tree = st;
	p = &st->rb.rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);

		/*
		 * sort by key, that represents service time.
		 */
		if (time_before(rb_key, __cfqq->rb_key))
			n = &(*p)->rb_left;
		else {
			n = &(*p)->rb_right;
			left = 0;
		}

		p = n;
	}

	if (left)
		st->left = &cfqq->rb_node;

	cfqq->rb_key = rb_key;
	rb_link_node(&cfqq->rb_node, parent, p);
	rb_insert_color(&cfqq->rb_node, &st->rb);
	st->count++;
	if (add_front || !new_cfqq)
		return;
	cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
}
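/*
 * Walk the per-prio rbtree, which is keyed by the sector of each queue's
 * next request. Returns the queue found at @sector, if any; otherwise
 * returns NULL and reports the insertion point via @ret_parent/@rb_link.
 */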
static struct cfq_queue *
cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
		     sector_t sector, struct rb_node **ret_parent,
		     struct rb_node ***rb_link)
{
	struct rb_node **p, *parent;
	struct cfq_queue *cfqq = NULL;

	parent = NULL;
	p = &root->rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		cfqq = rb_entry(parent, struct cfq_queue, p_node);

		/*
		 * Sort strictly based on sector. Smallest to the left,
		 * largest to the right.
		 */
		if (sector > blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_right;
		else if (sector < blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_left;
		else
			break;
		p = n;
		cfqq = NULL;
	}

	*ret_parent = parent;
	if (rb_link)
		*rb_link = p;
	return cfqq;
}

static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;

	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	if (cfq_class_idle(cfqq))
		return;
	if (!cfqq->next_rq)
		return;

	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
				      blk_rq_pos(cfqq->next_rq), &parent, &p);
	if (!__cfqq) {
		rb_link_node(&cfqq->p_node, parent, p);
		rb_insert_color(&cfqq->p_node, cfqq->p_root);
	} else
		cfqq->p_root = NULL;
}
/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (cfq_cfqq_on_rr(cfqq)) {
		cfq_service_tree_add(cfqd, cfqq, 0);
		cfq_prio_tree_add(cfqd, cfqq);
	}
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;
	if (cfq_cfqq_sync(cfqq))
		cfqd->busy_sync_queues++;

	cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
		cfqq->service_tree = NULL;
	}
	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
	if (cfq_cfqq_sync(cfqq))
		cfqd->busy_sync_queues--;
}
/*
 * rb tree support functions
 */
static void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
		/*
		 * Queue will be deleted from service tree when we actually
		 * expire it later. Right now just remove it from prio tree
		 * as it is empty.
		 */
		if (cfqq->p_root) {
			rb_erase(&cfqq->p_node, cfqq->p_root);
			cfqq->p_root = NULL;
		}
	}
}

static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *prev;

	cfqq->queued[rq_is_sync(rq)]++;
	elv_rb_add(&cfqq->sort_list, rq);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	prev = cfqq->next_rq;
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);

	/*
	 * adjust priority tree position, if ->next_rq changes
	 */
	if (prev != cfqq->next_rq)
		cfq_prio_tree_add(cfqd, cfqq);

	BUG_ON(!cfqq->next_rq);
}

static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
	cfq_add_rq_rb(rq);
	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
				 rq->cmd_flags);
}
static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	struct cfq_io_cq *cic;
	struct cfq_queue *cfqq;

	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return NULL;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}

	return NULL;
}

static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver++;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
						cfqd->rq_in_driver);

	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
}

static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
						cfqd->rq_in_driver);
}

static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	cfqq->cfqd->rq_queued--;
	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
	if (rq->cmd_flags & REQ_PRIO) {
		WARN_ON(!cfqq->prio_pending);
		cfqq->prio_pending--;
	}
}
static int cfq_merge(struct request_queue *q, struct request **req,
		     struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void cfq_merged_request(struct request_queue *q, struct request *req,
			       int type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		struct cfq_queue *cfqq = RQ_CFQQ(req);

		cfq_reposition_rq_rb(cfqq, req);
	}
}

static void cfq_bio_merged(struct request_queue *q, struct request *req,
			   struct bio *bio)
{
	cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
}

static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
		    struct request *next)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = q->elevator->elevator_data;

	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(rq_fifo_time(next), rq_fifo_time(rq)) &&
	    cfqq == RQ_CFQQ(next)) {
		list_move(&rq->queuelist, &next->queuelist);
		rq_set_fifo_time(rq, rq_fifo_time(next));
	}

	if (cfqq->next_rq == next)
		cfqq->next_rq = rq;
	cfq_remove_request(next);
	cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);

	cfqq = RQ_CFQQ(next);
	/*
	 * all requests of this queue are merged to other queues, delete it
	 * from the service tree. If it's the active_queue,
	 * cfq_dispatch_requests() will choose to expire it or do idle
	 */
	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
	    cfqq != cfqd->active_queue)
		cfq_del_cfqq_rr(cfqd, cfqq);
}
static int cfq_allow_merge(struct request_queue *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_cq *cic;
	struct cfq_queue *cfqq;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
		return false;

	/*
	 * Lookup the cfqq that this bio will be queued with and allow
	 * merge only if rq is queued there.
	 */
	cic = cfq_cic_lookup(cfqd, current->io_context);
	if (!cic)
		return false;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	return cfqq == RQ_CFQQ(rq);
}

static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	del_timer(&cfqd->idle_slice_timer);
	cfqg_stats_update_idle_time(cfqq->cfqg);
}
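/*
 * Make cfqq the active queue and reset all of its per-slice accounting:
 * slice start/end, dispatch counters and the various state flags.
 */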
static void __cfq_set_active_queue(struct cfq_data *cfqd,
				   struct cfq_queue *cfqq)
{
	if (cfqq) {
		cfq_log_cfqq(cfqd, cfqq, "set_active wl_class:%d wl_type:%d",
				cfqd->serving_wl_class, cfqd->serving_wl_type);
		cfqg_stats_update_avg_queue_size(cfqq->cfqg);
		cfqq->slice_start = 0;
		cfqq->dispatch_start = jiffies;
		cfqq->allocated_slice = 0;
		cfqq->slice_end = 0;
		cfqq->slice_dispatch = 0;
		cfqq->nr_sectors = 0;

		cfq_clear_cfqq_wait_request(cfqq);
		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);

		cfq_del_timer(cfqd, cfqq);
	}

	cfqd->active_queue = cfqq;
}
/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    bool timed_out)
{
	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);

	if (cfq_cfqq_wait_request(cfqq))
		cfq_del_timer(cfqd, cfqq);

	cfq_clear_cfqq_wait_request(cfqq);
	cfq_clear_cfqq_wait_busy(cfqq);

	/*
	 * If this cfqq is shared between multiple processes, check to
	 * make sure that those processes are still issuing I/Os within
	 * the mean seek distance. If not, it may be time to break the
	 * queues apart again.
	 */
	if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
		cfq_mark_cfqq_split_coop(cfqq);

	/*
	 * store what was left of this slice, if the queue idled/timed out
	 */
	if (timed_out) {
		if (cfq_cfqq_slice_new(cfqq))
			cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
		else
			cfqq->slice_resid = cfqq->slice_end - jiffies;
		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
	}

	cfq_group_served(cfqd, cfqq->cfqg, cfqq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
		cfq_del_cfqq_rr(cfqd, cfqq);

	cfq_resort_rr_list(cfqd, cfqq);

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->icq.ioc);
		cfqd->active_cic = NULL;
	}
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, timed_out);
}
/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
	struct cfq_rb_root *st = st_for(cfqd->serving_group,
			cfqd->serving_wl_class, cfqd->serving_wl_type);

	if (!cfqd->rq_queued)
		return NULL;

	/* There is nothing to dispatch */
	if (!st)
		return NULL;
	if (RB_EMPTY_ROOT(&st->rb))
		return NULL;
	return cfq_rb_first(st);
}

static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
{
	struct cfq_group *cfqg;
	struct cfq_queue *cfqq;
	int i, j;
	struct cfq_rb_root *st;

	if (!cfqd->rq_queued)
		return NULL;

	cfqg = cfq_get_next_cfqg(cfqd);
	if (!cfqg)
		return NULL;

	for_each_cfqg_st(cfqg, i, j, st)
		if ((cfqq = cfq_rb_first(st)) != NULL)
			return cfqq;
	return NULL;
}

/*
 * Get and set a new active queue for service.
 */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
					      struct cfq_queue *cfqq)
{
	if (!cfqq)
		cfqq = cfq_get_next_queue(cfqd);

	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}
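/*
 * Absolute sector distance between a request and the last dispatch
 * position; used below to decide whether a request counts as "close".
 */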
static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
					  struct request *rq)
{
	if (blk_rq_pos(rq) >= cfqd->last_position)
		return blk_rq_pos(rq) - cfqd->last_position;
	else
		return cfqd->last_position - blk_rq_pos(rq);
}

static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			       struct request *rq)
{
	return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
}

static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
				    struct cfq_queue *cur_cfqq)
{
	struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
	struct rb_node *parent, *node;
	struct cfq_queue *__cfqq;
	sector_t sector = cfqd->last_position;

	if (RB_EMPTY_ROOT(root))
		return NULL;

	/*
	 * First, if we find a request starting at the end of the last
	 * request, choose it.
	 */
	__cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
	if (__cfqq)
		return __cfqq;

	/*
	 * If the exact sector wasn't found, the parent of the NULL leaf
	 * will contain the closest sector.
	 */
	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
		return __cfqq;

	if (blk_rq_pos(__cfqq->next_rq) < sector)
		node = rb_next(&__cfqq->p_node);
	else
		node = rb_prev(&__cfqq->p_node);
	if (!node)
		return NULL;

	__cfqq = rb_entry(node, struct cfq_queue, p_node);
	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
		return __cfqq;

	return NULL;
}
/*
 * cfqd - obvious
 * cur_cfqq - passed in so that we don't decide that the current queue is
 *            closely cooperating with itself.
 *
 * So, basically we're assuming that cur_cfqq has dispatched at least
 * one request, and that cfqd->last_position reflects a position on the disk
 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
 * assumption.
 */
static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
					      struct cfq_queue *cur_cfqq)
{
	struct cfq_queue *cfqq;

	if (cfq_class_idle(cur_cfqq))
		return NULL;
	if (!cfq_cfqq_sync(cur_cfqq))
		return NULL;
	if (CFQQ_SEEKY(cur_cfqq))
		return NULL;

	/*
	 * Don't search priority tree if it's the only queue in the group.
	 */
	if (cur_cfqq->cfqg->nr_cfqq == 1)
		return NULL;

	/*
	 * We should notice if some of the queues are cooperating, eg
	 * working closely on the same area of the disk. In that case,
	 * we can group them together and don't waste time idling.
	 */
	cfqq = cfqq_close(cfqd, cur_cfqq);
	if (!cfqq)
		return NULL;

	/* If new queue belongs to different cfq_group, don't choose it */
	if (cur_cfqq->cfqg != cfqq->cfqg)
		return NULL;

	/*
	 * It only makes sense to merge sync queues.
	 */
	if (!cfq_cfqq_sync(cfqq))
		return NULL;
	if (CFQQ_SEEKY(cfqq))
		return NULL;

	/*
	 * Do not merge queues of different priority classes
	 */
	if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
		return NULL;

	return cfqq;
}
/*
 * Determine whether we should enforce idle window for this queue.
 */
static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	enum wl_class_t wl_class = cfqq_class(cfqq);
	struct cfq_rb_root *st = cfqq->service_tree;

	BUG_ON(!st);
	BUG_ON(!st->count);

	if (!cfqd->cfq_slice_idle)
		return false;

	/* We never do for idle class queues. */
	if (wl_class == IDLE_WORKLOAD)
		return false;

	/* We do for queues that were marked with idle window flag. */
	if (cfq_cfqq_idle_window(cfqq) &&
	   !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
		return true;

	/*
	 * Otherwise, we do only if they are the last ones
	 * in their service tree.
	 */
	if (st->count == 1 && cfq_cfqq_sync(cfqq) &&
	   !cfq_io_thinktime_big(cfqd, &st->ttime, false))
		return true;
	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d", st->count);
	return false;
}
static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;
	struct cfq_io_cq *cic;
	unsigned long sl, group_idle = 0;

	/*
	 * SSD device without seek penalty, disable idling. But only do so
	 * for devices that support queuing, otherwise we still have a problem
	 * with sync vs async workloads.
	 */
	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
		return;

	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
	WARN_ON(cfq_cfqq_slice_new(cfqq));

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfq_should_idle(cfqd, cfqq)) {
		/* no queue idling. Check for group idling */
		if (cfqd->cfq_group_idle)
			group_idle = cfqd->cfq_group_idle;
		else
			return;
	}

	/*
	 * still active requests from this queue, don't idle
	 */
	if (cfqq->dispatched)
		return;

	/*
	 * task has exited, don't wait
	 */
	cic = cfqd->active_cic;
	if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
		return;

	/*
	 * If our average think time is larger than the remaining time
	 * slice, then don't idle. This avoids overrunning the allotted
	 * time slice.
	 */
	if (sample_valid(cic->ttime.ttime_samples) &&
	    (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
		cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
			     cic->ttime.ttime_mean);
		return;
	}

	/* There are other queues in the group, don't do group idle */
	if (group_idle && cfqq->cfqg->nr_cfqq > 1)
		return;

	cfq_mark_cfqq_wait_request(cfqq);

	if (group_idle)
		sl = cfqd->cfq_group_idle;
	else
		sl = cfqd->cfq_slice_idle;

	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
	cfqg_stats_set_start_idle_time(cfqq->cfqg);
	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
			group_idle ? 1 : 0);
}
/*
 * Move request from internal lists to the request queue dispatch list.
 */
static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");

	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
	cfq_remove_request(rq);
	cfqq->dispatched++;
	(RQ_CFQG(rq))->dispatched++;
	elv_dispatch_sort(q, rq);

	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
	cfqq->nr_sectors += blk_rq_sectors(rq);
	cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct request *rq = NULL;

	if (cfq_cfqq_fifo_expire(cfqq))
		return NULL;

	cfq_mark_cfqq_fifo_expire(cfqq);

	if (list_empty(&cfqq->fifo))
		return NULL;

	rq = rq_entry_fifo(cfqq->fifo.next);
	if (time_before(jiffies, rq_fifo_time(rq)))
		rq = NULL;

	cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
	return rq;
}
static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
}

/*
 * Must be called with the queue_lock held.
 */
static int cfqq_process_refs(struct cfq_queue *cfqq)
{
	int process_refs, io_refs;

	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
	process_refs = cfqq->ref - io_refs;
	BUG_ON(process_refs < 0);
	return process_refs;
}
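/*
 * Schedule cfqq for a future merge with new_cfqq: walk to the end of the
 * existing new_cfqq chain (bailing out on loops), then point the queue
 * with fewer process references at the one with more, transferring
 * references so that neither side can disappear early.
 */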
static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
{
	int process_refs, new_process_refs;
	struct cfq_queue *__cfqq;

	/*
	 * If there are no process references on the new_cfqq, then it is
	 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
	 * chain may have dropped their last reference (not just their
	 * last process reference).
	 */
	if (!cfqq_process_refs(new_cfqq))
		return;

	/* Avoid a circular list and skip interim queue merges */
	while ((__cfqq = new_cfqq->new_cfqq)) {
		if (__cfqq == cfqq)
			return;
		new_cfqq = __cfqq;
	}

	process_refs = cfqq_process_refs(cfqq);
	new_process_refs = cfqq_process_refs(new_cfqq);
	/*
	 * If the process for the cfqq has gone away, there is no
	 * sense in merging the queues.
	 */
	if (process_refs == 0 || new_process_refs == 0)
		return;

	/*
	 * Merge in the direction of the lesser amount of work.
	 */
	if (new_process_refs >= process_refs) {
		cfqq->new_cfqq = new_cfqq;
		new_cfqq->ref += process_refs;
	} else {
		new_cfqq->new_cfqq = cfqq;
		cfqq->ref += new_process_refs;
	}
}
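/*
 * Pick the workload type (ASYNC, SYNC_NOIDLE or SYNC) within a class
 * whose first queue has the earliest rb_key, i.e. the type that has
 * waited longest for service.
 */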
static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
			struct cfq_group *cfqg, enum wl_class_t wl_class)
{
	struct cfq_queue *queue;
	int i;
	bool key_valid = false;
	unsigned long lowest_key = 0;
	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;

	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
		/* select the one with lowest rb_key */
		queue = cfq_rb_first(st_for(cfqg, wl_class, i));
		if (queue &&
		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
			lowest_key = queue->rb_key;
			cur_best = i;
			key_valid = true;
		}
	}

	return cur_best;
}
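/*
 * Choose the workload class (RT > BE > IDLE) and type to serve next for
 * this group, and compute how long the chosen workload may run before it
 * expires.
 */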
static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	unsigned slice;
	unsigned count;
	struct cfq_rb_root *st;
	unsigned group_slice;
	enum wl_class_t original_class = cfqd->serving_wl_class;

	/* Choose next priority. RT > BE > IDLE */
	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
		cfqd->serving_wl_class = RT_WORKLOAD;
	else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
		cfqd->serving_wl_class = BE_WORKLOAD;
	else {
		cfqd->serving_wl_class = IDLE_WORKLOAD;
		cfqd->workload_expires = jiffies + 1;
		return;
	}

	if (original_class != cfqd->serving_wl_class)
		goto new_workload;

	/*
	 * For RT and BE, we have to choose also the type
	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
	 * expiration time
	 */
	st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
	count = st->count;

	/*
	 * check workload expiration, and that we still have other queues ready
	 */
	if (count && !time_after(jiffies, cfqd->workload_expires))
		return;

new_workload:
	/* otherwise select new workload type */
	cfqd->serving_wl_type = cfq_choose_wl(cfqd, cfqg,
					cfqd->serving_wl_class);
	st = st_for(cfqg, cfqd->serving_wl_class, cfqd->serving_wl_type);
	count = st->count;

	/*
	 * the workload slice is computed as a fraction of target latency
	 * proportional to the number of queues in that workload, over
	 * all the queues in the same priority class
	 */
	group_slice = cfq_group_slice(cfqd, cfqg);

	slice = group_slice * count /
		max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_wl_class],
		      cfq_group_busy_queues_wl(cfqd->serving_wl_class, cfqd,
					       cfqg));

	if (cfqd->serving_wl_type == ASYNC_WORKLOAD) {
		unsigned int tmp;

		/*
		 * Async queues are currently system wide. Just taking
		 * proportion of queues within the same group will lead to
		 * higher async ratio system wide as generally root group is
		 * going to have higher weight. A more accurate thing would be
		 * to calculate system wide async/sync ratio.
		 */
		tmp = cfqd->cfq_target_latency *
			cfqg_busy_async_queues(cfqd, cfqg);
		tmp = tmp/cfqd->busy_queues;
		slice = min_t(unsigned, slice, tmp);

		/* async workload slice is scaled down according to
		 * the sync/async slice ratio. */
		slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
	} else
		/* sync workload slice is at least 2 * cfq_slice_idle */
		slice = max(slice, 2 * cfqd->cfq_slice_idle);

	slice = max_t(unsigned, slice, CFQ_MIN_TT);
	cfq_log(cfqd, "workload slice:%d", slice);
	cfqd->workload_expires = jiffies + slice;
}
static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	struct cfq_group *cfqg;

	if (RB_EMPTY_ROOT(&st->rb))
		return NULL;
	cfqg = cfq_rb_first_group(st);
	update_min_vdisktime(st);
	return cfqg;
}

static void cfq_choose_cfqg(struct cfq_data *cfqd)
{
	struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);

	cfqd->serving_group = cfqg;

	/* Restore the workload type data */
	if (cfqg->saved_wl_slice) {
		cfqd->workload_expires = jiffies + cfqg->saved_wl_slice;
		cfqd->serving_wl_type = cfqg->saved_wl_type;
		cfqd->serving_wl_class = cfqg->saved_wl_class;
	} else
		cfqd->workload_expires = jiffies - 1;

	choose_service_tree(cfqd, cfqg);
}
/*
 * Select a queue for service. If we have a current active queue,
 * check whether to continue servicing it, or retrieve and set a new one.
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq, *new_cfqq = NULL;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	if (!cfqd->rq_queued)
		return NULL;

	/*
	 * We were waiting for group to get backlogged. Expire the queue
	 */
	if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
		goto expire;

	/*
	 * The active queue has run out of time, expire it and select new.
	 */
	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
		/*
		 * If slice had not expired at the completion of last request
		 * we might not have turned on wait_busy flag. Don't expire
		 * the queue yet. Allow the group to get backlogged.
		 *
		 * The very fact that we have used up the slice means we have
		 * been idling all along on this queue and it should be ok to
		 * wait for this request to complete.
		 */
		if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
		    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
			cfqq = NULL;
			goto keep_queue;
		} else
			goto check_group_idle;
	}

	/*
	 * The active queue has requests and isn't expired, allow it to
	 * dispatch.
	 */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		goto keep_queue;

	/*
	 * If another queue has a request waiting within our mean seek
	 * distance, let it run. The expire code will check for close
	 * cooperators and put the close queue at the front of the service
	 * tree. If possible, merge the expiring queue with the new cfqq.
	 */
	new_cfqq = cfq_close_cooperator(cfqd, cfqq);
	if (new_cfqq) {
		if (!cfqq->new_cfqq)
			cfq_setup_merge(cfqq, new_cfqq);
		goto expire;
	}

	/*
	 * No requests pending. If the active queue still has requests in
	 * flight or is idling for a new request, allow either of these
	 * conditions to happen (or time out) before selecting a new queue.
	 */
	if (timer_pending(&cfqd->idle_slice_timer)) {
		cfqq = NULL;
		goto keep_queue;
	}

	/*
	 * This is a deep seek queue, but the device is much faster than
	 * the queue can deliver, don't idle
	 */
	if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
	    (cfq_cfqq_slice_new(cfqq) ||
	     (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
		cfq_clear_cfqq_deep(cfqq);
		cfq_clear_cfqq_idle_window(cfqq);
	}

	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
		cfqq = NULL;
		goto keep_queue;
	}

	/*
	 * If group idle is enabled and there are requests dispatched from
	 * this group, wait for requests to complete.
	 */
check_group_idle:
	if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
	    cfqq->cfqg->dispatched &&
	    !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
		cfqq = NULL;
		goto keep_queue;
	}

expire:
	cfq_slice_expired(cfqd, 0);
new_queue:
	/*
	 * Current queue expired. Check if we have to switch to a new
	 * service tree
	 */
	if (!new_cfqq)
		cfq_choose_cfqg(cfqd);

	cfqq = cfq_set_active_queue(cfqd, new_cfqq);
keep_queue:
	return cfqq;
}
static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
	int dispatched = 0;

	while (cfqq->next_rq) {
		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
		dispatched++;
	}

	BUG_ON(!list_empty(&cfqq->fifo));

	/* By default cfqq is not expired if it is empty. Do it explicitly */
	__cfq_slice_expired(cfqq->cfqd, cfqq, 0);
	return dispatched;
}

/*
 * Drain our current requests. Used for barriers and when switching
 * io schedulers on-the-fly.
 */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;
	int dispatched = 0;

	/* Expire the timeslice of the current active queue first */
	cfq_slice_expired(cfqd, 0);
	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
		__cfq_set_active_queue(cfqd, cfqq);
		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
	}

	BUG_ON(cfqd->busy_queues);

	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
	return dispatched;
}
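/*
 * Estimate whether cfqq's remaining slice is about to run out, assuming
 * each in-flight request costs roughly one cfq_slice_idle period.
 */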
static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
	struct cfq_queue *cfqq)
{
	/* the queue hasn't finished any request, can't estimate */
	if (cfq_cfqq_slice_new(cfqq))
		return true;
	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
		cfqq->slice_end))
		return true;

	return false;
}
static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned int max_dispatch;

	/*
	 * Drain async requests before we start sync IO
	 */
	if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
		return false;

	/*
	 * If this is an async queue and we have sync IO in flight, let it wait
	 */
	if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
		return false;

	max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
	if (cfq_class_idle(cfqq))
		max_dispatch = 1;

	/*
	 * Does this cfqq already have too much IO in flight?
	 */
	if (cfqq->dispatched >= max_dispatch) {
		bool promote_sync = false;
		/*
		 * idle queue must always only have a single IO in flight
		 */
		if (cfq_class_idle(cfqq))
			return false;

		/*
		 * If there is only one sync queue, we can ignore the async
		 * queue here and give the sync queue no dispatch limit. The
		 * reason is that a sync queue can preempt an async queue, so
		 * limiting the sync queue doesn't make sense. This is useful
		 * for the aiostress test.
		 */
		if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
			promote_sync = true;

		/*
		 * We have other queues, don't allow more IO from this one
		 */
		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
		    !promote_sync)
			return false;

		/*
		 * Sole queue user, no limit
		 */
		if (cfqd->busy_queues == 1 || promote_sync)
			max_dispatch = -1;
		else
			/*
			 * Normally we start throttling cfqq when cfq_quantum/2
			 * requests have been dispatched. But we can drive
			 * deeper queue depths at the beginning of slice
			 * subjected to upper limit of cfq_quantum.
			 */
			max_dispatch = cfqd->cfq_quantum;
	}

	/*
	 * Async queues must wait a bit before being allowed dispatch.
	 * We also ramp up the dispatch depth gradually for async IO,
	 * based on the last sync IO we serviced
	 */
	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
		unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
		unsigned int depth;

		depth = last_sync / cfqd->cfq_slice[1];
		if (!depth && !cfqq->dispatched)
			depth = 1;
		if (depth < max_dispatch)
			max_dispatch = depth;
	}

	/*
	 * If we're below the current max, allow a dispatch
	 */
	return cfqq->dispatched < max_dispatch;
}
/*
 * Dispatch a request from cfqq, moving it to the request queue
 * dispatch list.
 */
static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct request *rq;

	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

	if (!cfq_may_dispatch(cfqd, cfqq))
		return false;

	/*
	 * follow expired path, else get first next available
	 */
	rq = cfq_check_fifo(cfqq);
	if (!rq)
		rq = cfqq->next_rq;

	/*
	 * insert request into driver dispatch list
	 */
	cfq_dispatch_insert(cfqd->queue, rq);

	if (!cfqd->active_cic) {
		struct cfq_io_cq *cic = RQ_CIC(rq);

		atomic_long_inc(&cic->icq.ioc->refcount);
		cfqd->active_cic = cic;
	}

	return true;
}
/*
 * Find the cfqq that we need to service and move a request from that to the
 * dispatch list
 */
static int cfq_dispatch_requests(struct request_queue *q, int force)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq;

	if (!cfqd->busy_queues)
		return 0;

	if (unlikely(force))
		return cfq_forced_dispatch(cfqd);

	cfqq = cfq_select_queue(cfqd);
	if (!cfqq)
		return 0;

	/*
	 * Dispatch a request from this cfqq, if it is allowed
	 */
	if (!cfq_dispatch_request(cfqd, cfqq))
		return 0;

	cfqq->slice_dispatch++;
	cfq_clear_cfqq_must_dispatch(cfqq);

	/*
	 * expire an async queue immediately if it has used up its slice. idle
	 * queue always expire after 1 dispatch round.
	 */
	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
	    cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
	    cfq_class_idle(cfqq))) {
		cfqq->slice_end = jiffies + 1;
		cfq_slice_expired(cfqd, 0);
	}

	cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
	return 1;
}
/*
 * task holds one reference to the queue, dropped when task exits. each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
 *
 * Each cfq queue took a reference on the parent group. Drop it now.
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct cfq_group *cfqg;

	BUG_ON(cfqq->ref <= 0);

	cfqq->ref--;
	if (cfqq->ref)
		return;

	cfq_log_cfqq(cfqd, cfqq, "put_queue");
	BUG_ON(rb_first(&cfqq->sort_list));
	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
	cfqg = cfqq->cfqg;

	if (unlikely(cfqd->active_queue == cfqq)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	BUG_ON(cfq_cfqq_on_rr(cfqq));
	kmem_cache_free(cfq_pool, cfqq);
	cfqg_put(cfqg);
}
static void cfq_put_cooperator(struct cfq_queue *cfqq)
{
	struct cfq_queue *__cfqq, *next;

	/*
	 * If this queue was scheduled to merge with another queue, be
	 * sure to drop the reference taken on that queue (and others in
	 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
	 */
	__cfqq = cfqq->new_cfqq;
	while (__cfqq) {
		if (__cfqq == cfqq) {
			WARN(1, "cfqq->new_cfqq loop detected\n");
			break;
		}
		next = __cfqq->new_cfqq;
		cfq_put_queue(__cfqq);
		__cfqq = next;
	}
}

static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (unlikely(cfqq == cfqd->active_queue)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	cfq_put_cooperator(cfqq);

	cfq_put_queue(cfqq);
}
static void cfq_init_icq(struct io_cq *icq)
{
	struct cfq_io_cq *cic = icq_to_cic(icq);

	cic->ttime.last_end_request = jiffies;
}

static void cfq_exit_icq(struct io_cq *icq)
{
	struct cfq_io_cq *cic = icq_to_cic(icq);
	struct cfq_data *cfqd = cic_to_cfqd(cic);

	if (cic->cfqq[BLK_RW_ASYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
		cic->cfqq[BLK_RW_ASYNC] = NULL;
	}

	if (cic->cfqq[BLK_RW_SYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
		cic->cfqq[BLK_RW_SYNC] = NULL;
	}
}
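/*
 * Translate the io_context's ioprio class/value into the queue's
 * scheduling parameters. Only runs when the prio_changed flag is set.
 */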
static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
{
	struct task_struct *tsk = current;
	int ioprio_class;

	if (!cfq_cfqq_prio_changed(cfqq))
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
	switch (ioprio_class) {
	default:
		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
		/* fall through: treat an unknown class as "none" */
	case IOPRIO_CLASS_NONE:
		/*
		 * no prio set, inherit CPU scheduling settings
		 */
		cfqq->ioprio = task_nice_ioprio(tsk);
		cfqq->ioprio_class = task_nice_ioclass(tsk);
		break;
	case IOPRIO_CLASS_RT:
		cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
		cfqq->ioprio_class = IOPRIO_CLASS_RT;
		break;
	case IOPRIO_CLASS_BE:
		cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_IDLE:
		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
		cfqq->ioprio = 7;
		cfq_clear_cfqq_idle_window(cfqq);
		break;
	}

	/*
	 * keep track of original prio settings in case we have to temporarily
	 * elevate the priority of this queue
	 */
	cfqq->org_ioprio = cfqq->ioprio;
	cfq_clear_cfqq_prio_changed(cfqq);
}
static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
{
	int ioprio = cic->icq.ioc->ioprio;
	struct cfq_data *cfqd = cic_to_cfqd(cic);
	struct cfq_queue *cfqq;

	/*
	 * Check whether ioprio has changed. The condition may trigger
	 * spuriously on a newly created cic but there's no harm.
	 */
	if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
		return;

	cfqq = cic->cfqq[BLK_RW_ASYNC];
	if (cfqq) {
		struct cfq_queue *new_cfqq;

		new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
					 GFP_ATOMIC);
		if (new_cfqq) {
			cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
			cfq_put_queue(cfqq);
		}
	}

	cfqq = cic->cfqq[BLK_RW_SYNC];
	if (cfqq)
		cfq_mark_cfqq_prio_changed(cfqq);

	cic->ioprio = ioprio;
}
static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			  pid_t pid, bool is_sync)
{
	RB_CLEAR_NODE(&cfqq->rb_node);
	RB_CLEAR_NODE(&cfqq->p_node);
	INIT_LIST_HEAD(&cfqq->fifo);

	cfqq->ref = 0;
	cfqq->cfqd = cfqd;

	cfq_mark_cfqq_prio_changed(cfqq);

	if (is_sync) {
		if (!cfq_class_idle(cfqq))
			cfq_mark_cfqq_idle_window(cfqq);
		cfq_mark_cfqq_sync(cfqq);
	}
	cfqq->pid = pid;
}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
{
	struct cfq_data *cfqd = cic_to_cfqd(cic);
	struct cfq_queue *sync_cfqq;
	uint64_t id;

	rcu_read_lock();
	id = bio_blkcg(bio)->id;
	rcu_read_unlock();

	/*
	 * Check whether blkcg has changed. The condition may trigger
	 * spuriously on a newly created cic but there's no harm.
	 */
	if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
		return;

	sync_cfqq = cic_to_cfqq(cic, 1);
	if (sync_cfqq) {
		/*
		 * Drop reference to sync queue. A new sync queue will be
		 * assigned in new group upon arrival of a fresh request.
		 */
		cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
		cic_set_cfqq(cic, NULL, 1);
		cfq_put_queue(sync_cfqq);
	}

	cic->blkcg_id = id;
}
#else
static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
#endif	/* CONFIG_CFQ_GROUP_IOSCHED */

static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
		     struct bio *bio, gfp_t gfp_mask)
{
	struct blkcg *blkcg;
	struct cfq_queue *cfqq, *new_cfqq = NULL;
	struct cfq_group *cfqg;

retry:
	rcu_read_lock();

	blkcg = bio_blkcg(bio);
	cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
	cfqq = cic_to_cfqq(cic, is_sync);

	/*
	 * Always try a new alloc if we fell back to the OOM cfqq
	 * originally, since it should just be a temporary situation.
	 */
	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
		cfqq = NULL;
		if (new_cfqq) {
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
			rcu_read_unlock();
			spin_unlock_irq(cfqd->queue->queue_lock);
			new_cfqq = kmem_cache_alloc_node(cfq_pool,
							 gfp_mask | __GFP_ZERO,
							 cfqd->queue->node);
			spin_lock_irq(cfqd->queue->queue_lock);
			if (new_cfqq)
				goto retry;
		} else {
			cfqq = kmem_cache_alloc_node(cfq_pool,
						     gfp_mask | __GFP_ZERO,
						     cfqd->queue->node);
		}

		if (cfqq) {
			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
			cfq_init_prio_data(cfqq, cic);
			cfq_link_cfqq_cfqg(cfqq, cfqg);
			cfq_log_cfqq(cfqd, cfqq, "alloced");
		} else
			cfqq = &cfqd->oom_cfqq;
	}

	if (new_cfqq)
		kmem_cache_free(cfq_pool, new_cfqq);

	rcu_read_unlock();
	return cfqq;
}
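
/*
 * Illustrative sketch (not part of this file): cfq_find_alloc_queue() above
 * uses the classic "drop the lock, allocate, retake the lock, re-check"
 * pattern, since a sleeping allocation must not happen under a spinlock and
 * the world may have changed while the lock was dropped. A generic skeleton
 * of the same pattern; demo_obj, demo_lock, demo_lookup() and demo_insert()
 * are hypothetical names:
 */
#if 0
struct demo_obj { int key; };
static DEFINE_SPINLOCK(demo_lock);

static struct demo_obj *demo_get(int key)
{
	struct demo_obj *obj, *new_obj = NULL;
retry:
	spin_lock_irq(&demo_lock);
	obj = demo_lookup(key);			/* hypothetical lookup */
	if (!obj) {
		if (new_obj) {
			obj = new_obj;		/* use the spare allocated below */
			new_obj = NULL;
			demo_insert(obj);	/* hypothetical insert */
		} else {
			/* never sleep while holding the spinlock */
			spin_unlock_irq(&demo_lock);
			new_obj = kmalloc(sizeof(*new_obj), GFP_KERNEL);
			if (!new_obj)
				return NULL;
			goto retry;	/* re-check: someone may have raced us */
		}
	}
	spin_unlock_irq(&demo_lock);
	kfree(new_obj);				/* lost the race: drop the spare */
	return obj;
}
#endif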

static struct cfq_queue **
cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
{
	switch (ioprio_class) {
	case IOPRIO_CLASS_RT:
		return &cfqd->async_cfqq[0][ioprio];
	case IOPRIO_CLASS_NONE:
		ioprio = IOPRIO_NORM;
		/* fall through */
	case IOPRIO_CLASS_BE:
		return &cfqd->async_cfqq[1][ioprio];
	case IOPRIO_CLASS_IDLE:
		return &cfqd->async_idle_cfqq;
	default:
		BUG();
	}
}

static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
	      struct bio *bio, gfp_t gfp_mask)
{
	const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
	const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
	struct cfq_queue **async_cfqq = NULL;
	struct cfq_queue *cfqq = NULL;

	if (!is_sync) {
		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
		cfqq = *async_cfqq;
	}

	if (!cfqq)
		cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);

	/*
	 * pin the queue now that it's allocated, scheduler exit will prune it
	 */
	if (!is_sync && !(*async_cfqq)) {
		cfqq->ref++;
		*async_cfqq = cfqq;
	}

	cfqq->ref++;
	return cfqq;
}

static void
__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
{
	unsigned long elapsed = jiffies - ttime->last_end_request;
	elapsed = min(elapsed, 2UL * slice_idle);

	ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
	ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
	ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
}
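
/*
 * Illustrative sketch (not part of this file): the think-time estimate above
 * is a fixed-point exponentially weighted moving average with weight 7/8 and
 * a 256x scale factor on the sample count. Starting from zero and feeding a
 * constant elapsed time of 4 jiffies:
 *
 *	samples: 0 -> 32 -> 60 -> 84 -> ... -> 256  (fixed point of s = 7s/8 + 32)
 *	total:   0 -> 128 -> 240 -> ...     -> 1024 (fixed point of t = 7t/8 + 128)
 *	mean:    converges to (1024 + 128) / 256 = 4 (integer division),
 *		 i.e. the true think time.
 *
 * A standalone demo of the same arithmetic:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long samples = 0, total = 0, mean = 0, elapsed = 4, i;

	for (i = 0; i < 64; i++) {
		samples = (7 * samples + 256) / 8;
		total = (7 * total + 256 * elapsed) / 8;
		mean = (total + 128) / samples;
	}
	printf("samples=%lu total=%lu mean=%lu\n", samples, total, mean);
	return 0;
}
#endif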

static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			struct cfq_io_cq *cic)
{
	if (cfq_cfqq_sync(cfqq)) {
		__cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
		__cfq_update_io_thinktime(&cfqq->service_tree->ttime,
					  cfqd->cfq_slice_idle);
	}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	__cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
#endif
}

static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct request *rq)
{
	sector_t sdist = 0;
	sector_t n_sec = blk_rq_sectors(rq);
	if (cfqq->last_request_pos) {
		if (cfqq->last_request_pos < blk_rq_pos(rq))
			sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
		else
			sdist = cfqq->last_request_pos - blk_rq_pos(rq);
	}

	cfqq->seek_history <<= 1;
	if (blk_queue_nonrot(cfqd->queue))
		cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
	else
		cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
}
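
/*
 * Illustrative sketch (not part of this file): seek_history is a sliding
 * 32-bit window where each new request shifts in one bit - 1 for "seeky"
 * (or, on non-rotational media, "small"), 0 otherwise. A queue can then be
 * judged seeky when enough of the recent bits are set. A userspace-style
 * rendition with a hypothetical threshold (more than 1/8 of the last 32
 * requests seeky); the demo_ names are stand-ins, not this file's macros:
 */
#if 0
#include <stdint.h>
#include <stdbool.h>

static uint32_t demo_record(uint32_t seek_history, bool seeky)
{
	/* the oldest sample falls off the top as the new one shifts in */
	return (seek_history << 1) | seeky;
}

static bool demo_seeky(uint32_t seek_history)
{
	/* population count over the window of the last 32 requests */
	return __builtin_popcount(seek_history) > 32 / 8;
}
#endif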

/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_cq *cic)
{
	int old_idle, enable_idle;

	/*
	 * Don't idle for async or idle io prio class
	 */
	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
		return;

	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);

	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
		cfq_mark_cfqq_deep(cfqq);

	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
		enable_idle = 0;
	else if (!atomic_read(&cic->icq.ioc->active_ref) ||
		 !cfqd->cfq_slice_idle ||
		 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
		enable_idle = 0;
	else if (sample_valid(cic->ttime.ttime_samples)) {
		if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
	}

	if (old_idle != enable_idle) {
		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
		if (enable_idle)
			cfq_mark_cfqq_idle_window(cfqq);
		else
			cfq_clear_cfqq_idle_window(cfqq);
	}
}

/*
 * Check if new_cfqq should preempt the currently active queue. Return false
 * for no (or if we aren't sure); returning true will cause a preempt.
 */
static bool
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
		   struct request *rq)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		return false;

	if (cfq_class_idle(new_cfqq))
		return false;

	if (cfq_class_idle(cfqq))
		return true;

	/*
	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
	 */
	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
		return false;

	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
		return true;

	if (new_cfqq->cfqg != cfqq->cfqg)
		return false;

	if (cfq_slice_used(cfqq))
		return true;

	/* Allow preemption only if we are idling on sync-noidle tree */
	if (cfqd->serving_wl_type == SYNC_NOIDLE_WORKLOAD &&
	    cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
	    new_cfqq->service_tree->count == 2 &&
	    RB_EMPTY_ROOT(&cfqq->sort_list))
		return true;

	/*
	 * So both queues are sync. Let the new request get disk time if
	 * it's a metadata request and the current queue is doing regular IO.
	 */
	if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
		return true;

	/*
	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
	 */
	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
		return true;

	/* An idle queue should not be idle now for some reason */
	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
		return true;

	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
		return false;

	/*
	 * if this request is as-good as one we would expect from the
	 * current cfqq, let it preempt
	 */
	if (cfq_rq_close(cfqd, cfqq, rq))
		return true;

	return false;
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	enum wl_type_t old_type = cfqq_type(cfqd->active_queue);

	cfq_log_cfqq(cfqd, cfqq, "preempt");
	cfq_slice_expired(cfqd, 1);

	/*
	 * workload type is changed, don't save slice, otherwise preempt
	 * doesn't happen
	 */
	if (old_type != cfqq_type(cfqq))
		cfqq->cfqg->saved_wl_slice = 0;

	/*
	 * Put the new queue at the front of the current list, so we know
	 * that it will be selected next.
	 */
	BUG_ON(!cfq_cfqq_on_rr(cfqq));

	cfq_service_tree_add(cfqd, cfqq, 1);

	cfqq->slice_end = 0;
	cfq_mark_cfqq_slice_new(cfqq);
}

/*
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
{
	struct cfq_io_cq *cic = RQ_CIC(rq);

	cfqd->rq_queued++;
	if (rq->cmd_flags & REQ_PRIO)
		cfqq->prio_pending++;

	cfq_update_io_thinktime(cfqd, cfqq, cic);
	cfq_update_io_seektime(cfqd, cfqq, rq);
	cfq_update_idle_window(cfqd, cfqq, cic);

	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);

	if (cfqq == cfqd->active_queue) {
		/*
		 * Remember that we saw a request from this process, but
		 * don't start queuing just yet. Otherwise we risk seeing lots
		 * of tiny requests, because we disrupt the normal plugging
		 * and merging. If the request is already larger than a single
		 * page, let it rip immediately. For that case we assume that
		 * merging is already done. Ditto for a busy system that
		 * has other work pending, don't risk delaying until the
		 * idle timer unplug to continue working.
		 */
		if (cfq_cfqq_wait_request(cfqq)) {
			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
			    cfqd->busy_queues > 1) {
				cfq_del_timer(cfqd, cfqq);
				cfq_clear_cfqq_wait_request(cfqq);
				__blk_run_queue(cfqd->queue);
			} else {
				cfqg_stats_update_idle_time(cfqq->cfqg);
				cfq_mark_cfqq_must_dispatch(cfqq);
			}
		}
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
		/*
		 * not the active queue - expire the current slice if it is
		 * idle and has expired its mean thinktime, or if this new
		 * queue has some old slice time left and is of higher
		 * priority, or if this new queue is RT and the current one
		 * is BE
		 */
		cfq_preempt_queue(cfqd, cfqq);
		__blk_run_queue(cfqd->queue);
	}
}

static void cfq_insert_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "insert_request");
	cfq_init_prio_data(cfqq, RQ_CIC(rq));

	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
	list_add_tail(&rq->queuelist, &cfqq->fifo);
	cfq_add_rq_rb(rq);
	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
				 rq->cmd_flags);
	cfq_rq_enqueued(cfqd, cfqq, rq);
}

/*
 * Update hw_tag based on peak queue depth over 50 samples under
 * sufficient load.
 */
static void cfq_update_hw_tag(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
		cfqd->hw_tag_est_depth = cfqd->rq_in_driver;

	if (cfqd->hw_tag == 1)
		return;

	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
		return;

	/*
	 * If the active queue doesn't have enough requests and can idle,
	 * cfq might not dispatch sufficient requests to hardware. Don't
	 * zero hw_tag in this case.
	 */
	if (cfqq && cfq_cfqq_idle_window(cfqq) &&
	    cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
	    CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
		return;

	if (cfqd->hw_tag_samples++ < 50)
		return;

	if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
		cfqd->hw_tag = 1;
	else
		cfqd->hw_tag = 0;
}

static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct cfq_io_cq *cic = cfqd->active_cic;

	/* If the queue already has requests, don't wait */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		return false;

	/* If there are other queues in the group, don't wait */
	if (cfqq->cfqg->nr_cfqq > 1)
		return false;

	/* the only queue in the group, but think time is big */
	if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
		return false;

	if (cfq_slice_used(cfqq))
		return true;

	/* if slice left is less than think time, wait busy */
	if (cic && sample_valid(cic->ttime.ttime_samples)
	    && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
		return true;

	/*
	 * If think time is less than a jiffy, then ttime_mean=0 and the
	 * check above will not be true. It might happen that the slice has
	 * not expired yet but will expire soon (4-5 ns) during
	 * select_queue(). To cover the case where think time is less than
	 * a jiffy, mark the queue wait busy if only 1 jiffy is left in the
	 * slice.
	 */
	if (cfqq->slice_end - jiffies == 1)
		return true;

	return false;
}
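
/*
 * Illustrative sketch (not part of this file): "cfqq->slice_end - jiffies"
 * above relies on unsigned wraparound arithmetic, the same trick behind the
 * kernel's time_after()/time_before() helpers. A userspace rendition of the
 * idea; demo_time_after() is a hypothetical stand-in, not the kernel macro:
 */
#if 0
#include <stdio.h>

/* true if counter value a is after b, even across a wraparound */
#define demo_time_after(a, b)	((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long jiffies = (unsigned long)-2;	/* about to wrap */
	unsigned long slice_end = jiffies + 5;		/* wraps past zero */

	/* remaining time is still 5 and the slice is not yet expired */
	printf("remaining=%lu expired=%d\n", slice_end - jiffies,
	       demo_time_after(jiffies, slice_end));
	return 0;
}
#endif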

static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);
	unsigned long now;

	now = jiffies;
	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
		     !!(rq->cmd_flags & REQ_NOIDLE));

	cfq_update_hw_tag(cfqd);

	WARN_ON(!cfqd->rq_in_driver);
	WARN_ON(!cfqq->dispatched);
	cfqd->rq_in_driver--;
	cfqq->dispatched--;
	(RQ_CFQG(rq))->dispatched--;
	cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
				     rq_io_start_time_ns(rq), rq->cmd_flags);

	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;

	if (sync) {
		struct cfq_rb_root *st;

		RQ_CIC(rq)->ttime.last_end_request = now;

		if (cfq_cfqq_on_rr(cfqq))
			st = cfqq->service_tree;
		else
			st = st_for(cfqq->cfqg, cfqq_class(cfqq),
				    cfqq_type(cfqq));

		st->ttime.last_end_request = now;
		if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
			cfqd->last_delayed_sync = now;
	}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	cfqq->cfqg->ttime.last_end_request = now;
#endif

	/*
	 * If this is the active queue, check if it needs to be expired,
	 * or if we want to idle in case it has no pending requests.
	 */
	if (cfqd->active_queue == cfqq) {
		const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);

		if (cfq_cfqq_slice_new(cfqq)) {
			cfq_set_prio_slice(cfqd, cfqq);
			cfq_clear_cfqq_slice_new(cfqq);
		}

		/*
		 * Should we wait for the next request to come in before we
		 * expire the queue?
		 */
		if (cfq_should_wait_busy(cfqd, cfqq)) {
			unsigned long extend_sl = cfqd->cfq_slice_idle;
			if (!cfqd->cfq_slice_idle)
				extend_sl = cfqd->cfq_group_idle;
			cfqq->slice_end = jiffies + extend_sl;
			cfq_mark_cfqq_wait_busy(cfqq);
			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
		}

		/*
		 * Idling is not enabled on:
		 * - expired queues
		 * - idle-priority queues
		 * - async queues
		 * - queues with still some requests queued
		 * - when there is a close cooperator
		 */
		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
			cfq_slice_expired(cfqd, 1);
		else if (sync && cfqq_empty &&
			 !cfq_close_cooperator(cfqd, cfqq)) {
			cfq_arm_slice_timer(cfqd);
		}
	}

	if (!cfqd->rq_in_driver)
		cfq_schedule_dispatch(cfqd);
}

static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
}

static int cfq_may_queue(struct request_queue *q, int rw)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_cq *cic;
	struct cfq_queue *cfqq;

	/*
	 * don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * so just lookup a possibly existing queue, or return 'may queue'
	 * if that fails
	 */
	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return ELV_MQUEUE_MAY;

	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
	if (cfqq) {
		cfq_init_prio_data(cfqq, cic);

		return __cfq_may_queue(cfqq);
	}

	return ELV_MQUEUE_MAY;
}

/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq) {
		const int rw = rq_data_dir(rq);

		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;

		/* Put down rq reference on cfqg */
		cfqg_put(RQ_CFQG(rq));
		rq->elv.priv[0] = NULL;
		rq->elv.priv[1] = NULL;

		cfq_put_queue(cfqq);
	}
}

static struct cfq_queue *
cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
		struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
	cic_set_cfqq(cic, cfqq->new_cfqq, 1);
	cfq_mark_cfqq_coop(cfqq->new_cfqq);
	cfq_put_queue(cfqq);
	return cic_to_cfqq(cic, 1);
}

/*
 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
 * was the last process referring to said cfqq.
 */
static struct cfq_queue *
split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
{
	if (cfqq_process_refs(cfqq) == 1) {
		cfqq->pid = current->pid;
		cfq_clear_cfqq_coop(cfqq);
		cfq_clear_cfqq_split_coop(cfqq);
		return cfqq;
	}

	cic_set_cfqq(cic, NULL, 1);

	cfq_put_cooperator(cfqq);

	cfq_put_queue(cfqq);
	return NULL;
}

/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
		gfp_t gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
	const int rw = rq_data_dir(rq);
	const bool is_sync = rq_is_sync(rq);
	struct cfq_queue *cfqq;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	spin_lock_irq(q->queue_lock);

	check_ioprio_changed(cic, bio);
	check_blkcg_changed(cic, bio);
new_queue:
	cfqq = cic_to_cfqq(cic, is_sync);
	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
		cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
		cic_set_cfqq(cic, cfqq, is_sync);
	} else {
		/*
		 * If the queue was seeky for too long, break it apart.
		 */
		if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
			cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
			cfqq = split_cfqq(cic, cfqq);
			if (!cfqq)
				goto new_queue;
		}

		/*
		 * Check to see if this queue is scheduled to merge with
		 * another, closely cooperating queue. The merging of
		 * queues happens here as it must be done in process context.
		 * The reference on new_cfqq was taken in merge_cfqqs.
		 */
		if (cfqq->new_cfqq)
			cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
	}

	cfqq->allocated[rw]++;

	cfqq->ref++;
	cfqg_get(cfqq->cfqg);
	rq->elv.priv[0] = cfqq;
	rq->elv.priv[1] = cfqq->cfqg;
	spin_unlock_irq(q->queue_lock);
	return 0;
}

static void cfq_kick_queue(struct work_struct *work)
{
	struct cfq_data *cfqd =
		container_of(work, struct cfq_data, unplug_work);
	struct request_queue *q = cfqd->queue;

	spin_lock_irq(q->queue_lock);
	__blk_run_queue(cfqd->queue);
	spin_unlock_irq(q->queue_lock);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;
	int timed_out = 1;

	cfq_log(cfqd, "idle timer fired");

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cfqd->active_queue;
	if (cfqq) {
		timed_out = 0;

		/*
		 * We saw a request before the queue expired, let it through
		 */
		if (cfq_cfqq_must_dispatch(cfqq))
			goto out_kick;

		/*
		 * expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * only expire and reinvoke request handler, if there are
		 * other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
			goto out_kick;

		/*
		 * Queue depth flag is reset only when the idle didn't succeed
		 */
		cfq_clear_cfqq_deep(cfqq);
	}
expire:
	cfq_slice_expired(cfqd, timed_out);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	cancel_work_sync(&cfqd->unplug_work);
}

static void cfq_put_async_queues(struct cfq_data *cfqd)
{
	int i;

	for (i = 0; i < IOPRIO_BE_NR; i++) {
		if (cfqd->async_cfqq[0][i])
			cfq_put_queue(cfqd->async_cfqq[0][i]);
		if (cfqd->async_cfqq[1][i])
			cfq_put_queue(cfqd->async_cfqq[1][i]);
	}

	if (cfqd->async_idle_cfqq)
		cfq_put_queue(cfqd->async_idle_cfqq);
}

static void cfq_exit_queue(struct elevator_queue *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	struct request_queue *q = cfqd->queue;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	cfq_put_async_queues(cfqd);

	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_deactivate_policy(q, &blkcg_policy_cfq);
#else
	kfree(cfqd->root_group);
#endif
	kfree(cfqd);
}

static int cfq_init_queue(struct request_queue *q)
{
	struct cfq_data *cfqd;
	struct blkcg_gq *blkg __maybe_unused;
	int i, ret;

	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!cfqd)
		return -ENOMEM;

	cfqd->queue = q;
	q->elevator->elevator_data = cfqd;

	/* Init root service tree */
	cfqd->grp_service_tree = CFQ_RB_ROOT;

	/* Init root group and prefer root group over other groups by default */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	ret = blkcg_activate_policy(q, &blkcg_policy_cfq);
	if (ret)
		goto out_free;

	cfqd->root_group = blkg_to_cfqg(q->root_blkg);
#else
	ret = -ENOMEM;
	cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
					GFP_KERNEL, cfqd->queue->node);
	if (!cfqd->root_group)
		goto out_free;

	cfq_init_cfqg_base(cfqd->root_group);
#endif
	cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;

	/*
	 * Not strictly needed (since RB_ROOT just clears the node and we
	 * zeroed cfqd on alloc), but better be safe in case someone decides
	 * to add magic to the rb code
	 */
	for (i = 0; i < CFQ_PRIO_LISTS; i++)
		cfqd->prio_trees[i] = RB_ROOT;

	/*
	 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
	 * Grab a permanent reference to it, so that the normal code flow
	 * will not attempt to free it. oom_cfqq is linked to root_group
	 * but shouldn't hold a reference as it'll never be unlinked. Lose
	 * the reference from linking right away.
	 */
	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
	cfqd->oom_cfqq.ref++;

	spin_lock_irq(q->queue_lock);
	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
	cfqg_put(cfqd->root_group);
	spin_unlock_irq(q->queue_lock);

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_target_latency = cfq_target_latency;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;
	cfqd->cfq_group_idle = cfq_group_idle;
	cfqd->cfq_latency = 1;
	cfqd->hw_tag = -1;

	/*
	 * we optimistically start assuming sync ops weren't delayed in last
	 * second, in order to have larger depth for async operations.
	 */
	cfqd->last_delayed_sync = jiffies - HZ;
	return 0;

out_free:
	kfree(cfqd);
	return ret;
}

/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
#undef SHOW_FUNCTION
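
/*
 * Illustrative expansion (not part of this file): each SHOW_FUNCTION() line
 * above stamps out a complete sysfs show handler. For example,
 * SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1) expands to
 * roughly:
 */
#if 0
static ssize_t cfq_slice_idle_show(struct elevator_queue *e, char *page)
{
	struct cfq_data *cfqd = e->elevator_data;
	unsigned int __data = cfqd->cfq_slice_idle;
	if (1)	/* __CONV: stored in jiffies, reported in milliseconds */
		__data = jiffies_to_msecs(__data);
	return cfq_var_show(__data, (page));
}
#endif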

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
#undef STORE_FUNCTION
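
/*
 * Usage note (illustrative): with cfq selected on a device, these tunables
 * appear under /sys/block/<dev>/queue/iosched/, e.g.
 *
 *	# cat /sys/block/sda/queue/iosched/slice_idle
 *	# echo 0 > /sys/block/sda/queue/iosched/slice_idle
 *
 * Attributes built with __CONV == 1 are read and written in milliseconds
 * and converted to/from jiffies by the macros above; "sda" here is just an
 * example device name.
 */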

#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	CFQ_ATTR(group_idle),
	CFQ_ATTR(low_latency),
	CFQ_ATTR(target_latency),
	__ATTR_NULL
};

static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn		= cfq_merge,
		.elevator_merged_fn		= cfq_merged_request,
		.elevator_merge_req_fn		= cfq_merged_requests,
		.elevator_allow_merge_fn	= cfq_allow_merge,
		.elevator_bio_merged_fn		= cfq_bio_merged,
		.elevator_dispatch_fn		= cfq_dispatch_requests,
		.elevator_add_req_fn		= cfq_insert_request,
		.elevator_activate_req_fn	= cfq_activate_request,
		.elevator_deactivate_req_fn	= cfq_deactivate_request,
		.elevator_completed_req_fn	= cfq_completed_request,
		.elevator_former_req_fn		= elv_rb_former_request,
		.elevator_latter_req_fn		= elv_rb_latter_request,
		.elevator_init_icq_fn		= cfq_init_icq,
		.elevator_exit_icq_fn		= cfq_exit_icq,
		.elevator_set_req_fn		= cfq_set_request,
		.elevator_put_req_fn		= cfq_put_request,
		.elevator_may_queue_fn		= cfq_may_queue,
		.elevator_init_fn		= cfq_init_queue,
		.elevator_exit_fn		= cfq_exit_queue,
	},
	.icq_size	= sizeof(struct cfq_io_cq),
	.icq_align	= __alignof__(struct cfq_io_cq),
	.elevator_attrs	= cfq_attrs,
	.elevator_name	= "cfq",
	.elevator_owner	= THIS_MODULE,
};

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static struct blkcg_policy blkcg_policy_cfq = {
	.pd_size		= sizeof(struct cfq_group),
	.cftypes		= cfq_blkcg_files,

	.pd_init_fn		= cfq_pd_init,
	.pd_reset_stats_fn	= cfq_pd_reset_stats,
};
#endif

static int __init cfq_init(void)
{
	int ret;

	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	if (!cfq_group_idle)
		cfq_group_idle = 1;

	ret = blkcg_policy_register(&blkcg_policy_cfq);
	if (ret)
		return ret;
#else
	cfq_group_idle = 0;
#endif

	ret = -ENOMEM;
	cfq_pool = KMEM_CACHE(cfq_queue, 0);
	if (!cfq_pool)
		goto err_pol_unreg;

	ret = elv_register(&iosched_cfq);
	if (ret)
		goto err_free_pool;

	return 0;

err_free_pool:
	kmem_cache_destroy(cfq_pool);
err_pol_unreg:
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_cfq);
#endif
	return ret;
}

static void __exit cfq_exit(void)
{
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkcg_policy_unregister(&blkcg_policy_cfq);
#endif
	elv_unregister(&iosched_cfq);
	kmem_cache_destroy(cfq_pool);
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");