/*
 * CFQ, or complete fairness queueing, disk scheduler.
 *
 * Based on ideas from a previously unfinished io
 * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/jiffies.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>
#include <linux/blktrace_api.h>
#include "blk.h"
#include "blk-cgroup.h"

static struct blkio_policy_type blkio_policy_cfq __maybe_unused;

/*
 * tunables
 */
/* max queue in one round of service */
static const int cfq_quantum = 8;
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
/* maximum backwards seek, in KiB */
static const int cfq_back_max = 16 * 1024;
/* penalty of a backwards seek */
static const int cfq_back_penalty = 2;
static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
static int cfq_group_idle = HZ / 125;
static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;

/*
 * offset from end of service tree
 */
#define CFQ_IDLE_DELAY		(HZ / 5)

/*
 * below this threshold, we consider thinktime immediate
 */
#define CFQ_MIN_TT		(2)

#define CFQ_SLICE_SCALE		(5)
#define CFQ_HW_QUEUE_MIN	(5)
#define CFQ_SERVICE_SHIFT	12

#define CFQQ_SEEK_THR		(sector_t)(8 * 100)
#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
#define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
#define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
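/*
 * Note: seek_history is a 32-bit sliding window, one bit per recent request
 * (1 = that request was a seek). A queue is classed as seeky when more than
 * 32/8 = 4 of its last 32 requests were seeks; e.g. hweight32(0x1f) == 5
 * set bits makes CFQQ_SEEKY() true.
 */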
#define RQ_CIC(rq)		icq_to_cic((rq)->elv.icq)
#define RQ_CFQQ(rq)		(struct cfq_queue *) ((rq)->elv.priv[0])
#define RQ_CFQG(rq)		(struct cfq_group *) ((rq)->elv.priv[1])

static struct kmem_cache *cfq_pool;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define sample_valid(samples)	((samples) > 80)
#define rb_entry_cfqg(node)	rb_entry((node), struct cfq_group, rb_node)

struct cfq_ttime {
        unsigned long last_end_request;

        unsigned long ttime_total;
        unsigned long ttime_samples;
        unsigned long ttime_mean;
};

/*
 * Most of our rbtree usage is for sorting with min extraction, so
 * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
 * move this into the elevator for the rq sorting as well.
 */
struct cfq_rb_root {
        struct rb_root rb;
        struct rb_node *left;
        unsigned count;
        unsigned total_weight;
        u64 min_vdisktime;
        struct cfq_ttime ttime;
};
#define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, \
			.ttime = {.last_end_request = jiffies,},}
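/*
 * Usage sketch: assigning the compound literal resets a root to an empty
 * tree with no cached leftmost node and its think-time clock stamped "now",
 * as cfq_init_cfqg_base() below does with "*st = CFQ_RB_ROOT;".
 */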
/*
 * Per process-grouping structure
 */
struct cfq_queue {
        /* reference count */
        int ref;
        /* various state flags, see below */
        unsigned int flags;
        /* parent cfq_data */
        struct cfq_data *cfqd;
        /* service_tree member */
        struct rb_node rb_node;
        /* service_tree key */
        unsigned long rb_key;
        /* prio tree member */
        struct rb_node p_node;
        /* prio tree root we belong to, if any */
        struct rb_root *p_root;
        /* sorted list of pending requests */
        struct rb_root sort_list;
        /* if fifo isn't expired, next request to serve */
        struct request *next_rq;
        /* requests queued in sort_list */
        int queued[2];
        /* currently allocated requests */
        int allocated[2];
        /* fifo list of requests in sort_list */
        struct list_head fifo;

        /* time when queue got scheduled in to dispatch first request. */
        unsigned long dispatch_start;
        unsigned int allocated_slice;
        unsigned int slice_dispatch;
        /* time when first request from queue completed and slice started. */
        unsigned long slice_start;
        unsigned long slice_end;
        long slice_resid;

        /* pending priority requests */
        int prio_pending;
        /* number of requests that are on the dispatch list or inside driver */
        int dispatched;

        /* io prio of this queue */
        unsigned short ioprio, org_ioprio;
        unsigned short ioprio_class;

        pid_t pid;

        u32 seek_history;
        sector_t last_request_pos;

        struct cfq_rb_root *service_tree;
        struct cfq_queue *new_cfqq;
        struct cfq_group *cfqg;
        /* Number of sectors dispatched from queue in single dispatch round */
        unsigned long nr_sectors;
};

/*
 * First index in the service_trees.
 * IDLE is handled separately, on the dedicated service_tree_idle.
 */
enum wl_prio_t {
        BE_WORKLOAD = 0,
        RT_WORKLOAD = 1,
        IDLE_WORKLOAD = 2,
        CFQ_PRIO_NR,
};

/*
 * Second index in the service_trees.
 */
enum wl_type_t {
        ASYNC_WORKLOAD = 0,
        SYNC_NOIDLE_WORKLOAD = 1,
        SYNC_WORKLOAD = 2
};
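/*
 * The two enums above jointly index cfq_group->service_trees[prio][type],
 * e.g. service_trees[RT_WORKLOAD][SYNC_NOIDLE_WORKLOAD]; IDLE queues all
 * live on the separate service_tree_idle (see struct cfq_group below and
 * service_tree_for()).
 */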
struct cfqg_stats {
#ifdef CONFIG_CFQ_GROUP_IOSCHED
        /* total bytes transferred */
        struct blkg_rwstat service_bytes;
        /* total IOs serviced, post merge */
        struct blkg_rwstat serviced;
        /* number of ios merged */
        struct blkg_rwstat merged;
        /* total time spent on device in ns, may not be accurate w/ queueing */
        struct blkg_rwstat service_time;
        /* total time spent waiting in scheduler queue in ns */
        struct blkg_rwstat wait_time;
        /* number of IOs queued up */
        struct blkg_rwstat queued;
        /* total sectors transferred */
        struct blkg_stat sectors;
        /* total disk time and nr sectors dispatched by this group */
        struct blkg_stat time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
        /* time not charged to this cgroup */
        struct blkg_stat unaccounted_time;
        /* sum of number of ios queued across all samples */
        struct blkg_stat avg_queue_size_sum;
        /* count of samples taken for average */
        struct blkg_stat avg_queue_size_samples;
        /* how many times this group has been removed from service tree */
        struct blkg_stat dequeue;
        /* total time spent waiting for it to be assigned a timeslice. */
        struct blkg_stat group_wait_time;
        /* time spent idling for this blkio_group */
        struct blkg_stat idle_time;
        /* total time with empty current active q with other requests queued */
        struct blkg_stat empty_time;
        /* fields after this shouldn't be cleared on stat reset */
        uint64_t start_group_wait_time;
        uint64_t start_idle_time;
        uint64_t start_empty_time;
        uint16_t flags;
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
#endif	/* CONFIG_CFQ_GROUP_IOSCHED */
};

/* This is per cgroup per device grouping structure */
struct cfq_group {
        /* group service_tree member */
        struct rb_node rb_node;

        /* group service_tree key */
        u64 vdisktime;
        unsigned int weight;
        unsigned int new_weight;
        unsigned int dev_weight;

        /* number of cfqq currently on this group */
        int nr_cfqq;

        /*
         * Per group busy queues average. Useful for workload slice calc. We
         * create the array for each prio class but at run time it is used
         * only for RT and BE class and slot for IDLE class remains unused.
         * This is primarily done to avoid confusion and a gcc warning.
         */
        unsigned int busy_queues_avg[CFQ_PRIO_NR];
        /*
         * rr lists of queues with requests. We maintain service trees for
         * RT and BE classes. These trees are subdivided into subclasses
         * of SYNC, SYNC_NOIDLE and ASYNC based on workload type. For IDLE
         * class there is no subclassification and all the cfq queues go on
         * a single tree service_tree_idle.
         * Counts are embedded in the cfq_rb_root
         */
        struct cfq_rb_root service_trees[2][3];
        struct cfq_rb_root service_tree_idle;

        unsigned long saved_workload_slice;
        enum wl_type_t saved_workload;
        enum wl_prio_t saved_serving_prio;

        /* number of requests that are on the dispatch list or inside driver */
        int dispatched;
        struct cfq_ttime ttime;
        struct cfqg_stats stats;
};

struct cfq_io_cq {
        struct io_cq		icq;		/* must be the first member */
        struct cfq_queue	*cfqq[2];
        struct cfq_ttime	ttime;
        int			ioprio;		/* the current ioprio */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
        uint64_t		blkcg_id;	/* the current blkcg ID */
#endif
};
/*
 * Per block device queue structure
 */
struct cfq_data {
        struct request_queue *queue;
        /* Root service tree for cfq_groups */
        struct cfq_rb_root grp_service_tree;
        struct cfq_group *root_group;

        /*
         * The priority currently being served
         */
        enum wl_prio_t serving_prio;
        enum wl_type_t serving_type;
        unsigned long workload_expires;
        struct cfq_group *serving_group;

        /*
         * Each priority tree is sorted by next_request position. These
         * trees are used when determining if two or more queues are
         * interleaving requests (see cfq_close_cooperator).
         */
        struct rb_root prio_trees[CFQ_PRIO_LISTS];

        unsigned int busy_queues;
        unsigned int busy_sync_queues;

        int rq_in_driver;
        int rq_in_flight[2];

        /*
         * queue-depth detection
         */
        int rq_queued;
        int hw_tag;
        /*
         * hw_tag can be
         * -1 => indeterminate (cfq will behave as if NCQ is present, to allow better detection)
         *  1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
         *  0 => no NCQ
         */
        int hw_tag_est_depth;
        unsigned int hw_tag_samples;

        /*
         * idle window management
         */
        struct timer_list idle_slice_timer;
        struct work_struct unplug_work;

        struct cfq_queue *active_queue;
        struct cfq_io_cq *active_cic;

        /*
         * async queue for each priority case
         */
        struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
        struct cfq_queue *async_idle_cfqq;

        sector_t last_position;

        /*
         * tunables, see top of file
         */
        unsigned int cfq_quantum;
        unsigned int cfq_fifo_expire[2];
        unsigned int cfq_back_penalty;
        unsigned int cfq_back_max;
        unsigned int cfq_slice[2];
        unsigned int cfq_slice_async_rq;
        unsigned int cfq_slice_idle;
        unsigned int cfq_group_idle;
        unsigned int cfq_latency;

        /*
         * Fallback dummy cfqq for extreme OOM conditions
         */
        struct cfq_queue oom_cfqq;

        unsigned long last_delayed_sync;
};

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);

static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
                                            enum wl_prio_t prio,
                                            enum wl_type_t type)
{
        if (!cfqg)
                return NULL;

        if (prio == IDLE_WORKLOAD)
                return &cfqg->service_tree_idle;

        return &cfqg->service_trees[prio][type];
}

enum cfqq_state_flags {
        CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
        CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
        CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
        CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
        CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
        CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
        CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
        CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
        CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
        CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
        CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be split */
        CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
        CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
};
#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
        (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
        (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
        return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(slice_new);
CFQ_CFQQ_FNS(sync);
CFQ_CFQQ_FNS(coop);
CFQ_CFQQ_FNS(split_coop);
CFQ_CFQQ_FNS(deep);
CFQ_CFQQ_FNS(wait_busy);
#undef CFQ_CFQQ_FNS
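/*
 * For reference, each CFQ_CFQQ_FNS(x) expansion above generates three
 * accessors; e.g. CFQ_CFQQ_FNS(on_rr) yields cfq_mark_cfqq_on_rr(),
 * cfq_clear_cfqq_on_rr() and the test cfq_cfqq_on_rr(), all operating on
 * the CFQ_CFQQ_FLAG_on_rr bit in cfqq->flags.
 */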
#if defined(CONFIG_CFQ_GROUP_IOSCHED) && defined(CONFIG_DEBUG_BLK_CGROUP)

/* cfqg stats flags */
enum cfqg_stats_flags {
        CFQG_stats_waiting = 0,
        CFQG_stats_idling,
        CFQG_stats_empty,
};

#define CFQG_FLAG_FNS(name)						\
static inline void cfqg_stats_mark_##name(struct cfqg_stats *stats)	\
{									\
        stats->flags |= (1 << CFQG_stats_##name);			\
}									\
static inline void cfqg_stats_clear_##name(struct cfqg_stats *stats)	\
{									\
        stats->flags &= ~(1 << CFQG_stats_##name);			\
}									\
static inline int cfqg_stats_##name(struct cfqg_stats *stats)		\
{									\
        return (stats->flags & (1 << CFQG_stats_##name)) != 0;		\
}

CFQG_FLAG_FNS(waiting)
CFQG_FLAG_FNS(idling)
CFQG_FLAG_FNS(empty)
#undef CFQG_FLAG_FNS

/* This should be called with the queue_lock held. */
static void cfqg_stats_update_group_wait_time(struct cfqg_stats *stats)
{
        unsigned long long now;

        if (!cfqg_stats_waiting(stats))
                return;

        now = sched_clock();
        if (time_after64(now, stats->start_group_wait_time))
                blkg_stat_add(&stats->group_wait_time,
                              now - stats->start_group_wait_time);
        cfqg_stats_clear_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
                                                 struct cfq_group *curr_cfqg)
{
        struct cfqg_stats *stats = &cfqg->stats;

        if (cfqg_stats_waiting(stats))
                return;
        if (cfqg == curr_cfqg)
                return;
        stats->start_group_wait_time = sched_clock();
        cfqg_stats_mark_waiting(stats);
}

/* This should be called with the queue_lock held. */
static void cfqg_stats_end_empty_time(struct cfqg_stats *stats)
{
        unsigned long long now;

        if (!cfqg_stats_empty(stats))
                return;

        now = sched_clock();
        if (time_after64(now, stats->start_empty_time))
                blkg_stat_add(&stats->empty_time,
                              now - stats->start_empty_time);
        cfqg_stats_clear_empty(stats);
}

static void cfqg_stats_update_dequeue(struct cfq_group *cfqg)
{
        blkg_stat_add(&cfqg->stats.dequeue, 1);
}

static void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg)
{
        struct cfqg_stats *stats = &cfqg->stats;

        if (blkg_rwstat_sum(&stats->queued))
                return;

        /*
         * group is already marked empty. This can happen if cfqq got new
         * request in parent group and moved to this group while being added
         * to service tree. Just ignore the event and move on.
         */
        if (cfqg_stats_empty(stats))
                return;

        stats->start_empty_time = sched_clock();
        cfqg_stats_mark_empty(stats);
}

static void cfqg_stats_update_idle_time(struct cfq_group *cfqg)
{
        struct cfqg_stats *stats = &cfqg->stats;

        if (cfqg_stats_idling(stats)) {
                unsigned long long now = sched_clock();

                if (time_after64(now, stats->start_idle_time))
                        blkg_stat_add(&stats->idle_time,
                                      now - stats->start_idle_time);
                cfqg_stats_clear_idling(stats);
        }
}

static void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg)
{
        struct cfqg_stats *stats = &cfqg->stats;

        BUG_ON(cfqg_stats_idling(stats));

        stats->start_idle_time = sched_clock();
        cfqg_stats_mark_idling(stats);
}

static void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg)
{
        struct cfqg_stats *stats = &cfqg->stats;

        blkg_stat_add(&stats->avg_queue_size_sum,
                      blkg_rwstat_sum(&stats->queued));
        blkg_stat_add(&stats->avg_queue_size_samples, 1);
        cfqg_stats_update_group_wait_time(stats);
}

#else	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */

static inline void cfqg_stats_set_start_group_wait_time(struct cfq_group *cfqg,
                        struct cfq_group *curr_cfqg) { }
static inline void cfqg_stats_end_empty_time(struct cfqg_stats *stats) { }
static inline void cfqg_stats_update_dequeue(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_empty_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_set_start_idle_time(struct cfq_group *cfqg) { }
static inline void cfqg_stats_update_avg_queue_size(struct cfq_group *cfqg) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED && CONFIG_DEBUG_BLK_CGROUP */
#ifdef CONFIG_CFQ_GROUP_IOSCHED

static inline struct cfq_group *blkg_to_cfqg(struct blkio_group *blkg)
{
        return blkg_to_pdata(blkg, &blkio_policy_cfq);
}

static inline struct blkio_group *cfqg_to_blkg(struct cfq_group *cfqg)
{
        return pdata_to_blkg(cfqg);
}

static inline void cfqg_get(struct cfq_group *cfqg)
{
        return blkg_get(cfqg_to_blkg(cfqg));
}

static inline void cfqg_put(struct cfq_group *cfqg)
{
        return blkg_put(cfqg_to_blkg(cfqg));
}

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)				\
        blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
                        cfq_cfqq_sync((cfqq)) ? 'S' : 'A',		\
                        blkg_path(cfqg_to_blkg((cfqq)->cfqg)), ##args)

#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)				\
        blk_add_trace_msg((cfqd)->queue, "%s " fmt,			\
                        blkg_path(cfqg_to_blkg((cfqg))), ##args)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
                                            struct cfq_group *curr_cfqg, int rw)
{
        blkg_rwstat_add(&cfqg->stats.queued, rw, 1);
        cfqg_stats_end_empty_time(&cfqg->stats);
        cfqg_stats_set_start_group_wait_time(cfqg, curr_cfqg);
}

static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
                        unsigned long time, unsigned long unaccounted_time)
{
        blkg_stat_add(&cfqg->stats.time, time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
        blkg_stat_add(&cfqg->stats.unaccounted_time, unaccounted_time);
#endif
}

static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw)
{
        blkg_rwstat_add(&cfqg->stats.queued, rw, -1);
}

static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw)
{
        blkg_rwstat_add(&cfqg->stats.merged, rw, 1);
}

static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
                                              uint64_t bytes, int rw)
{
        blkg_stat_add(&cfqg->stats.sectors, bytes >> 9);
        blkg_rwstat_add(&cfqg->stats.serviced, rw, 1);
        blkg_rwstat_add(&cfqg->stats.service_bytes, rw, bytes);
}

static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
                        uint64_t start_time, uint64_t io_start_time, int rw)
{
        struct cfqg_stats *stats = &cfqg->stats;
        unsigned long long now = sched_clock();

        if (time_after64(now, io_start_time))
                blkg_rwstat_add(&stats->service_time, rw, now - io_start_time);
        if (time_after64(io_start_time, start_time))
                blkg_rwstat_add(&stats->wait_time, rw,
                                io_start_time - start_time);
}

static void cfqg_stats_reset(struct blkio_group *blkg)
{
        struct cfq_group *cfqg = blkg_to_cfqg(blkg);
        struct cfqg_stats *stats = &cfqg->stats;

        /* queued stats shouldn't be cleared */
        blkg_rwstat_reset(&stats->service_bytes);
        blkg_rwstat_reset(&stats->serviced);
        blkg_rwstat_reset(&stats->merged);
        blkg_rwstat_reset(&stats->service_time);
        blkg_rwstat_reset(&stats->wait_time);
        blkg_stat_reset(&stats->time);
#ifdef CONFIG_DEBUG_BLK_CGROUP
        blkg_stat_reset(&stats->unaccounted_time);
        blkg_stat_reset(&stats->avg_queue_size_sum);
        blkg_stat_reset(&stats->avg_queue_size_samples);
        blkg_stat_reset(&stats->dequeue);
        blkg_stat_reset(&stats->group_wait_time);
        blkg_stat_reset(&stats->idle_time);
        blkg_stat_reset(&stats->empty_time);
#endif
}

#else	/* CONFIG_CFQ_GROUP_IOSCHED */

static inline struct cfq_group *blkg_to_cfqg(struct blkio_group *blkg) { return NULL; }
static inline struct blkio_group *cfqg_to_blkg(struct cfq_group *cfqg) { return NULL; }
static inline void cfqg_get(struct cfq_group *cfqg) { }
static inline void cfqg_put(struct cfq_group *cfqg) { }

#define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
        blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)

static inline void cfqg_stats_update_io_add(struct cfq_group *cfqg,
                        struct cfq_group *curr_cfqg, int rw) { }
static inline void cfqg_stats_update_timeslice_used(struct cfq_group *cfqg,
                        unsigned long time, unsigned long unaccounted_time) { }
static inline void cfqg_stats_update_io_remove(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_io_merged(struct cfq_group *cfqg, int rw) { }
static inline void cfqg_stats_update_dispatch(struct cfq_group *cfqg,
                        uint64_t bytes, int rw) { }
static inline void cfqg_stats_update_completion(struct cfq_group *cfqg,
                        uint64_t start_time, uint64_t io_start_time, int rw) { }

#endif	/* CONFIG_CFQ_GROUP_IOSCHED */

#define cfq_log(cfqd, fmt, args...)	\
        blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)

/* Traverses through cfq group service trees */
#define for_each_cfqg_st(cfqg, i, j, st) \
        for (i = 0; i <= IDLE_WORKLOAD; i++) \
                for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
                        : &cfqg->service_tree_idle; \
                        (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
                        (i == IDLE_WORKLOAD && j == 0); \
                        j++, st = i < IDLE_WORKLOAD ? \
                        &cfqg->service_trees[i][j]: NULL)
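/*
 * Iteration order note: for_each_cfqg_st() visits the six RT/BE trees
 * (service_trees[0..1][ASYNC..SYNC]) and then, on the i == IDLE_WORKLOAD
 * pass, the single service_tree_idle, i.e. seven trees per group in all.
 */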
static inline bool cfq_io_thinktime_big(struct cfq_data *cfqd,
        struct cfq_ttime *ttime, bool group_idle)
{
        unsigned long slice;

        if (!sample_valid(ttime->ttime_samples))
                return false;
        if (group_idle)
                slice = cfqd->cfq_group_idle;
        else
                slice = cfqd->cfq_slice_idle;
        return ttime->ttime_mean > slice;
}

static inline bool iops_mode(struct cfq_data *cfqd)
{
        /*
         * If we are not idling on queues and it is an NCQ drive, parallel
         * execution of requests is on and measuring time is not possible
         * in most of the cases until and unless we drive shallower queue
         * depths and that becomes a performance bottleneck. In such cases,
         * switch to providing fairness in terms of number of IOs.
         */
        if (!cfqd->cfq_slice_idle && cfqd->hw_tag)
                return true;
        else
                return false;
}
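/*
 * In iops_mode, cfq_group_served() below charges the group for the number
 * of requests dispatched in the slice (cfqq->slice_dispatch) rather than
 * for elapsed jiffies, since per-queue time cannot be measured meaningfully
 * while the device executes requests in parallel.
 */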
static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
{
        if (cfq_class_idle(cfqq))
                return IDLE_WORKLOAD;
        if (cfq_class_rt(cfqq))
                return RT_WORKLOAD;
        return BE_WORKLOAD;
}

static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
{
        if (!cfq_cfqq_sync(cfqq))
                return ASYNC_WORKLOAD;
        if (!cfq_cfqq_idle_window(cfqq))
                return SYNC_NOIDLE_WORKLOAD;
        return SYNC_WORKLOAD;
}

static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
                                           struct cfq_data *cfqd,
                                           struct cfq_group *cfqg)
{
        if (wl == IDLE_WORKLOAD)
                return cfqg->service_tree_idle.count;

        return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
                + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
                + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
}

static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
                                         struct cfq_group *cfqg)
{
        return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
                + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
}

static void cfq_dispatch_insert(struct request_queue *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, bool is_sync,
                                       struct cfq_io_cq *cic, struct bio *bio,
                                       gfp_t gfp_mask);

static inline struct cfq_io_cq *icq_to_cic(struct io_cq *icq)
{
        /* cic->icq is the first member, %NULL will convert to %NULL */
        return container_of(icq, struct cfq_io_cq, icq);
}

static inline struct cfq_io_cq *cfq_cic_lookup(struct cfq_data *cfqd,
                                               struct io_context *ioc)
{
        if (ioc)
                return icq_to_cic(ioc_lookup_icq(ioc, cfqd->queue));
        return NULL;
}

static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_cq *cic, bool is_sync)
{
        return cic->cfqq[is_sync];
}

static inline void cic_set_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq,
                                bool is_sync)
{
        cic->cfqq[is_sync] = cfqq;
}

static inline struct cfq_data *cic_to_cfqd(struct cfq_io_cq *cic)
{
        return cic->icq.q->elevator->elevator_data;
}

/*
 * We regard a request as SYNC, if it's either a read or has the SYNC bit
 * set (in which case it could also be a direct WRITE).
 */
static inline bool cfq_bio_sync(struct bio *bio)
{
        return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC);
}

/*
 * Schedule a run of the queue, if there are requests pending and no one in
 * the driver that will restart queueing.
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
        if (cfqd->busy_queues) {
                cfq_log(cfqd, "schedule dispatch");
                kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
        }
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only should not get the full sync slice length.
 */
static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
                                 unsigned short prio)
{
        const int base_slice = cfqd->cfq_slice[sync];

        WARN_ON(prio >= IOPRIO_BE_NR);

        return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
}
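/*
 * Worked example, assuming HZ == 100: base_slice = cfq_slice[1] (the sync
 * slice, cfq_slice_sync = HZ/10) = 10 jiffies (100ms), and
 * base_slice/CFQ_SLICE_SCALE = 2, so ioprio 0 gets 10 + 2*4 = 18 jiffies
 * (180ms), ioprio 4 gets 10 (100ms) and ioprio 7 gets 10 - 2*3 = 4 jiffies
 * (40ms).
 */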
static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
}

static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
{
        u64 d = delta << CFQ_SERVICE_SHIFT;

        d = d * CFQ_WEIGHT_DEFAULT;
        do_div(d, cfqg->weight);
        return d;
}
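/*
 * I.e. the vdisktime charge is inversely proportional to group weight: a
 * group at twice CFQ_WEIGHT_DEFAULT accrues half the vdisktime for the same
 * service, so it is selected (by smallest vdisktime) correspondingly more
 * often.
 */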
static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
        s64 delta = (s64)(vdisktime - min_vdisktime);

        if (delta > 0)
                min_vdisktime = vdisktime;

        return min_vdisktime;
}

static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
{
        s64 delta = (s64)(vdisktime - min_vdisktime);

        if (delta < 0)
                min_vdisktime = vdisktime;

        return min_vdisktime;
}
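/*
 * The signed cast above is the usual wraparound-safe comparison trick (as
 * in CFS): even if vdisktime has wrapped past min_vdisktime in u64 space,
 * (s64)(a - b) still yields the correct sign as long as the two values are
 * less than 2^63 apart.
 */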
static void update_min_vdisktime(struct cfq_rb_root *st)
{
        struct cfq_group *cfqg;

        if (st->left) {
                cfqg = rb_entry_cfqg(st->left);
                st->min_vdisktime = max_vdisktime(st->min_vdisktime,
                                                  cfqg->vdisktime);
        }
}

/*
 * get averaged number of queues of RT/BE priority.
 * the average is updated with a formula that gives more weight to higher
 * numbers, so that it follows sudden increases quickly and decreases slowly
 */
static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
                                        struct cfq_group *cfqg, bool rt)
{
        unsigned min_q, max_q;
        unsigned mult = cfq_hist_divisor - 1;
        unsigned round = cfq_hist_divisor / 2;
        unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);

        min_q = min(cfqg->busy_queues_avg[rt], busy);
        max_q = max(cfqg->busy_queues_avg[rt], busy);
        cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
                cfq_hist_divisor;
        return cfqg->busy_queues_avg[rt];
}

static inline unsigned
cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
        struct cfq_rb_root *st = &cfqd->grp_service_tree;

        return cfq_target_latency * cfqg->weight / st->total_weight;
}
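/*
 * Example: with cfq_target_latency = 300ms, a group holding weight 500 out
 * of a total tree weight of 1000 is allotted a 150ms group slice per
 * scheduling round.
 */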
static inline unsigned
cfq_scaled_cfqq_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        unsigned slice = cfq_prio_to_slice(cfqd, cfqq);

        if (cfqd->cfq_latency) {
                /*
                 * interested queues (we consider only the ones with the same
                 * priority class in the cfq group)
                 */
                unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
                                                cfq_class_rt(cfqq));
                unsigned sync_slice = cfqd->cfq_slice[1];
                unsigned expect_latency = sync_slice * iq;
                unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);

                if (expect_latency > group_slice) {
                        unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
                        /* scale low_slice according to IO priority
                         * and sync vs async */
                        unsigned low_slice =
                                min(slice, base_low_slice * slice / sync_slice);
                        /* the adapted slice value is scaled to fit all iqs
                         * into the target latency */
                        slice = max(slice * group_slice / expect_latency,
                                    low_slice);
                }
        }
        return slice;
}
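/*
 * In effect, when the queues in the class would collectively overrun the
 * group slice (expect_latency > group_slice), each queue's slice is shrunk
 * proportionally, e.g. 8 busy queues expecting 100ms each against a 400ms
 * group slice halves every slice, but never below low_slice.
 */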
static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
        unsigned slice = cfq_scaled_cfqq_slice(cfqd, cfqq);

        cfqq->slice_start = jiffies;
        cfqq->slice_end = jiffies + slice;
        cfqq->allocated_slice = slice;
        cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline bool cfq_slice_used(struct cfq_queue *cfqq)
{
        if (cfq_cfqq_slice_new(cfqq))
                return false;
        if (time_before(jiffies, cfqq->slice_end))
                return false;

        return true;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
{
        sector_t s1, s2, d1 = 0, d2 = 0;
        unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
        unsigned wrap = 0; /* bit mask: requests behind the disk head? */

        if (rq1 == NULL || rq1 == rq2)
                return rq2;
        if (rq2 == NULL)
                return rq1;

        if (rq_is_sync(rq1) != rq_is_sync(rq2))
                return rq_is_sync(rq1) ? rq1 : rq2;

        if ((rq1->cmd_flags ^ rq2->cmd_flags) & REQ_PRIO)
                return rq1->cmd_flags & REQ_PRIO ? rq1 : rq2;

        s1 = blk_rq_pos(rq1);
        s2 = blk_rq_pos(rq2);

        /*
         * by definition, 1KiB is 2 sectors
         */
        back_max = cfqd->cfq_back_max * 2;

        /*
         * Strict one way elevator _except_ in the case where we allow
         * short backward seeks which are biased as twice the cost of a
         * similar forward seek.
         */
        if (s1 >= last)
                d1 = s1 - last;
        else if (s1 + back_max >= last)
                d1 = (last - s1) * cfqd->cfq_back_penalty;
        else
                wrap |= CFQ_RQ1_WRAP;

        if (s2 >= last)
                d2 = s2 - last;
        else if (s2 + back_max >= last)
                d2 = (last - s2) * cfqd->cfq_back_penalty;
        else
                wrap |= CFQ_RQ2_WRAP;

        /* Found required data */

        /*
         * By doing switch() on the bit mask "wrap" we avoid having to
         * check two variables for all permutations: --> faster!
         */
        switch (wrap) {
        case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
                if (d1 < d2)
                        return rq1;
                else if (d2 < d1)
                        return rq2;
                else {
                        if (s1 >= s2)
                                return rq1;
                        else
                                return rq2;
                }

        case CFQ_RQ2_WRAP:
                return rq1;
        case CFQ_RQ1_WRAP:
                return rq2;
        case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
        default:
                /*
                 * Since both rqs are wrapped,
                 * start with the one that's further behind head
                 * (--> only *one* back seek required),
                 * since back seek takes more time than forward.
                 */
                if (s1 <= s2)
                        return rq1;
                else
                        return rq2;
        }
}
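/*
 * Wrap-mask summary: a request "wraps" when it lies more than back_max
 * sectors behind the head, so e.g. wrap == CFQ_RQ1_WRAP means only rq1
 * would need a long backward seek and rq2 wins outright; distances only
 * compete in the common case 0 where neither wraps.
 */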
/*
 * Below is the leftmost-cache rbtree addon.
 */
static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
{
        /* Service tree is empty */
        if (!root->count)
                return NULL;

        if (!root->left)
                root->left = rb_first(&root->rb);

        if (root->left)
                return rb_entry(root->left, struct cfq_queue, rb_node);

        return NULL;
}

static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
{
        if (!root->left)
                root->left = rb_first(&root->rb);

        if (root->left)
                return rb_entry_cfqg(root->left);

        return NULL;
}

static void rb_erase_init(struct rb_node *n, struct rb_root *root)
{
        rb_erase(n, root);
        RB_CLEAR_NODE(n);
}

static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
{
        if (root->left == n)
                root->left = NULL;
        rb_erase_init(n, &root->rb);
        --root->count;
}
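/*
 * Note the invalidation protocol: erasing the cached leftmost node just
 * clears root->left, and the next cfq_rb_first()/cfq_rb_first_group() call
 * lazily recomputes it with rb_first() instead of walking on every lookup.
 */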
/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
                 struct request *last)
{
        struct rb_node *rbnext = rb_next(&last->rb_node);
        struct rb_node *rbprev = rb_prev(&last->rb_node);
        struct request *next = NULL, *prev = NULL;

        BUG_ON(RB_EMPTY_NODE(&last->rb_node));

        if (rbprev)
                prev = rb_entry_rq(rbprev);

        if (rbnext)
                next = rb_entry_rq(rbnext);
        else {
                rbnext = rb_first(&cfqq->sort_list);
                if (rbnext && rbnext != &last->rb_node)
                        next = rb_entry_rq(rbnext);
        }

        return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
}

static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
                                      struct cfq_queue *cfqq)
{
        /*
         * just an approximation, should be ok.
         */
        return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
                cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
}

static inline s64
cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
        return cfqg->vdisktime - st->min_vdisktime;
}

static void
__cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
        struct rb_node **node = &st->rb.rb_node;
        struct rb_node *parent = NULL;
        struct cfq_group *__cfqg;
        s64 key = cfqg_key(st, cfqg);
        int left = 1;

        while (*node != NULL) {
                parent = *node;
                __cfqg = rb_entry_cfqg(parent);

                if (key < cfqg_key(st, __cfqg))
                        node = &parent->rb_left;
                else {
                        node = &parent->rb_right;
                        left = 0;
                }
        }

        if (left)
                st->left = &cfqg->rb_node;

        rb_link_node(&cfqg->rb_node, parent, node);
        rb_insert_color(&cfqg->rb_node, &st->rb);
}

static void
cfq_update_group_weight(struct cfq_group *cfqg)
{
        BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));
        if (cfqg->new_weight) {
                cfqg->weight = cfqg->new_weight;
                cfqg->new_weight = 0;
        }
}

static void
cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
        BUG_ON(!RB_EMPTY_NODE(&cfqg->rb_node));

        cfq_update_group_weight(cfqg);
        __cfq_group_service_tree_add(st, cfqg);
        st->total_weight += cfqg->weight;
}

static void
cfq_group_notify_queue_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
        struct cfq_rb_root *st = &cfqd->grp_service_tree;
        struct cfq_group *__cfqg;
        struct rb_node *n;

        cfqg->nr_cfqq++;
        if (!RB_EMPTY_NODE(&cfqg->rb_node))
                return;

        /*
         * Currently put the group at the end. Later implement something
         * so that groups get lesser vtime based on their weights, so that
         * a group does not lose everything if it was not continuously
         * backlogged.
         */
        n = rb_last(&st->rb);
        if (n) {
                __cfqg = rb_entry_cfqg(n);
                cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
        } else
                cfqg->vdisktime = st->min_vdisktime;
        cfq_group_service_tree_add(st, cfqg);
}

static void
cfq_group_service_tree_del(struct cfq_rb_root *st, struct cfq_group *cfqg)
{
        st->total_weight -= cfqg->weight;
        if (!RB_EMPTY_NODE(&cfqg->rb_node))
                cfq_rb_erase(&cfqg->rb_node, st);
}

static void
cfq_group_notify_queue_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
        struct cfq_rb_root *st = &cfqd->grp_service_tree;

        BUG_ON(cfqg->nr_cfqq < 1);
        cfqg->nr_cfqq--;

        /* If there are other cfq queues under this group, don't delete it */
        if (cfqg->nr_cfqq)
                return;

        cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
        cfq_group_service_tree_del(st, cfqg);
        cfqg->saved_workload_slice = 0;
        cfqg_stats_update_dequeue(cfqg);
}
static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq,
                                                unsigned int *unaccounted_time)
{
        unsigned int slice_used;

        /*
         * Queue got expired before even a single request completed or
         * got expired immediately after first request completion.
         */
        if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
                /*
                 * Also charge the seek time incurred to the group, otherwise
                 * if there are multiple queues in the group, each can dispatch
                 * a single request on seeky media and cause lots of seek time
                 * and group will never know it.
                 */
                slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
                                   1);
        } else {
                slice_used = jiffies - cfqq->slice_start;
                if (slice_used > cfqq->allocated_slice) {
                        *unaccounted_time = slice_used - cfqq->allocated_slice;
                        slice_used = cfqq->allocated_slice;
                }
                if (time_after(cfqq->slice_start, cfqq->dispatch_start))
                        *unaccounted_time += cfqq->slice_start -
                                        cfqq->dispatch_start;
        }

        return slice_used;
}

static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
                             struct cfq_queue *cfqq)
{
        struct cfq_rb_root *st = &cfqd->grp_service_tree;
        unsigned int used_sl, charge, unaccounted_sl = 0;
        int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
                        - cfqg->service_tree_idle.count;

        BUG_ON(nr_sync < 0);
        used_sl = charge = cfq_cfqq_slice_usage(cfqq, &unaccounted_sl);

        if (iops_mode(cfqd))
                charge = cfqq->slice_dispatch;
        else if (!cfq_cfqq_sync(cfqq) && !nr_sync)
                charge = cfqq->allocated_slice;

        /* Can't update vdisktime while group is on service tree */
        cfq_group_service_tree_del(st, cfqg);
        cfqg->vdisktime += cfq_scale_slice(charge, cfqg);
        /* If a new weight was requested, update now, off tree */
        cfq_group_service_tree_add(st, cfqg);

        /* This group is being expired. Save the context */
        if (time_after(cfqd->workload_expires, jiffies)) {
                cfqg->saved_workload_slice = cfqd->workload_expires
                                                - jiffies;
                cfqg->saved_workload = cfqd->serving_type;
                cfqg->saved_serving_prio = cfqd->serving_prio;
        } else
                cfqg->saved_workload_slice = 0;

        cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
                     st->min_vdisktime);
        cfq_log_cfqq(cfqq->cfqd, cfqq,
                     "sl_used=%u disp=%u charge=%u iops=%u sect=%lu",
                     used_sl, cfqq->slice_dispatch, charge,
                     iops_mode(cfqd), cfqq->nr_sectors);
        cfqg_stats_update_timeslice_used(cfqg, used_sl, unaccounted_sl);
        cfqg_stats_set_start_empty_time(cfqg);
}
/**
 * cfq_init_cfqg_base - initialize base part of a cfq_group
 * @cfqg: cfq_group to initialize
 *
 * Initialize the base part which is used whether %CONFIG_CFQ_GROUP_IOSCHED
 * is enabled or not.
 */
static void cfq_init_cfqg_base(struct cfq_group *cfqg)
{
	struct cfq_rb_root *st;
	int i, j;

	for_each_cfqg_st(cfqg, i, j, st)
		*st = CFQ_RB_ROOT;
	RB_CLEAR_NODE(&cfqg->rb_node);

	cfqg->ttime.last_end_request = jiffies;
}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static void cfq_init_blkio_group(struct blkio_group *blkg)
{
	struct cfq_group *cfqg = blkg_to_cfqg(blkg);

	cfq_init_cfqg_base(cfqg);
	cfqg->weight = blkg->blkcg->cfq_weight;
}

/*
 * Search for the cfq group current task belongs to. request_queue lock must
 * be held.
 */
static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
						struct blkio_cgroup *blkcg)
{
	struct request_queue *q = cfqd->queue;
	struct cfq_group *cfqg = NULL;

	/* avoid lookup for the common case where there's no blkio cgroup */
	if (blkcg == &blkio_root_cgroup) {
		cfqg = cfqd->root_group;
	} else {
		struct blkio_group *blkg;

		blkg = blkg_lookup_create(blkcg, q, false);
		if (!IS_ERR(blkg))
			cfqg = blkg_to_cfqg(blkg);
	}

	return cfqg;
}

static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
{
	/* Currently, all async queues are mapped to root group */
	if (!cfq_cfqq_sync(cfqq))
		cfqg = cfqq->cfqd->root_group;

	cfqq->cfqg = cfqg;
	/* cfqq reference on cfqg */
	cfqg_get(cfqg);
}

static u64 cfqg_prfill_weight_device(struct seq_file *sf, void *pdata, int off)
{
	struct cfq_group *cfqg = pdata;

	if (!cfqg->dev_weight)
		return 0;
	return __blkg_prfill_u64(sf, pdata, cfqg->dev_weight);
}

static int cfqg_print_weight_device(struct cgroup *cgrp, struct cftype *cft,
				    struct seq_file *sf)
{
	blkcg_print_blkgs(sf, cgroup_to_blkio_cgroup(cgrp),
			  cfqg_prfill_weight_device, BLKIO_POLICY_PROP, 0,
			  false);
	return 0;
}

static int cfq_print_weight(struct cgroup *cgrp, struct cftype *cft,
			    struct seq_file *sf)
{
	seq_printf(sf, "%u\n", cgroup_to_blkio_cgroup(cgrp)->cfq_weight);
	return 0;
}

static int cfqg_set_weight_device(struct cgroup *cgrp, struct cftype *cft,
				  const char *buf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	struct blkg_conf_ctx ctx;
	struct cfq_group *cfqg;
	int ret;

	ret = blkg_conf_prep(blkcg, buf, &ctx);
	if (ret)
		return ret;

	ret = -EINVAL;
	cfqg = blkg_to_cfqg(ctx.blkg);
	if (cfqg && (!ctx.v || (ctx.v >= CFQ_WEIGHT_MIN &&
				ctx.v <= CFQ_WEIGHT_MAX))) {
		cfqg->dev_weight = ctx.v;
		cfqg->new_weight = cfqg->dev_weight ?: blkcg->cfq_weight;
		ret = 0;
	}

	blkg_conf_finish(&ctx);
	return ret;
}

static int cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);
	struct blkio_group *blkg;
	struct hlist_node *n;

	if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
		return -EINVAL;

	spin_lock_irq(&blkcg->lock);
	blkcg->cfq_weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		struct cfq_group *cfqg = blkg_to_cfqg(blkg);

		if (cfqg && !cfqg->dev_weight)
			cfqg->new_weight = blkcg->cfq_weight;
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}

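/*
 * The two setters above back the "weight" and "weight_device" cgroup
 * files declared in cfq_blkcg_files[] below, typically exposed to
 * userspace as blkio.weight and blkio.weight_device, e.g.:
 *
 *	echo 500 > /sys/fs/cgroup/blkio/mygroup/blkio.weight
 *
 * A non-zero per-device weight overrides the per-cgroup one; either way
 * new_weight is only applied once the group is off the service tree
 * (see cfq_update_group_weight()).
 */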
static int cfqg_print_stat(struct cgroup *cgrp, struct cftype *cft,
			   struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_stat, BLKIO_POLICY_PROP,
			  cft->private, false);
	return 0;
}

static int cfqg_print_rwstat(struct cgroup *cgrp, struct cftype *cft,
			     struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, blkg_prfill_rwstat, BLKIO_POLICY_PROP,
			  cft->private, true);
	return 0;
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
static u64 cfqg_prfill_avg_queue_size(struct seq_file *sf, void *pdata, int off)
{
	struct cfq_group *cfqg = pdata;
	u64 samples = blkg_stat_read(&cfqg->stats.avg_queue_size_samples);
	u64 v = 0;

	if (samples) {
		v = blkg_stat_read(&cfqg->stats.avg_queue_size_sum);
		do_div(v, samples);
	}
	__blkg_prfill_u64(sf, pdata, v);
	return 0;
}

/* print avg_queue_size */
static int cfqg_print_avg_queue_size(struct cgroup *cgrp, struct cftype *cft,
				     struct seq_file *sf)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgrp);

	blkcg_print_blkgs(sf, blkcg, cfqg_prfill_avg_queue_size,
			  BLKIO_POLICY_PROP, 0, false);
	return 0;
}
#endif	/* CONFIG_DEBUG_BLK_CGROUP */

static struct cftype cfq_blkcg_files[] = {
	{
		.name = "weight_device",
		.read_seq_string = cfqg_print_weight_device,
		.write_string = cfqg_set_weight_device,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.read_seq_string = cfq_print_weight,
		.write_u64 = cfq_set_weight,
	},
	{
		.name = "time",
		.private = offsetof(struct cfq_group, stats.time),
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "sectors",
		.private = offsetof(struct cfq_group, stats.sectors),
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "io_service_bytes",
		.private = offsetof(struct cfq_group, stats.service_bytes),
		.read_seq_string = cfqg_print_rwstat,
	},
	{
		.name = "io_serviced",
		.private = offsetof(struct cfq_group, stats.serviced),
		.read_seq_string = cfqg_print_rwstat,
	},
	{
		.name = "io_service_time",
		.private = offsetof(struct cfq_group, stats.service_time),
		.read_seq_string = cfqg_print_rwstat,
	},
	{
		.name = "io_wait_time",
		.private = offsetof(struct cfq_group, stats.wait_time),
		.read_seq_string = cfqg_print_rwstat,
	},
	{
		.name = "io_merged",
		.private = offsetof(struct cfq_group, stats.merged),
		.read_seq_string = cfqg_print_rwstat,
	},
	{
		.name = "io_queued",
		.private = offsetof(struct cfq_group, stats.queued),
		.read_seq_string = cfqg_print_rwstat,
	},
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.read_seq_string = cfqg_print_avg_queue_size,
	},
	{
		.name = "group_wait_time",
		.private = offsetof(struct cfq_group, stats.group_wait_time),
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "idle_time",
		.private = offsetof(struct cfq_group, stats.idle_time),
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "empty_time",
		.private = offsetof(struct cfq_group, stats.empty_time),
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "dequeue",
		.private = offsetof(struct cfq_group, stats.dequeue),
		.read_seq_string = cfqg_print_stat,
	},
	{
		.name = "unaccounted_time",
		.private = offsetof(struct cfq_group, stats.unaccounted_time),
		.read_seq_string = cfqg_print_stat,
	},
#endif	/* CONFIG_DEBUG_BLK_CGROUP */
	{ }	/* terminate */
};

#else /* GROUP_IOSCHED */
static struct cfq_group *cfq_lookup_create_cfqg(struct cfq_data *cfqd,
						struct blkio_cgroup *blkcg)
{
	return cfqd->root_group;
}

static inline void
cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
{
	cfqq->cfqg = cfqg;
}

#endif /* GROUP_IOSCHED */

/*
 * The cfqd->service_trees hold all pending cfq_queues that have
 * requests waiting to be processed. Each tree is sorted in the order
 * in which we will service the queues.
 */
static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
				 bool add_front)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;
	unsigned long rb_key;
	struct cfq_rb_root *service_tree;
	int left;
	int new_cfqq = 1;

	service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
						cfqq_type(cfqq));
	if (cfq_class_idle(cfqq)) {
		rb_key = CFQ_IDLE_DELAY;
		parent = rb_last(&service_tree->rb);
		if (parent && parent != &cfqq->rb_node) {
			__cfqq = rb_entry(parent, struct cfq_queue, rb_node);
			rb_key += __cfqq->rb_key;
		} else
			rb_key += jiffies;
	} else if (!add_front) {
		/*
		 * Get our rb key offset. Subtract any residual slice
		 * value carried from last service. A negative resid
		 * count indicates slice overrun, and this should position
		 * the next service time further away in the tree.
		 */
		rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
		rb_key -= cfqq->slice_resid;
		cfqq->slice_resid = 0;
	} else {
		rb_key = -HZ;
		__cfqq = cfq_rb_first(service_tree);
		rb_key += __cfqq ? __cfqq->rb_key : jiffies;
	}

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		new_cfqq = 0;
		/*
		 * same position, nothing more to do
		 */
		if (rb_key == cfqq->rb_key &&
		    cfqq->service_tree == service_tree)
			return;

		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
		cfqq->service_tree = NULL;
	}

	left = 1;
	parent = NULL;
	cfqq->service_tree = service_tree;
	p = &service_tree->rb.rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		__cfqq = rb_entry(parent, struct cfq_queue, rb_node);

		/*
		 * sort by key, that represents service time.
		 */
		if (time_before(rb_key, __cfqq->rb_key))
			n = &(*p)->rb_left;
		else {
			n = &(*p)->rb_right;
			left = 0;
		}

		p = n;
	}

	if (left)
		service_tree->left = &cfqq->rb_node;

	cfqq->rb_key = rb_key;
	rb_link_node(&cfqq->rb_node, parent, p);
	rb_insert_color(&cfqq->rb_node, &service_tree->rb);
	service_tree->count++;
	if (add_front || !new_cfqq)
		return;
	cfq_group_notify_queue_add(cfqd, cfqq->cfqg);
}

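/*
 * Look up the queue whose next_rq starts at @sector in the given prio
 * tree. Returns the exact match if there is one; otherwise returns
 * NULL and leaves *ret_parent (and *rb_link, if supplied) pointing at
 * the insertion point, which cfq_prio_tree_add() below relies on.
 */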
static struct cfq_queue *
cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
		     sector_t sector, struct rb_node **ret_parent,
		     struct rb_node ***rb_link)
{
	struct rb_node **p, *parent;
	struct cfq_queue *cfqq = NULL;

	parent = NULL;
	p = &root->rb_node;
	while (*p) {
		struct rb_node **n;

		parent = *p;
		cfqq = rb_entry(parent, struct cfq_queue, p_node);

		/*
		 * Sort strictly based on sector. Smallest to the left,
		 * largest to the right.
		 */
		if (sector > blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_right;
		else if (sector < blk_rq_pos(cfqq->next_rq))
			n = &(*p)->rb_left;
		else
			break;
		p = n;
		cfqq = NULL;
	}

	*ret_parent = parent;
	if (rb_link)
		*rb_link = p;
	return cfqq;
}

static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct rb_node **p, *parent;
	struct cfq_queue *__cfqq;

	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	if (cfq_class_idle(cfqq))
		return;
	if (!cfqq->next_rq)
		return;

	cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
	__cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
				      blk_rq_pos(cfqq->next_rq), &parent, &p);
	if (!__cfqq) {
		rb_link_node(&cfqq->p_node, parent, p);
		rb_insert_color(&cfqq->p_node, cfqq->p_root);
	} else
		cfqq->p_root = NULL;
}

/*
 * Update cfqq's position in the service tree.
 */
static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (cfq_cfqq_on_rr(cfqq)) {
		cfq_service_tree_add(cfqd, cfqq, 0);
		cfq_prio_tree_add(cfqd, cfqq);
	}
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;
	if (cfq_cfqq_sync(cfqq))
		cfqd->busy_sync_queues++;

	cfq_resort_rr_list(cfqd, cfqq);
}

/*
 * Called when the cfqq no longer has requests pending, remove it from
 * the service tree.
 */
static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);

	if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
		cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
		cfqq->service_tree = NULL;
	}
	if (cfqq->p_root) {
		rb_erase(&cfqq->p_node, cfqq->p_root);
		cfqq->p_root = NULL;
	}

	cfq_group_notify_queue_del(cfqd, cfqq->cfqg);
	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
	if (cfq_cfqq_sync(cfqq))
		cfqd->busy_sync_queues--;
}

/*
 * rb tree support functions
 */
static void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
		/*
		 * Queue will be deleted from service tree when we actually
		 * expire it later. Right now just remove it from prio tree
		 * as it is empty.
		 */
		if (cfqq->p_root) {
			rb_erase(&cfqq->p_node, cfqq->p_root);
			cfqq->p_root = NULL;
		}
	}
}

static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *prev;

	cfqq->queued[rq_is_sync(rq)]++;
	elv_rb_add(&cfqq->sort_list, rq);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);

	/*
	 * check if this request is a better next-serve candidate
	 */
	prev = cfqq->next_rq;
	cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);

	/*
	 * adjust priority tree position, if ->next_rq changes
	 */
	if (prev != cfqq->next_rq)
		cfq_prio_tree_add(cfqd, cfqq);

	BUG_ON(!cfqq->next_rq);
}

static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
	cfq_add_rq_rb(rq);
	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqq->cfqd->serving_group,
				 rq->cmd_flags);
}

static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	struct cfq_io_cq *cic;
	struct cfq_queue *cfqq;

	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return NULL;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}

	return NULL;
}

static void cfq_activate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver++;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
						cfqd->rq_in_driver);

	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
}

static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
						cfqd->rq_in_driver);
}

static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	cfqq->cfqd->rq_queued--;
	cfqg_stats_update_io_remove(RQ_CFQG(rq), rq->cmd_flags);
	if (rq->cmd_flags & REQ_PRIO) {
		WARN_ON(!cfqq->prio_pending);
		cfqq->prio_pending--;
	}
}

static int cfq_merge(struct request_queue *q, struct request **req,
		     struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void cfq_merged_request(struct request_queue *q, struct request *req,
			       int type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		struct cfq_queue *cfqq = RQ_CFQQ(req);

		cfq_reposition_rq_rb(cfqq, req);
	}
}

static void cfq_bio_merged(struct request_queue *q, struct request *req,
			   struct bio *bio)
{
	cfqg_stats_update_io_merged(RQ_CFQG(req), bio->bi_rw);
}

static void
cfq_merged_requests(struct request_queue *q, struct request *rq,
		    struct request *next)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = q->elevator->elevator_data;

	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
		list_move(&rq->queuelist, &next->queuelist);
		rq_set_fifo_time(rq, rq_fifo_time(next));
	}

	if (cfqq->next_rq == next)
		cfqq->next_rq = rq;
	cfq_remove_request(next);
	cfqg_stats_update_io_merged(RQ_CFQG(rq), next->cmd_flags);

	cfqq = RQ_CFQQ(next);
	/*
	 * all requests of this queue are merged to other queues, delete it
	 * from the service tree. If it's the active_queue,
	 * cfq_dispatch_requests() will choose to expire it or do idle
	 */
	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list) &&
	    cfqq != cfqd->active_queue)
		cfq_del_cfqq_rr(cfqd, cfqq);
}

static int cfq_allow_merge(struct request_queue *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_cq *cic;
	struct cfq_queue *cfqq;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if (cfq_bio_sync(bio) && !rq_is_sync(rq))
		return false;

	/*
	 * Lookup the cfqq that this bio will be queued with and allow
	 * merge only if rq is queued there.
	 */
	cic = cfq_cic_lookup(cfqd, current->io_context);
	if (!cic)
		return false;

	cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
	return cfqq == RQ_CFQQ(rq);
}

static inline void cfq_del_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	del_timer(&cfqd->idle_slice_timer);
	cfqg_stats_update_idle_time(cfqq->cfqg);
}

static void __cfq_set_active_queue(struct cfq_data *cfqd,
				   struct cfq_queue *cfqq)
{
	if (cfqq) {
		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
				cfqd->serving_prio, cfqd->serving_type);
		cfqg_stats_update_avg_queue_size(cfqq->cfqg);
		cfqq->slice_start = 0;
		cfqq->dispatch_start = jiffies;
		cfqq->allocated_slice = 0;
		cfqq->slice_end = 0;
		cfqq->slice_dispatch = 0;
		cfqq->nr_sectors = 0;

		cfq_clear_cfqq_wait_request(cfqq);
		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);

		cfq_del_timer(cfqd, cfqq);
	}

	cfqd->active_queue = cfqq;
}

/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    bool timed_out)
{
	cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);

	if (cfq_cfqq_wait_request(cfqq))
		cfq_del_timer(cfqd, cfqq);

	cfq_clear_cfqq_wait_request(cfqq);
	cfq_clear_cfqq_wait_busy(cfqq);

	/*
	 * If this cfqq is shared between multiple processes, check to
	 * make sure that those processes are still issuing I/Os within
	 * the mean seek distance. If not, it may be time to break the
	 * queues apart again.
	 */
	if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
		cfq_mark_cfqq_split_coop(cfqq);

	/*
	 * store what was left of this slice, if the queue idled/timed out
	 */
	if (timed_out) {
		if (cfq_cfqq_slice_new(cfqq))
			cfqq->slice_resid = cfq_scaled_cfqq_slice(cfqd, cfqq);
		else
			cfqq->slice_resid = cfqq->slice_end - jiffies;
		cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
	}

	cfq_group_served(cfqd, cfqq->cfqg, cfqq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
		cfq_del_cfqq_rr(cfqd, cfqq);

	cfq_resort_rr_list(cfqd, cfqq);

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->icq.ioc);
		cfqd->active_cic = NULL;
	}
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, timed_out);
}

/*
 * Get next queue for service. Unless we have a queue preemption,
 * we'll simply select the first cfqq in the service tree.
 */
static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
{
	struct cfq_rb_root *service_tree =
		service_tree_for(cfqd->serving_group, cfqd->serving_prio,
					cfqd->serving_type);

	if (!cfqd->rq_queued)
		return NULL;

	/* There is nothing to dispatch */
	if (!service_tree)
		return NULL;
	if (RB_EMPTY_ROOT(&service_tree->rb))
		return NULL;
	return cfq_rb_first(service_tree);
}

static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
{
	struct cfq_group *cfqg;
	struct cfq_queue *cfqq;
	int i, j;
	struct cfq_rb_root *st;

	if (!cfqd->rq_queued)
		return NULL;

	cfqg = cfq_get_next_cfqg(cfqd);
	if (!cfqg)
		return NULL;

	for_each_cfqg_st(cfqg, i, j, st)
		if ((cfqq = cfq_rb_first(st)) != NULL)
			return cfqq;
	return NULL;
}

/*
 * Get and set a new active queue for service.
 */
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
					      struct cfq_queue *cfqq)
{
	if (!cfqq)
		cfqq = cfq_get_next_queue(cfqd);

	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}

static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
					  struct request *rq)
{
	if (blk_rq_pos(rq) >= cfqd->last_position)
		return blk_rq_pos(rq) - cfqd->last_position;
	else
		return cfqd->last_position - blk_rq_pos(rq);
}

static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			       struct request *rq)
{
	return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
}

static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
				    struct cfq_queue *cur_cfqq)
{
	struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
	struct rb_node *parent, *node;
	struct cfq_queue *__cfqq;
	sector_t sector = cfqd->last_position;

	if (RB_EMPTY_ROOT(root))
		return NULL;

	/*
	 * First, if we find a request starting at the end of the last
	 * request, choose it.
	 */
	__cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
	if (__cfqq)
		return __cfqq;

	/*
	 * If the exact sector wasn't found, the parent of the NULL leaf
	 * will contain the closest sector.
	 */
	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
		return __cfqq;

	if (blk_rq_pos(__cfqq->next_rq) < sector)
		node = rb_next(&__cfqq->p_node);
	else
		node = rb_prev(&__cfqq->p_node);
	if (!node)
		return NULL;

	__cfqq = rb_entry(node, struct cfq_queue, p_node);
	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
		return __cfqq;

	return NULL;
}

/*
 * cfqd - obvious
 * cur_cfqq - passed in so that we don't decide that the current queue is
 *            closely cooperating with itself.
 *
 * So, basically we're assuming that cur_cfqq has dispatched at least
 * one request, and that cfqd->last_position reflects a position on the disk
 * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
 * assumption.
 */
static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
					      struct cfq_queue *cur_cfqq)
{
	struct cfq_queue *cfqq;

	if (cfq_class_idle(cur_cfqq))
		return NULL;
	if (!cfq_cfqq_sync(cur_cfqq))
		return NULL;
	if (CFQQ_SEEKY(cur_cfqq))
		return NULL;

	/*
	 * Don't search priority tree if it's the only queue in the group.
	 */
	if (cur_cfqq->cfqg->nr_cfqq == 1)
		return NULL;

	/*
	 * We should notice if some of the queues are cooperating, eg
	 * working closely on the same area of the disk. In that case,
	 * we can group them together and don't waste time idling.
	 */
	cfqq = cfqq_close(cfqd, cur_cfqq);
	if (!cfqq)
		return NULL;

	/* If new queue belongs to different cfq_group, don't choose it */
	if (cur_cfqq->cfqg != cfqq->cfqg)
		return NULL;

	/*
	 * It only makes sense to merge sync queues.
	 */
	if (!cfq_cfqq_sync(cfqq))
		return NULL;
	if (CFQQ_SEEKY(cfqq))
		return NULL;

	/*
	 * Do not merge queues of different priority classes
	 */
	if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
		return NULL;

	return cfqq;
}

/*
 * Determine whether we should enforce idle window for this queue.
 */
static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	enum wl_prio_t prio = cfqq_prio(cfqq);
	struct cfq_rb_root *service_tree = cfqq->service_tree;

	BUG_ON(!service_tree);
	BUG_ON(!service_tree->count);

	if (!cfqd->cfq_slice_idle)
		return false;

	/* We never do for idle class queues. */
	if (prio == IDLE_WORKLOAD)
		return false;

	/* We do for queues that were marked with idle window flag. */
	if (cfq_cfqq_idle_window(cfqq) &&
	   !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
		return true;

	/*
	 * Otherwise, we do only if they are the last ones
	 * in their service tree.
	 */
	if (service_tree->count == 1 && cfq_cfqq_sync(cfqq) &&
	   !cfq_io_thinktime_big(cfqd, &service_tree->ttime, false))
		return true;
	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
			service_tree->count);
	return false;
}

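/*
 * Arm the idle slice timer for the active queue. The idle period is
 * cfq_slice_idle when idling on the queue itself, or cfq_group_idle
 * when queue idling is off but the group should still keep its share.
 * The early-out checks below skip idling entirely where it cannot pay
 * off: non-rotational tagged devices, requests still in flight, an
 * exited task, or a think time longer than the remaining slice.
 */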
static void cfq_arm_slice_timer(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;
	struct cfq_io_cq *cic;
	unsigned long sl, group_idle = 0;

	/*
	 * SSD device without seek penalty, disable idling. But only do so
	 * for devices that support queuing, otherwise we still have a problem
	 * with sync vs async workloads.
	 */
	if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
		return;

	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
	WARN_ON(cfq_cfqq_slice_new(cfqq));

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfq_should_idle(cfqd, cfqq)) {
		/* no queue idling. Check for group idling */
		if (cfqd->cfq_group_idle)
			group_idle = cfqd->cfq_group_idle;
		else
			return;
	}

	/*
	 * still active requests from this queue, don't idle
	 */
	if (cfqq->dispatched)
		return;

	/*
	 * task has exited, don't wait
	 */
	cic = cfqd->active_cic;
	if (!cic || !atomic_read(&cic->icq.ioc->active_ref))
		return;

	/*
	 * If our average think time is larger than the remaining time
	 * slice, then don't idle. This avoids overrunning the allotted
	 * time slice.
	 */
	if (sample_valid(cic->ttime.ttime_samples) &&
	    (cfqq->slice_end - jiffies < cic->ttime.ttime_mean)) {
		cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu",
			     cic->ttime.ttime_mean);
		return;
	}

	/* There are other queues in the group, don't do group idle */
	if (group_idle && cfqq->cfqg->nr_cfqq > 1)
		return;

	cfq_mark_cfqq_wait_request(cfqq);

	if (group_idle)
		sl = cfqd->cfq_group_idle;
	else
		sl = cfqd->cfq_slice_idle;

	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
	cfqg_stats_set_start_idle_time(cfqq->cfqg);
	cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu group_idle: %d", sl,
			group_idle ? 1 : 0);
}

/*
 * Move request from internal lists to the request queue dispatch list.
 */
static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");

	cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
	cfq_remove_request(rq);
	cfqq->dispatched++;
	(RQ_CFQG(rq))->dispatched++;
	elv_dispatch_sort(q, rq);

	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
	cfqq->nr_sectors += blk_rq_sectors(rq);
	cfqg_stats_update_dispatch(cfqq->cfqg, blk_rq_bytes(rq), rq->cmd_flags);
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct request *rq = NULL;

	if (cfq_cfqq_fifo_expire(cfqq))
		return NULL;

	cfq_mark_cfqq_fifo_expire(cfqq);

	if (list_empty(&cfqq->fifo))
		return NULL;

	rq = rq_entry_fifo(cfqq->fifo.next);
	if (time_before(jiffies, rq_fifo_time(rq)))
		rq = NULL;

	cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
	return rq;
}

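/*
 * Cap on requests an async queue may dispatch per slice, scaled by
 * ioprio. As a worked example (assuming the default cfq_slice_async_rq
 * of 2): a best-effort queue at the default ioprio of 4 may dispatch
 * up to 2 * 2 * (8 - 4) = 16 requests, while one at ioprio 7 gets
 * only 2 * 2 * (8 - 7) = 4.
 */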
static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return 2 * base_rq * (IOPRIO_BE_NR - cfqq->ioprio);
}

/*
 * Must be called with the queue_lock held.
 */
static int cfqq_process_refs(struct cfq_queue *cfqq)
{
	int process_refs, io_refs;

	io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
	process_refs = cfqq->ref - io_refs;
	BUG_ON(process_refs < 0);
	return process_refs;
}

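/*
 * Schedule cfqq to be merged into new_cfqq: follow new_cfqq's merge
 * chain to its end, pick the direction with less outstanding work, and
 * record the target in ->new_cfqq while transferring the process
 * references it will need. The merge itself happens later, when the
 * queues are looked up again (see cfq_merge_cfqqs).
 */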
static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
{
	int process_refs, new_process_refs;
	struct cfq_queue *__cfqq;

	/*
	 * If there are no process references on the new_cfqq, then it is
	 * unsafe to follow the ->new_cfqq chain as other cfqq's in the
	 * chain may have dropped their last reference (not just their
	 * last process reference).
	 */
	if (!cfqq_process_refs(new_cfqq))
		return;

	/* Avoid a circular list and skip interim queue merges */
	while ((__cfqq = new_cfqq->new_cfqq)) {
		if (__cfqq == cfqq)
			return;
		new_cfqq = __cfqq;
	}

	process_refs = cfqq_process_refs(cfqq);
	new_process_refs = cfqq_process_refs(new_cfqq);
	/*
	 * If the process for the cfqq has gone away, there is no
	 * sense in merging the queues.
	 */
	if (process_refs == 0 || new_process_refs == 0)
		return;

	/*
	 * Merge in the direction of the lesser amount of work.
	 */
	if (new_process_refs >= process_refs) {
		cfqq->new_cfqq = new_cfqq;
		new_cfqq->ref += process_refs;
	} else {
		new_cfqq->new_cfqq = cfqq;
		cfqq->ref += new_process_refs;
	}
}

static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
				struct cfq_group *cfqg, enum wl_prio_t prio)
{
	struct cfq_queue *queue;
	int i;
	bool key_valid = false;
	unsigned long lowest_key = 0;
	enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;

	for (i = 0; i <= SYNC_WORKLOAD; ++i) {
		/* select the one with lowest rb_key */
		queue = cfq_rb_first(service_tree_for(cfqg, prio, i));
		if (queue &&
		    (!key_valid || time_before(queue->rb_key, lowest_key))) {
			lowest_key = queue->rb_key;
			cur_best = i;
			key_valid = true;
		}
	}

	return cur_best;
}

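/*
 * Pick the workload (priority class and type) the group serves next,
 * and size its time slice as the group slice scaled by the share of
 * busy queues on the chosen tree. As a rough worked example: with a
 * 100ms group slice and 2 of the group's 4 busy queues on the selected
 * service tree, the workload gets about 50ms, subject to the async
 * scaling and the 2 * cfq_slice_idle floor applied below.
 */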
static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
{
	unsigned slice;
	unsigned count;
	struct cfq_rb_root *st;
	unsigned group_slice;
	enum wl_prio_t original_prio = cfqd->serving_prio;

	/* Choose next priority. RT > BE > IDLE */
	if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
		cfqd->serving_prio = RT_WORKLOAD;
	else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
		cfqd->serving_prio = BE_WORKLOAD;
	else {
		cfqd->serving_prio = IDLE_WORKLOAD;
		cfqd->workload_expires = jiffies + 1;
		return;
	}

	if (original_prio != cfqd->serving_prio)
		goto new_workload;

	/*
	 * For RT and BE, we have to choose also the type
	 * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
	 * expiration time
	 */
	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
	count = st->count;

	/*
	 * check workload expiration, and that we still have other queues ready
	 */
	if (count && !time_after(jiffies, cfqd->workload_expires))
		return;

new_workload:
	/* otherwise select new workload type */
	cfqd->serving_type =
		cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio);
	st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type);
	count = st->count;

	/*
	 * the workload slice is computed as a fraction of target latency
	 * proportional to the number of queues in that workload, over
	 * all the queues in the same priority class
	 */
	group_slice = cfq_group_slice(cfqd, cfqg);

	slice = group_slice * count /
		max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
		      cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));

	if (cfqd->serving_type == ASYNC_WORKLOAD) {
		unsigned int tmp;

		/*
		 * Async queues are currently system wide. Just taking the
		 * proportion of queues within the same group will lead to a
		 * higher async ratio system wide, as generally the root group
		 * is going to have higher weight. A more accurate approach
		 * would be to calculate the system wide async/sync ratio.
		 */
		tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
		tmp = tmp/cfqd->busy_queues;
		slice = min_t(unsigned, slice, tmp);

		/* async workload slice is scaled down according to
		 * the sync/async slice ratio. */
		slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
	} else
		/* sync workload slice is at least 2 * cfq_slice_idle */
		slice = max(slice, 2 * cfqd->cfq_slice_idle);

	slice = max_t(unsigned, slice, CFQ_MIN_TT);
	cfq_log(cfqd, "workload slice:%d", slice);
	cfqd->workload_expires = jiffies + slice;
}

static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
{
	struct cfq_rb_root *st = &cfqd->grp_service_tree;
	struct cfq_group *cfqg;

	if (RB_EMPTY_ROOT(&st->rb))
		return NULL;
	cfqg = cfq_rb_first_group(st);
	update_min_vdisktime(st);
	return cfqg;
}

static void cfq_choose_cfqg(struct cfq_data *cfqd)
{
	struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);

	cfqd->serving_group = cfqg;

	/* Restore the workload type data */
	if (cfqg->saved_workload_slice) {
		cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
		cfqd->serving_type = cfqg->saved_workload;
		cfqd->serving_prio = cfqg->saved_serving_prio;
	} else
		cfqd->workload_expires = jiffies - 1;

	choose_service_tree(cfqd, cfqg);
}

/*
 * Select a queue for service. If we have a current active queue,
 * check whether to continue servicing it, or retrieve and set a new one.
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq, *new_cfqq = NULL;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	if (!cfqd->rq_queued)
		return NULL;

	/*
	 * We were waiting for group to get backlogged. Expire the queue
	 */
	if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
		goto expire;

	/*
	 * The active queue has run out of time, expire it and select new.
	 */
	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
		/*
		 * If slice had not expired at the completion of last request
		 * we might not have turned on wait_busy flag. Don't expire
		 * the queue yet. Allow the group to get backlogged.
		 *
		 * The very fact that we have used up the slice means we
		 * have been idling all along on this queue and it should
		 * be ok to wait for this request to complete.
		 */
		if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
		    && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
			cfqq = NULL;
			goto keep_queue;
		} else
			goto check_group_idle;
	}

	/*
	 * The active queue has requests and isn't expired, allow it to
	 * dispatch.
	 */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		goto keep_queue;

	/*
	 * If another queue has a request waiting within our mean seek
	 * distance, let it run. The expire code will check for close
	 * cooperators and put the close queue at the front of the service
	 * tree. If possible, merge the expiring queue with the new cfqq.
	 */
	new_cfqq = cfq_close_cooperator(cfqd, cfqq);
	if (new_cfqq) {
		if (!cfqq->new_cfqq)
			cfq_setup_merge(cfqq, new_cfqq);
		goto expire;
	}

	/*
	 * No requests pending. If the active queue still has requests in
	 * flight or is idling for a new request, allow either of these
	 * conditions to happen (or time out) before selecting a new queue.
	 */
	if (timer_pending(&cfqd->idle_slice_timer)) {
		cfqq = NULL;
		goto keep_queue;
	}

	/*
	 * This is a deep seek queue, but the device is much faster than
	 * the queue can deliver, don't idle
	 */
	if (CFQQ_SEEKY(cfqq) && cfq_cfqq_idle_window(cfqq) &&
	    (cfq_cfqq_slice_new(cfqq) ||
	    (cfqq->slice_end - jiffies > jiffies - cfqq->slice_start))) {
		cfq_clear_cfqq_deep(cfqq);
		cfq_clear_cfqq_idle_window(cfqq);
	}

	if (cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
		cfqq = NULL;
		goto keep_queue;
	}

	/*
	 * If group idle is enabled and there are requests dispatched from
	 * this group, wait for requests to complete.
	 */
check_group_idle:
	if (cfqd->cfq_group_idle && cfqq->cfqg->nr_cfqq == 1 &&
	    cfqq->cfqg->dispatched &&
	    !cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true)) {
		cfqq = NULL;
		goto keep_queue;
	}

expire:
	cfq_slice_expired(cfqd, 0);
new_queue:
	/*
	 * Current queue expired. Check if we have to switch to a new
	 * service tree
	 */
	if (!new_cfqq)
		cfq_choose_cfqg(cfqd);

	cfqq = cfq_set_active_queue(cfqd, new_cfqq);
keep_queue:
	return cfqq;
}

static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
{
	int dispatched = 0;

	while (cfqq->next_rq) {
		cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
		dispatched++;
	}

	BUG_ON(!list_empty(&cfqq->fifo));

	/* By default cfqq is not expired if it is empty. Do it explicitly */
	__cfq_slice_expired(cfqq->cfqd, cfqq, 0);
	return dispatched;
}

/*
 * Drain our current requests. Used for barriers and when switching
 * io schedulers on-the-fly.
 */
static int cfq_forced_dispatch(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;
	int dispatched = 0;

	/* Expire the timeslice of the current active queue first */
	cfq_slice_expired(cfqd, 0);
	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
		__cfq_set_active_queue(cfqd, cfqq);
		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
	}

	BUG_ON(cfqd->busy_queues);

	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
	return dispatched;
}

static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
	struct cfq_queue *cfqq)
{
	/* the queue hasn't finished any request, can't estimate */
	if (cfq_cfqq_slice_new(cfqq))
		return true;
	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
		cfqq->slice_end))
		return true;

	return false;
}

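/*
 * Decide whether cfqq may dispatch another request right now. The
 * dispatch depth starts at cfq_quantum / 2 and is relaxed or tightened
 * below depending on class (idle queues get one request at a time), on
 * whether this is the sole busy queue, and, for async queues, on how
 * recently sync I/O completed.
 */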
static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	unsigned int max_dispatch;

	/*
	 * Drain async requests before we start sync IO
	 */
	if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
		return false;

	/*
	 * If this is an async queue and we have sync IO in flight, let it wait
	 */
	if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
		return false;

	max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
	if (cfq_class_idle(cfqq))
		max_dispatch = 1;

	/*
	 * Does this cfqq already have too much IO in flight?
	 */
	if (cfqq->dispatched >= max_dispatch) {
		bool promote_sync = false;
		/*
		 * idle queue must always only have a single IO in flight
		 */
		if (cfq_class_idle(cfqq))
			return false;

		/*
		 * If there is only one sync queue
		 * we can ignore the async queue here and give the sync
		 * queue no dispatch limit. The reason is a sync queue can
		 * preempt an async queue, so limiting the sync queue doesn't
		 * make sense. This is useful for the aiostress test.
		 */
		if (cfq_cfqq_sync(cfqq) && cfqd->busy_sync_queues == 1)
			promote_sync = true;

		/*
		 * We have other queues, don't allow more IO from this one
		 */
		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq) &&
				!promote_sync)
			return false;

		/*
		 * Sole queue user, no limit
		 */
		if (cfqd->busy_queues == 1 || promote_sync)
			max_dispatch = -1;
		else
			/*
			 * Normally we start throttling cfqq when cfq_quantum/2
			 * requests have been dispatched. But we can drive
			 * deeper queue depths at the beginning of slice,
			 * subject to the upper limit of cfq_quantum.
			 */
			max_dispatch = cfqd->cfq_quantum;
	}

	/*
	 * Async queues must wait a bit before being allowed dispatch.
	 * We also ramp up the dispatch depth gradually for async IO,
	 * based on the last sync IO we serviced
	 */
	if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
		unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
		unsigned int depth;

		depth = last_sync / cfqd->cfq_slice[1];
		if (!depth && !cfqq->dispatched)
			depth = 1;
		if (depth < max_dispatch)
			max_dispatch = depth;
	}

	/*
	 * If we're below the current max, allow a dispatch
	 */
	return cfqq->dispatched < max_dispatch;
}

/*
 * Dispatch a request from cfqq, moving them to the request queue
 * dispatch list.
 */
static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct request *rq;

	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

	if (!cfq_may_dispatch(cfqd, cfqq))
		return false;

	/*
	 * follow expired path, else get first next available
	 */
	rq = cfq_check_fifo(cfqq);
	if (!rq)
		rq = cfqq->next_rq;

	/*
	 * insert request into driver dispatch list
	 */
	cfq_dispatch_insert(cfqd->queue, rq);

	if (!cfqd->active_cic) {
		struct cfq_io_cq *cic = RQ_CIC(rq);

		atomic_long_inc(&cic->icq.ioc->refcount);
		cfqd->active_cic = cic;
	}

	return true;
}

/*
 * Find the cfqq that we need to service and move a request from that to the
 * dispatch list
 */
static int cfq_dispatch_requests(struct request_queue *q, int force)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq;

	if (!cfqd->busy_queues)
		return 0;

	if (unlikely(force))
		return cfq_forced_dispatch(cfqd);

	cfqq = cfq_select_queue(cfqd);
	if (!cfqq)
		return 0;

	/*
	 * Dispatch a request from this cfqq, if it is allowed
	 */
	if (!cfq_dispatch_request(cfqd, cfqq))
		return 0;

	cfqq->slice_dispatch++;
	cfq_clear_cfqq_must_dispatch(cfqq);

	/*
	 * expire an async queue immediately if it has used up its slice. idle
	 * queue always expire after 1 dispatch round.
	 */
	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
	    cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
	    cfq_class_idle(cfqq))) {
		cfqq->slice_end = jiffies + 1;
		cfq_slice_expired(cfqd, 0);
	}

	cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
	return 1;
}

/*
 * task holds one reference to the queue, dropped when task exits. each rq
 * in-flight on this queue also holds a reference, dropped when rq is freed.
 *
 * Each cfq queue took a reference on the parent group. Drop it now.
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct cfq_group *cfqg;

	BUG_ON(cfqq->ref <= 0);

	cfqq->ref--;
	if (cfqq->ref)
		return;

	cfq_log_cfqq(cfqd, cfqq, "put_queue");
	BUG_ON(rb_first(&cfqq->sort_list));
	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
	cfqg = cfqq->cfqg;

	if (unlikely(cfqd->active_queue == cfqq)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	BUG_ON(cfq_cfqq_on_rr(cfqq));
	kmem_cache_free(cfq_pool, cfqq);
	cfqg_put(cfqg);
}

static void cfq_put_cooperator(struct cfq_queue *cfqq)
{
	struct cfq_queue *__cfqq, *next;

	/*
	 * If this queue was scheduled to merge with another queue, be
	 * sure to drop the reference taken on that queue (and others in
	 * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
	 */
	__cfqq = cfqq->new_cfqq;
	while (__cfqq) {
		if (__cfqq == cfqq) {
			WARN(1, "cfqq->new_cfqq loop detected\n");
			break;
		}
		next = __cfqq->new_cfqq;
		cfq_put_queue(__cfqq);
		__cfqq = next;
	}
}

static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (unlikely(cfqq == cfqd->active_queue)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	cfq_put_cooperator(cfqq);

	cfq_put_queue(cfqq);
}

static void cfq_init_icq(struct io_cq *icq)
{
	struct cfq_io_cq *cic = icq_to_cic(icq);

	cic->ttime.last_end_request = jiffies;
}

static void cfq_exit_icq(struct io_cq *icq)
{
	struct cfq_io_cq *cic = icq_to_cic(icq);
	struct cfq_data *cfqd = cic_to_cfqd(cic);

	if (cic->cfqq[BLK_RW_ASYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
		cic->cfqq[BLK_RW_ASYNC] = NULL;
	}

	if (cic->cfqq[BLK_RW_SYNC]) {
		cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
		cic->cfqq[BLK_RW_SYNC] = NULL;
	}
}

static void cfq_init_prio_data(struct cfq_queue *cfqq, struct cfq_io_cq *cic)
{
	struct task_struct *tsk = current;
	int ioprio_class;

	if (!cfq_cfqq_prio_changed(cfqq))
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
	switch (ioprio_class) {
	default:
		printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
		/* fall through */
	case IOPRIO_CLASS_NONE:
		/*
		 * no prio set, inherit CPU scheduling settings
		 */
		cfqq->ioprio = task_nice_ioprio(tsk);
		cfqq->ioprio_class = task_nice_ioclass(tsk);
		break;
	case IOPRIO_CLASS_RT:
		cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
		cfqq->ioprio_class = IOPRIO_CLASS_RT;
		break;
	case IOPRIO_CLASS_BE:
		cfqq->ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
		cfqq->ioprio_class = IOPRIO_CLASS_BE;
		break;
	case IOPRIO_CLASS_IDLE:
		cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
		cfqq->ioprio = 7;
		cfq_clear_cfqq_idle_window(cfqq);
		break;
	}

	/*
	 * keep track of original prio settings in case we have to temporarily
	 * elevate the priority of this queue
	 */
	cfqq->org_ioprio = cfqq->ioprio;
	cfq_clear_cfqq_prio_changed(cfqq);
}

static void check_ioprio_changed(struct cfq_io_cq *cic, struct bio *bio)
{
	int ioprio = cic->icq.ioc->ioprio;
	struct cfq_data *cfqd = cic_to_cfqd(cic);
	struct cfq_queue *cfqq;

	/*
	 * Check whether ioprio has changed. The condition may trigger
	 * spuriously on a newly created cic but there's no harm.
	 */
	if (unlikely(!cfqd) || likely(cic->ioprio == ioprio))
		return;

	cfqq = cic->cfqq[BLK_RW_ASYNC];
	if (cfqq) {
		struct cfq_queue *new_cfqq;

		new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic, bio,
					 GFP_ATOMIC);
		if (new_cfqq) {
			cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
			cfq_put_queue(cfqq);
		}
	}

	cfqq = cic->cfqq[BLK_RW_SYNC];
	if (cfqq)
		cfq_mark_cfqq_prio_changed(cfqq);

	cic->ioprio = ioprio;
}

static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			  pid_t pid, bool is_sync)
{
	RB_CLEAR_NODE(&cfqq->rb_node);
	RB_CLEAR_NODE(&cfqq->p_node);
	INIT_LIST_HEAD(&cfqq->fifo);

	cfqq->ref = 0;
	cfqq->cfqd = cfqd;

	cfq_mark_cfqq_prio_changed(cfqq);

	if (is_sync) {
		if (!cfq_class_idle(cfqq))
			cfq_mark_cfqq_idle_window(cfqq);
		cfq_mark_cfqq_sync(cfqq);
	}
	cfqq->pid = pid;
}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio)
{
	struct cfq_data *cfqd = cic_to_cfqd(cic);
	struct cfq_queue *sync_cfqq;
	uint64_t id;

	rcu_read_lock();
	id = bio_blkio_cgroup(bio)->id;
	rcu_read_unlock();

	/*
	 * Check whether blkcg has changed. The condition may trigger
	 * spuriously on a newly created cic but there's no harm.
	 */
	if (unlikely(!cfqd) || likely(cic->blkcg_id == id))
		return;

	sync_cfqq = cic_to_cfqq(cic, 1);
	if (sync_cfqq) {
		/*
		 * Drop reference to sync queue. A new sync queue will be
		 * assigned in new group upon arrival of a fresh request.
		 */
		cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
		cic_set_cfqq(cic, NULL, 1);
		cfq_put_queue(sync_cfqq);
	}

	cic->blkcg_id = id;
}
#else
static inline void check_blkcg_changed(struct cfq_io_cq *cic, struct bio *bio) { }
#endif  /* CONFIG_CFQ_GROUP_IOSCHED */


static struct cfq_queue *
cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
		     struct bio *bio, gfp_t gfp_mask)
{
	struct blkio_cgroup *blkcg;
	struct cfq_queue *cfqq, *new_cfqq = NULL;
	struct cfq_group *cfqg;

retry:
	rcu_read_lock();

	blkcg = bio_blkio_cgroup(bio);
	cfqg = cfq_lookup_create_cfqg(cfqd, blkcg);
	cfqq = cic_to_cfqq(cic, is_sync);

	/*
	 * Always try a new alloc if we fell back to the OOM cfqq
	 * originally, since it should just be a temporary situation.
	 */
	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
		cfqq = NULL;
		if (new_cfqq) {
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
			rcu_read_unlock();
			spin_unlock_irq(cfqd->queue->queue_lock);
			new_cfqq = kmem_cache_alloc_node(cfq_pool,
							 gfp_mask | __GFP_ZERO,
							 cfqd->queue->node);
			spin_lock_irq(cfqd->queue->queue_lock);
			if (new_cfqq)
				goto retry;
		} else {
			cfqq = kmem_cache_alloc_node(cfq_pool,
						     gfp_mask | __GFP_ZERO,
						     cfqd->queue->node);
		}

		if (cfqq) {
			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
			cfq_init_prio_data(cfqq, cic);
			cfq_link_cfqq_cfqg(cfqq, cfqg);
			cfq_log_cfqq(cfqd, cfqq, "alloced");
		} else
			cfqq = &cfqd->oom_cfqq;
	}

	if (new_cfqq)
		kmem_cache_free(cfq_pool, new_cfqq);

	rcu_read_unlock();
	return cfqq;
}
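
/*
 * Illustrative note (added commentary, not from the original source):
 * the retry loop above is the classic "allocate outside the lock"
 * pattern. A blocking allocation cannot be done under queue_lock, so
 * for __GFP_WAIT callers the lock is dropped, the cfq_queue allocated,
 * the lock retaken, and the lookup redone from scratch, since another
 * context may have installed a queue in the meantime. In sketch form:
 *
 *	retry:
 *		lookup();
 *		if (missing && can_sleep) {
 *			unlock(); obj = alloc(); lock();
 *			goto retry;	// revalidate under the lock
 *		}
 *
 * If the revalidated lookup no longer needs the allocation, it is
 * freed just before returning, which is why new_cfqq may be freed
 * at the bottom of the function.
 */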

static struct cfq_queue **
cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
{
	switch (ioprio_class) {
	case IOPRIO_CLASS_RT:
		return &cfqd->async_cfqq[0][ioprio];
	case IOPRIO_CLASS_NONE:
		ioprio = IOPRIO_NORM;
		/* fall through */
	case IOPRIO_CLASS_BE:
		return &cfqd->async_cfqq[1][ioprio];
	case IOPRIO_CLASS_IDLE:
		return &cfqd->async_idle_cfqq;
	default:
		BUG();
	}
}
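
/*
 * Illustrative note (added commentary): async queues are shared per
 * (class, priority) slot rather than per process. The backing table is
 * effectively:
 *
 *	async_cfqq[0][prio]	// IOPRIO_CLASS_RT, one slot per prio level
 *	async_cfqq[1][prio]	// IOPRIO_CLASS_BE (NONE maps to IOPRIO_NORM)
 *	async_idle_cfqq		// a single queue for IOPRIO_CLASS_IDLE
 *
 * so, e.g., all best-effort prio-4 writers on a device end up sharing
 * the same async cfqq.
 */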

static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
	      struct bio *bio, gfp_t gfp_mask)
{
	const int ioprio_class = IOPRIO_PRIO_CLASS(cic->ioprio);
	const int ioprio = IOPRIO_PRIO_DATA(cic->ioprio);
	struct cfq_queue **async_cfqq = NULL;
	struct cfq_queue *cfqq = NULL;

	if (!is_sync) {
		async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
		cfqq = *async_cfqq;
	}

	if (!cfqq)
		cfqq = cfq_find_alloc_queue(cfqd, is_sync, cic, bio, gfp_mask);

	/*
	 * pin the queue now that it's allocated, scheduler exit will prune it
	 */
	if (!is_sync && !(*async_cfqq)) {
		cfqq->ref++;
		*async_cfqq = cfqq;
	}

	cfqq->ref++;
	return cfqq;
}

static void
__cfq_update_io_thinktime(struct cfq_ttime *ttime, unsigned long slice_idle)
{
	unsigned long elapsed = jiffies - ttime->last_end_request;
	elapsed = min(elapsed, 2UL * slice_idle);

	ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8;
	ttime->ttime_total = (7*ttime->ttime_total + 256*elapsed) / 8;
	ttime->ttime_mean = (ttime->ttime_total + 128) / ttime->ttime_samples;
}
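
/*
 * Illustrative note (added commentary): the three updates above form a
 * fixed-point exponentially weighted moving average with decay 7/8.
 * Samples are scaled by 256 so integer division keeps precision, the
 * +128 rounds the final division to nearest, and elapsed is clamped to
 * twice slice_idle so one long pause cannot blow up the average. A
 * worked example with a steady think time of 4 jiffies, from zero state:
 *
 *	first update:  samples = 32,  total = 128,  mean = (128+128)/32 = 8
 *	steady state:  samples -> 256, total -> 1024,
 *	               mean = (1024 + 128) / 256 = 4  (the true think time)
 */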

static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			struct cfq_io_cq *cic)
{
	if (cfq_cfqq_sync(cfqq)) {
		__cfq_update_io_thinktime(&cic->ttime, cfqd->cfq_slice_idle);
		__cfq_update_io_thinktime(&cfqq->service_tree->ttime,
			cfqd->cfq_slice_idle);
	}
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	__cfq_update_io_thinktime(&cfqq->cfqg->ttime, cfqd->cfq_group_idle);
#endif
}

static void
cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct request *rq)
{
	sector_t sdist = 0;
	sector_t n_sec = blk_rq_sectors(rq);
	if (cfqq->last_request_pos) {
		if (cfqq->last_request_pos < blk_rq_pos(rq))
			sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
		else
			sdist = cfqq->last_request_pos - blk_rq_pos(rq);
	}

	cfqq->seek_history <<= 1;
	if (blk_queue_nonrot(cfqd->queue))
		cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
	else
		cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
}
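
/*
 * Illustrative note (added commentary): seek_history is a bitwise
 * shift register of the most recent requests, one bit per request.
 * On rotational media a bit is set when the distance from the previous
 * request exceeds CFQQ_SEEK_THR; on non-rotational media a bit is set
 * for small requests instead, since seek distance is irrelevant there.
 * Classifiers such as CFQQ_SEEKY() can then look at the population
 * count of the register, e.g. (a sketch; the real threshold lives with
 * the macro definition earlier in this file):
 *
 *	seeky = hweight32(cfqq->seek_history) > threshold;
 */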

/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_cq *cic)
{
	int old_idle, enable_idle;

	/*
	 * Don't idle for async or idle io prio class
	 */
	if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
		return;

	enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);

	if (cfqq->queued[0] + cfqq->queued[1] >= 4)
		cfq_mark_cfqq_deep(cfqq);

	if (cfqq->next_rq && (cfqq->next_rq->cmd_flags & REQ_NOIDLE))
		enable_idle = 0;
	else if (!atomic_read(&cic->icq.ioc->active_ref) ||
		 !cfqd->cfq_slice_idle ||
		 (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
		enable_idle = 0;
	else if (sample_valid(cic->ttime.ttime_samples)) {
		if (cic->ttime.ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
	}

	if (old_idle != enable_idle) {
		cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
		if (enable_idle)
			cfq_mark_cfqq_idle_window(cfqq);
		else
			cfq_clear_cfqq_idle_window(cfqq);
	}
}
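
/*
 * Illustrative note (added commentary): idling is only worthwhile when
 * the next request from this process is likely to arrive soon and land
 * near the disk head. The checks above encode exactly that: idling is
 * disabled for REQ_NOIDLE requests, for tasks with no active io
 * context, when slice idling is disabled via sysfs, for shallow seeky
 * queues, and whenever the measured mean think time already exceeds
 * the cfq_slice_idle budget (the wait would cost more than it saves).
 */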

/*
 * Check if new_cfqq should preempt the currently active queue. Returns false
 * for no (or if we aren't sure); returning true will cause a preempt.
 */
static bool
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
		   struct request *rq)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		return false;

	if (cfq_class_idle(new_cfqq))
		return false;

	if (cfq_class_idle(cfqq))
		return true;

	/*
	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
	 */
	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
		return false;

	/*
	 * if the new request is sync, but the currently running queue is
	 * not, let the sync request have priority.
	 */
	if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
		return true;

	if (new_cfqq->cfqg != cfqq->cfqg)
		return false;

	if (cfq_slice_used(cfqq))
		return true;

	/* Allow preemption only if we are idling on sync-noidle tree */
	if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
	    cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
	    new_cfqq->service_tree->count == 2 &&
	    RB_EMPTY_ROOT(&cfqq->sort_list))
		return true;

	/*
	 * So both queues are sync. Let the new request get disk time if
	 * it's a metadata request and the current queue is doing regular IO.
	 */
	if ((rq->cmd_flags & REQ_PRIO) && !cfqq->prio_pending)
		return true;

	/*
	 * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
	 */
	if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
		return true;

	/* An idle queue should not be idle now for some reason */
	if (RB_EMPTY_ROOT(&cfqq->sort_list) && !cfq_should_idle(cfqd, cfqq))
		return true;

	if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
		return false;

	/*
	 * if this request is as-good as one we would expect from the
	 * current cfqq, let it preempt
	 */
	if (cfq_rq_close(cfqd, cfqq, rq))
		return true;

	return false;
}
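
/*
 * Illustrative note (added commentary): the checks above are ordered so
 * the cheap class rules run first. In decision order: idle class never
 * preempts and is always preempted; RT is never preempted by non-RT;
 * sync preempts async; cross-group preemption is refused; then the
 * finer heuristics apply (used-up slice, sync-noidle fairness, metadata
 * REQ_PRIO requests, RT over BE, pointless idling, and finally request
 * proximity via cfq_rq_close()).
 */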

/*
 * cfqq preempts the active queue. If we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	enum wl_type_t old_type = cfqq_type(cfqd->active_queue);

	cfq_log_cfqq(cfqd, cfqq, "preempt");
	cfq_slice_expired(cfqd, 1);

	/*
	 * workload type is changed, don't save slice, otherwise preempt
	 * doesn't happen
	 */
	if (old_type != cfqq_type(cfqq))
		cfqq->cfqg->saved_workload_slice = 0;

	/*
	 * Put the new queue at the front of the current list,
	 * so we know that it will be selected next.
	 */
	BUG_ON(!cfq_cfqq_on_rr(cfqq));

	cfq_service_tree_add(cfqd, cfqq, 1);

	cfqq->slice_end = 0;
	cfq_mark_cfqq_slice_new(cfqq);
}

/*
 * Called when a new fs request (rq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct request *rq)
{
	struct cfq_io_cq *cic = RQ_CIC(rq);

	cfqd->rq_queued++;
	if (rq->cmd_flags & REQ_PRIO)
		cfqq->prio_pending++;

	cfq_update_io_thinktime(cfqd, cfqq, cic);
	cfq_update_io_seektime(cfqd, cfqq, rq);
	cfq_update_idle_window(cfqd, cfqq, cic);

	cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);

	if (cfqq == cfqd->active_queue) {
		/*
		 * Remember that we saw a request from this process, but
		 * don't start queuing just yet. Otherwise we risk seeing lots
		 * of tiny requests, because we disrupt the normal plugging
		 * and merging. If the request is already larger than a single
		 * page, let it rip immediately. For that case we assume that
		 * merging is already done. Ditto for a busy system that
		 * has other work pending, don't risk delaying until the
		 * idle timer unplug to continue working.
		 */
		if (cfq_cfqq_wait_request(cfqq)) {
			if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
			    cfqd->busy_queues > 1) {
				cfq_del_timer(cfqd, cfqq);
				cfq_clear_cfqq_wait_request(cfqq);
				__blk_run_queue(cfqd->queue);
			} else {
				cfqg_stats_update_idle_time(cfqq->cfqg);
				cfq_mark_cfqq_must_dispatch(cfqq);
			}
		}
	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
		/*
		 * not the active queue - expire current slice if it is
		 * idle and has expired its mean thinktime, or this new queue
		 * has some old slice time left and is of higher priority, or
		 * this new queue is RT and the current one is BE
		 */
		cfq_preempt_queue(cfqd, cfqq);
		__blk_run_queue(cfqd->queue);
	}
}

static void cfq_insert_request(struct request_queue *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_log_cfqq(cfqd, cfqq, "insert_request");
	cfq_init_prio_data(cfqq, RQ_CIC(rq));

	rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
	list_add_tail(&rq->queuelist, &cfqq->fifo);
	cfq_add_rq_rb(rq);
	cfqg_stats_update_io_add(RQ_CFQG(rq), cfqd->serving_group,
				 rq->cmd_flags);
	cfq_rq_enqueued(cfqd, cfqq, rq);
}

/*
 * Update hw_tag based on peak queue depth over 50 samples under
 * sufficient load.
 */
static void cfq_update_hw_tag(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
		cfqd->hw_tag_est_depth = cfqd->rq_in_driver;

	if (cfqd->hw_tag == 1)
		return;

	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
		return;

	/*
	 * If the active queue doesn't have enough requests and can idle,
	 * cfq might not dispatch sufficient requests to hardware. Don't
	 * zero hw_tag in this case.
	 */
	if (cfqq && cfq_cfqq_idle_window(cfqq) &&
	    cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
	    CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
		return;

	if (cfqd->hw_tag_samples++ < 50)
		return;

	if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
		cfqd->hw_tag = 1;
	else
		cfqd->hw_tag = 0;
}
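
/*
 * Illustrative note (added commentary): hw_tag is a runtime guess at
 * whether the device does its own command queueing (e.g. NCQ). The
 * estimator watches the peak number of requests the driver holds; if,
 * over 50 loaded samples, that depth ever reaches CFQ_HW_QUEUE_MIN the
 * device is treated as queueing (hw_tag = 1), otherwise not. Several
 * idling decisions elsewhere in the scheduler key off this flag.
 */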

static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct cfq_io_cq *cic = cfqd->active_cic;

	/* If the queue already has requests, don't wait */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		return false;

	/* If there are other queues in the group, don't wait */
	if (cfqq->cfqg->nr_cfqq > 1)
		return false;

	/* the only queue in the group, but think time is big */
	if (cfq_io_thinktime_big(cfqd, &cfqq->cfqg->ttime, true))
		return false;

	if (cfq_slice_used(cfqq))
		return true;

	/* if slice left is less than think time, wait busy */
	if (cic && sample_valid(cic->ttime.ttime_samples)
	    && (cfqq->slice_end - jiffies < cic->ttime.ttime_mean))
		return true;

	/*
	 * If think time is less than a jiffy, then ttime_mean=0 and the
	 * above will not be true. It might happen that the slice has not
	 * expired yet but will expire soon (4-5 ns) during select_queue().
	 * To cover the case where think time is less than a jiffy, mark
	 * the queue wait busy if only 1 jiffy is left in the slice.
	 */
	if (cfqq->slice_end - jiffies == 1)
		return true;

	return false;
}
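
/*
 * Illustrative note (added commentary, inferred from the checks above):
 * "wait busy" extends an empty queue's slice by one idle period when it
 * is the only queue in its group and more IO is expected shortly. That
 * keeps the slice, and hence the group's share of disk time, from being
 * forfeited just because the next request is a think time away.
 */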

static void cfq_completed_request(struct request_queue *q, struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);
	unsigned long now;

	now = jiffies;
	cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d",
		     !!(rq->cmd_flags & REQ_NOIDLE));

	cfq_update_hw_tag(cfqd);

	WARN_ON(!cfqd->rq_in_driver);
	WARN_ON(!cfqq->dispatched);
	cfqd->rq_in_driver--;
	cfqq->dispatched--;
	(RQ_CFQG(rq))->dispatched--;
	cfqg_stats_update_completion(cfqq->cfqg, rq_start_time_ns(rq),
				     rq_io_start_time_ns(rq), rq->cmd_flags);

	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;

	if (sync) {
		struct cfq_rb_root *service_tree;

		RQ_CIC(rq)->ttime.last_end_request = now;

		if (cfq_cfqq_on_rr(cfqq))
			service_tree = cfqq->service_tree;
		else
			service_tree = service_tree_for(cfqq->cfqg,
				cfqq_prio(cfqq), cfqq_type(cfqq));
		service_tree->ttime.last_end_request = now;
		if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
			cfqd->last_delayed_sync = now;
	}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	cfqq->cfqg->ttime.last_end_request = now;
#endif

	/*
	 * If this is the active queue, check if it needs to be expired,
	 * or if we want to idle in case it has no pending requests.
	 */
	if (cfqd->active_queue == cfqq) {
		const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);

		if (cfq_cfqq_slice_new(cfqq)) {
			cfq_set_prio_slice(cfqd, cfqq);
			cfq_clear_cfqq_slice_new(cfqq);
		}

		/*
		 * Should we wait for next request to come in before we expire
		 * the queue.
		 */
		if (cfq_should_wait_busy(cfqd, cfqq)) {
			unsigned long extend_sl = cfqd->cfq_slice_idle;
			if (!cfqd->cfq_slice_idle)
				extend_sl = cfqd->cfq_group_idle;
			cfqq->slice_end = jiffies + extend_sl;
			cfq_mark_cfqq_wait_busy(cfqq);
			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
		}

		/*
		 * Idling is not enabled on:
		 * - expired queues
		 * - idle-priority queues
		 * - async queues
		 * - queues with still some requests queued
		 * - when there is a close cooperator
		 */
		if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
			cfq_slice_expired(cfqd, 1);
		else if (sync && cfqq_empty &&
			 !cfq_close_cooperator(cfqd, cfqq)) {
			cfq_arm_slice_timer(cfqd);
		}
	}

	if (!cfqd->rq_in_driver)
		cfq_schedule_dispatch(cfqd);
}

static inline int __cfq_may_queue(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
}

static int cfq_may_queue(struct request_queue *q, int rw)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct task_struct *tsk = current;
	struct cfq_io_cq *cic;
	struct cfq_queue *cfqq;

	/*
	 * don't force setup of a queue from here, as a call to may_queue
	 * does not necessarily imply that a request actually will be queued.
	 * so just lookup a possibly existing queue, or return 'may queue'
	 * if that fails
	 */
	cic = cfq_cic_lookup(cfqd, tsk->io_context);
	if (!cic)
		return ELV_MQUEUE_MAY;

	cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
	if (cfqq) {
		cfq_init_prio_data(cfqq, cic);

		return __cfq_may_queue(cfqq);
	}

	return ELV_MQUEUE_MAY;
}

/*
 * queue lock held here
 */
static void cfq_put_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq) {
		const int rw = rq_data_dir(rq);

		BUG_ON(!cfqq->allocated[rw]);
		cfqq->allocated[rw]--;

		/* Put down rq reference on cfqg */
		cfqg_put(RQ_CFQG(rq));
		rq->elv.priv[0] = NULL;
		rq->elv.priv[1] = NULL;

		cfq_put_queue(cfqq);
	}
}

static struct cfq_queue *
cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_cq *cic,
		struct cfq_queue *cfqq)
{
	cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
	cic_set_cfqq(cic, cfqq->new_cfqq, 1);
	cfq_mark_cfqq_coop(cfqq->new_cfqq);
	cfq_put_queue(cfqq);
	return cic_to_cfqq(cic, 1);
}

/*
 * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
 * was the last process referring to said cfqq.
 */
static struct cfq_queue *
split_cfqq(struct cfq_io_cq *cic, struct cfq_queue *cfqq)
{
	if (cfqq_process_refs(cfqq) == 1) {
		cfqq->pid = current->pid;
		cfq_clear_cfqq_coop(cfqq);
		cfq_clear_cfqq_split_coop(cfqq);
		return cfqq;
	}

	cic_set_cfqq(cic, NULL, 1);

	cfq_put_cooperator(cfqq);

	cfq_put_queue(cfqq);
	return NULL;
}

/*
 * Allocate cfq data structures associated with this request.
 */
static int
cfq_set_request(struct request_queue *q, struct request *rq, struct bio *bio,
		gfp_t gfp_mask)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_io_cq *cic = icq_to_cic(rq->elv.icq);
	const int rw = rq_data_dir(rq);
	const bool is_sync = rq_is_sync(rq);
	struct cfq_queue *cfqq;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	spin_lock_irq(q->queue_lock);

	check_ioprio_changed(cic, bio);
	check_blkcg_changed(cic, bio);
new_queue:
	cfqq = cic_to_cfqq(cic, is_sync);
	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
		cfqq = cfq_get_queue(cfqd, is_sync, cic, bio, gfp_mask);
		cic_set_cfqq(cic, cfqq, is_sync);
	} else {
		/*
		 * If the queue was seeky for too long, break it apart.
		 */
		if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
			cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
			cfqq = split_cfqq(cic, cfqq);
			if (!cfqq)
				goto new_queue;
		}

		/*
		 * Check to see if this queue is scheduled to merge with
		 * another, closely cooperating queue. The merging of
		 * queues happens here as it must be done in process context.
		 * The reference on new_cfqq was taken in merge_cfqqs.
		 */
		if (cfqq->new_cfqq)
			cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
	}

	cfqq->allocated[rw]++;

	cfqq->ref++;
	cfqg_get(cfqq->cfqg);
	rq->elv.priv[0] = cfqq;
	rq->elv.priv[1] = cfqq->cfqg;
	spin_unlock_irq(q->queue_lock);
	return 0;
}

static void cfq_kick_queue(struct work_struct *work)
{
	struct cfq_data *cfqd =
		container_of(work, struct cfq_data, unplug_work);
	struct request_queue *q = cfqd->queue;

	spin_lock_irq(q->queue_lock);
	__blk_run_queue(cfqd->queue);
	spin_unlock_irq(q->queue_lock);
}

/*
 * Timer running if the active_queue is currently idling inside its time slice
 */
static void cfq_idle_slice_timer(unsigned long data)
{
	struct cfq_data *cfqd = (struct cfq_data *) data;
	struct cfq_queue *cfqq;
	unsigned long flags;
	int timed_out = 1;

	cfq_log(cfqd, "idle timer fired");

	spin_lock_irqsave(cfqd->queue->queue_lock, flags);

	cfqq = cfqd->active_queue;
	if (cfqq) {
		timed_out = 0;

		/*
		 * We saw a request before the queue expired, let it through
		 */
		if (cfq_cfqq_must_dispatch(cfqq))
			goto out_kick;

		/*
		 * expired
		 */
		if (cfq_slice_used(cfqq))
			goto expire;

		/*
		 * only expire and reinvoke request handler, if there are
		 * other queues with pending requests
		 */
		if (!cfqd->busy_queues)
			goto out_cont;

		/*
		 * not expired and it has a request pending, let it dispatch
		 */
		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
			goto out_kick;

		/*
		 * Queue depth flag is reset only when the idle didn't succeed
		 */
		cfq_clear_cfqq_deep(cfqq);
	}
expire:
	cfq_slice_expired(cfqd, timed_out);
out_kick:
	cfq_schedule_dispatch(cfqd);
out_cont:
	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
}

static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
{
	del_timer_sync(&cfqd->idle_slice_timer);
	cancel_work_sync(&cfqd->unplug_work);
}

static void cfq_put_async_queues(struct cfq_data *cfqd)
{
	int i;

	for (i = 0; i < IOPRIO_BE_NR; i++) {
		if (cfqd->async_cfqq[0][i])
			cfq_put_queue(cfqd->async_cfqq[0][i]);
		if (cfqd->async_cfqq[1][i])
			cfq_put_queue(cfqd->async_cfqq[1][i]);
	}

	if (cfqd->async_idle_cfqq)
		cfq_put_queue(cfqd->async_idle_cfqq);
}

static void cfq_exit_queue(struct elevator_queue *e)
{
	struct cfq_data *cfqd = e->elevator_data;
	struct request_queue *q = cfqd->queue;

	cfq_shutdown_timer_wq(cfqd);

	spin_lock_irq(q->queue_lock);

	if (cfqd->active_queue)
		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);

	cfq_put_async_queues(cfqd);

	spin_unlock_irq(q->queue_lock);

	cfq_shutdown_timer_wq(cfqd);

#ifndef CONFIG_CFQ_GROUP_IOSCHED
	kfree(cfqd->root_group);
#endif
	update_root_blkg_pd(q, BLKIO_POLICY_PROP);
	kfree(cfqd);
}

static int cfq_init_queue(struct request_queue *q)
{
	struct cfq_data *cfqd;
	struct blkio_group *blkg __maybe_unused;
	int i;

	cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
	if (!cfqd)
		return -ENOMEM;

	cfqd->queue = q;
	q->elevator->elevator_data = cfqd;

	/* Init root service tree */
	cfqd->grp_service_tree = CFQ_RB_ROOT;

	/* Init root group and prefer root group over other groups by default */
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	blkg = blkg_lookup_create(&blkio_root_cgroup, q, true);
	if (!IS_ERR(blkg))
		cfqd->root_group = blkg_to_cfqg(blkg);

	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();
#else
	cfqd->root_group = kzalloc_node(sizeof(*cfqd->root_group),
					GFP_KERNEL, cfqd->queue->node);
	if (cfqd->root_group)
		cfq_init_cfqg_base(cfqd->root_group);
#endif
	if (!cfqd->root_group) {
		kfree(cfqd);
		return -ENOMEM;
	}

	cfqd->root_group->weight = 2 * CFQ_WEIGHT_DEFAULT;

	/*
	 * Not strictly needed (since RB_ROOT just clears the node and we
	 * zeroed cfqd on alloc), but better be safe in case someone decides
	 * to add magic to the rb code
	 */
	for (i = 0; i < CFQ_PRIO_LISTS; i++)
		cfqd->prio_trees[i] = RB_ROOT;

	/*
	 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
	 * Grab a permanent reference to it, so that the normal code flow
	 * will not attempt to free it. oom_cfqq is linked to root_group
	 * but shouldn't hold a reference as it'll never be unlinked. Lose
	 * the reference from linking right away.
	 */
	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
	cfqd->oom_cfqq.ref++;

	spin_lock_irq(q->queue_lock);
	cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, cfqd->root_group);
	cfqg_put(cfqd->root_group);
	spin_unlock_irq(q->queue_lock);

	init_timer(&cfqd->idle_slice_timer);
	cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
	cfqd->idle_slice_timer.data = (unsigned long) cfqd;

	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);

	cfqd->cfq_quantum = cfq_quantum;
	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
	cfqd->cfq_back_max = cfq_back_max;
	cfqd->cfq_back_penalty = cfq_back_penalty;
	cfqd->cfq_slice[0] = cfq_slice_async;
	cfqd->cfq_slice[1] = cfq_slice_sync;
	cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
	cfqd->cfq_slice_idle = cfq_slice_idle;
	cfqd->cfq_group_idle = cfq_group_idle;
	cfqd->cfq_latency = 1;
	cfqd->hw_tag = -1;

	/*
	 * we optimistically start assuming sync ops weren't delayed in last
	 * second, in order to have larger depth for async operations.
	 */
	cfqd->last_delayed_sync = jiffies - HZ;
	return 0;
}

/*
 * sysfs parts below -->
 */
static ssize_t
cfq_var_show(unsigned int var, char *page)
{
	return sprintf(page, "%d\n", var);
}

static ssize_t
cfq_var_store(unsigned int *var, const char *page, size_t count)
{
	char *p = (char *) page;

	*var = simple_strtoul(p, &p, 10);
	return count;
}
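
/*
 * Illustrative note (added commentary): the SHOW/STORE macros below
 * stamp out one sysfs handler per tunable from the two helpers above.
 * Each expansion is an ordinary function; e.g. SHOW_FUNCTION for
 * cfq_quantum_show reduces to, in sketch form:
 *
 *	static ssize_t cfq_quantum_show(struct elevator_queue *e, char *page)
 *	{
 *		return cfq_var_show(e->elevator_data->cfq_quantum, page);
 *	}
 *
 * with an optional jiffies_to_msecs()/msecs_to_jiffies() conversion for
 * the time-valued tunables (__CONV set), and MIN/MAX clamping on store.
 */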

#define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
static ssize_t __FUNC(struct elevator_queue *e, char *page)		\
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data = __VAR;					\
	if (__CONV)							\
		__data = jiffies_to_msecs(__data);			\
	return cfq_var_show(__data, (page));				\
}
SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
#undef SHOW_FUNCTION

#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
{									\
	struct cfq_data *cfqd = e->elevator_data;			\
	unsigned int __data;						\
	int ret = cfq_var_store(&__data, (page), count);		\
	if (__data < (MIN))						\
		__data = (MIN);						\
	else if (__data > (MAX))					\
		__data = (MAX);						\
	if (__CONV)							\
		*(__PTR) = msecs_to_jiffies(__data);			\
	else								\
		*(__PTR) = __data;					\
	return ret;							\
}
STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
		UINT_MAX, 1);
STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
		UINT_MAX, 0);
STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
#undef STORE_FUNCTION

#define CFQ_ATTR(name) \
	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)

static struct elv_fs_entry cfq_attrs[] = {
	CFQ_ATTR(quantum),
	CFQ_ATTR(fifo_expire_sync),
	CFQ_ATTR(fifo_expire_async),
	CFQ_ATTR(back_seek_max),
	CFQ_ATTR(back_seek_penalty),
	CFQ_ATTR(slice_sync),
	CFQ_ATTR(slice_async),
	CFQ_ATTR(slice_async_rq),
	CFQ_ATTR(slice_idle),
	CFQ_ATTR(group_idle),
	CFQ_ATTR(low_latency),
	__ATTR_NULL
};
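
/*
 * Illustrative note (added commentary): these attributes appear under
 * the per-queue elevator directory in sysfs, so the tunables can be
 * inspected and changed at runtime, e.g. (paths assuming a disk named
 * sda that is using this scheduler):
 *
 *	cat /sys/block/sda/queue/iosched/slice_idle
 *	echo 0 > /sys/block/sda/queue/iosched/slice_idle
 *
 * Time-valued tunables are shown and stored in milliseconds and
 * converted to jiffies internally by the SHOW/STORE handlers above.
 */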

static struct elevator_type iosched_cfq = {
	.ops = {
		.elevator_merge_fn =		cfq_merge,
		.elevator_merged_fn =		cfq_merged_request,
		.elevator_merge_req_fn =	cfq_merged_requests,
		.elevator_allow_merge_fn =	cfq_allow_merge,
		.elevator_bio_merged_fn =	cfq_bio_merged,
		.elevator_dispatch_fn =		cfq_dispatch_requests,
		.elevator_add_req_fn =		cfq_insert_request,
		.elevator_activate_req_fn =	cfq_activate_request,
		.elevator_deactivate_req_fn =	cfq_deactivate_request,
		.elevator_completed_req_fn =	cfq_completed_request,
		.elevator_former_req_fn =	elv_rb_former_request,
		.elevator_latter_req_fn =	elv_rb_latter_request,
		.elevator_init_icq_fn =		cfq_init_icq,
		.elevator_exit_icq_fn =		cfq_exit_icq,
		.elevator_set_req_fn =		cfq_set_request,
		.elevator_put_req_fn =		cfq_put_request,
		.elevator_may_queue_fn =	cfq_may_queue,
		.elevator_init_fn =		cfq_init_queue,
		.elevator_exit_fn =		cfq_exit_queue,
	},
	.icq_size	=	sizeof(struct cfq_io_cq),
	.icq_align	=	__alignof__(struct cfq_io_cq),
	.elevator_attrs =	cfq_attrs,
	.elevator_name	=	"cfq",
	.elevator_owner =	THIS_MODULE,
};

#ifdef CONFIG_CFQ_GROUP_IOSCHED
static struct blkio_policy_type blkio_policy_cfq = {
	.ops = {
		.blkio_init_group_fn =		cfq_init_blkio_group,
		.blkio_reset_group_stats_fn =	cfqg_stats_reset,
	},
	.plid = BLKIO_POLICY_PROP,
	.pdata_size = sizeof(struct cfq_group),
	.cftypes = cfq_blkcg_files,
};
#endif

static int __init cfq_init(void)
{
	int ret;

	/*
	 * could be 0 on HZ < 1000 setups
	 */
	if (!cfq_slice_async)
		cfq_slice_async = 1;
	if (!cfq_slice_idle)
		cfq_slice_idle = 1;

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	if (!cfq_group_idle)
		cfq_group_idle = 1;
#else
	cfq_group_idle = 0;
#endif
	cfq_pool = KMEM_CACHE(cfq_queue, 0);
	if (!cfq_pool)
		return -ENOMEM;

	ret = elv_register(&iosched_cfq);
	if (ret) {
		kmem_cache_destroy(cfq_pool);
		return ret;
	}

#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkio_policy_register(&blkio_policy_cfq);
#endif
	return 0;
}

static void __exit cfq_exit(void)
{
#ifdef CONFIG_CFQ_GROUP_IOSCHED
	blkio_policy_unregister(&blkio_policy_cfq);
#endif
	elv_unregister(&iosched_cfq);
	kmem_cache_destroy(cfq_pool);
}

module_init(cfq_init);
module_exit(cfq_exit);

MODULE_AUTHOR("Jens Axboe");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");