cfq-iosched.c
  1. /*
  2. * CFQ, or complete fairness queueing, disk scheduler.
  3. *
  4. * Based on ideas from a previously unfinished io
  5. * scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
  6. *
  7. * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
  8. */
  9. #include <linux/module.h>
  10. #include <linux/blkdev.h>
  11. #include <linux/elevator.h>
  12. #include <linux/jiffies.h>
  13. #include <linux/rbtree.h>
  14. #include <linux/ioprio.h>
  15. #include <linux/blktrace_api.h>
  16. #include "blk-cgroup.h"
  17. /*
  18. * tunables
  19. */
  20. /* max queue in one round of service */
  21. static const int cfq_quantum = 4;
  22. static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
  23. /* maximum backwards seek, in KiB */
  24. static const int cfq_back_max = 16 * 1024;
  25. /* penalty of a backwards seek */
  26. static const int cfq_back_penalty = 2;
  27. static const int cfq_slice_sync = HZ / 10;
  28. static int cfq_slice_async = HZ / 25;
  29. static const int cfq_slice_async_rq = 2;
  30. static int cfq_slice_idle = HZ / 125;
  31. static const int cfq_target_latency = HZ * 3/10; /* 300 ms */
  32. static const int cfq_hist_divisor = 4;
  33. /*
  34. * offset from end of service tree
  35. */
  36. #define CFQ_IDLE_DELAY (HZ / 5)
  37. /*
  38. * below this threshold, we consider thinktime immediate
  39. */
  40. #define CFQ_MIN_TT (2)
  41. /*
  42. * Allow merged cfqqs to perform this amount of seeky I/O before
  43. * deciding to break the queues up again.
  44. */
  45. #define CFQQ_COOP_TOUT (HZ)
  46. #define CFQ_SLICE_SCALE (5)
  47. #define CFQ_HW_QUEUE_MIN (5)
  48. #define CFQ_SERVICE_SHIFT 12
  49. #define RQ_CIC(rq) \
  50. ((struct cfq_io_context *) (rq)->elevator_private)
  51. #define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elevator_private2)
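/*
 * While a request is owned by CFQ, two elevator-private pointers in struct
 * request carry its context: elevator_private points at the submitting
 * task's cfq_io_context and elevator_private2 at the cfq_queue the request
 * was queued on. RQ_CIC() and RQ_CFQQ() above are the accessors for them.
 */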
  52. static struct kmem_cache *cfq_pool;
  53. static struct kmem_cache *cfq_ioc_pool;
  54. static DEFINE_PER_CPU(unsigned long, cfq_ioc_count);
  55. static struct completion *ioc_gone;
  56. static DEFINE_SPINLOCK(ioc_gone_lock);
  57. #define CFQ_PRIO_LISTS IOPRIO_BE_NR
  58. #define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
  59. #define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
  60. #define sample_valid(samples) ((samples) > 80)
  61. #define rb_entry_cfqg(node) rb_entry((node), struct cfq_group, rb_node)
  62. /*
  63. * Most of our rbtree usage is for sorting with min extraction, so
  64. * if we cache the leftmost node we don't have to walk down the tree
 * to find it. Idea borrowed from Ingo Molnar's CFS scheduler. We should
  66. * move this into the elevator for the rq sorting as well.
  67. */
  68. struct cfq_rb_root {
  69. struct rb_root rb;
  70. struct rb_node *left;
  71. unsigned count;
  72. u64 min_vdisktime;
  73. struct rb_node *active;
  74. unsigned total_weight;
  75. };
  76. #define CFQ_RB_ROOT (struct cfq_rb_root) { RB_ROOT, NULL, 0, 0, }
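/*
 * Compound-literal initializer for an empty cfq_rb_root: empty rb tree, no
 * cached leftmost node, zero queue count and zero min_vdisktime; the
 * remaining members (active, total_weight) are implicitly zeroed.
 */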
  77. /*
  78. * Per process-grouping structure
  79. */
  80. struct cfq_queue {
  81. /* reference count */
  82. atomic_t ref;
  83. /* various state flags, see below */
  84. unsigned int flags;
  85. /* parent cfq_data */
  86. struct cfq_data *cfqd;
  87. /* service_tree member */
  88. struct rb_node rb_node;
  89. /* service_tree key */
  90. unsigned long rb_key;
  91. /* prio tree member */
  92. struct rb_node p_node;
  93. /* prio tree root we belong to, if any */
  94. struct rb_root *p_root;
  95. /* sorted list of pending requests */
  96. struct rb_root sort_list;
  97. /* if fifo isn't expired, next request to serve */
  98. struct request *next_rq;
  99. /* requests queued in sort_list */
  100. int queued[2];
  101. /* currently allocated requests */
  102. int allocated[2];
  103. /* fifo list of requests in sort_list */
  104. struct list_head fifo;
  105. /* time when queue got scheduled in to dispatch first request. */
  106. unsigned long dispatch_start;
  107. unsigned int allocated_slice;
  108. /* time when first request from queue completed and slice started. */
  109. unsigned long slice_start;
  110. unsigned long slice_end;
  111. long slice_resid;
  112. unsigned int slice_dispatch;
  113. /* pending metadata requests */
  114. int meta_pending;
  115. /* number of requests that are on the dispatch list or inside driver */
  116. int dispatched;
  117. /* io prio of this group */
  118. unsigned short ioprio, org_ioprio;
  119. unsigned short ioprio_class, org_ioprio_class;
  120. unsigned int seek_samples;
  121. u64 seek_total;
  122. sector_t seek_mean;
  123. sector_t last_request_pos;
  124. unsigned long seeky_start;
  125. pid_t pid;
  126. struct cfq_rb_root *service_tree;
  127. struct cfq_queue *new_cfqq;
  128. struct cfq_group *cfqg;
  129. struct cfq_group *orig_cfqg;
  130. /* Sectors dispatched in current dispatch round */
  131. unsigned long nr_sectors;
  132. };
  133. /*
  134. * First index in the service_trees.
  135. * IDLE is handled separately, so it has negative index
  136. */
  137. enum wl_prio_t {
  138. BE_WORKLOAD = 0,
  139. RT_WORKLOAD = 1,
  140. IDLE_WORKLOAD = 2,
  141. };
  142. /*
  143. * Second index in the service_trees.
  144. */
  145. enum wl_type_t {
  146. ASYNC_WORKLOAD = 0,
  147. SYNC_NOIDLE_WORKLOAD = 1,
  148. SYNC_WORKLOAD = 2
  149. };
  150. /* This is per cgroup per device grouping structure */
  151. struct cfq_group {
  152. /* group service_tree member */
  153. struct rb_node rb_node;
  154. /* group service_tree key */
  155. u64 vdisktime;
  156. unsigned int weight;
  157. bool on_st;
  158. /* number of cfqq currently on this group */
  159. int nr_cfqq;
/* Per-group busy queues average. Useful for workload slice calculation. */
  161. unsigned int busy_queues_avg[2];
  162. /*
 * rr lists of queues with requests, one rr list for each priority class.
 * Counts are embedded in the cfq_rb_root.
  165. */
  166. struct cfq_rb_root service_trees[2][3];
  167. struct cfq_rb_root service_tree_idle;
  168. unsigned long saved_workload_slice;
  169. enum wl_type_t saved_workload;
  170. enum wl_prio_t saved_serving_prio;
  171. struct blkio_group blkg;
  172. #ifdef CONFIG_CFQ_GROUP_IOSCHED
  173. struct hlist_node cfqd_node;
  174. atomic_t ref;
  175. #endif
  176. };
  177. /*
  178. * Per block device queue structure
  179. */
  180. struct cfq_data {
  181. struct request_queue *queue;
  182. /* Root service tree for cfq_groups */
  183. struct cfq_rb_root grp_service_tree;
  184. struct cfq_group root_group;
  185. /* Number of active cfq groups on group service tree */
  186. int nr_groups;
  187. /*
  188. * The priority currently being served
  189. */
  190. enum wl_prio_t serving_prio;
  191. enum wl_type_t serving_type;
  192. unsigned long workload_expires;
  193. struct cfq_group *serving_group;
  194. bool noidle_tree_requires_idle;
  195. /*
  196. * Each priority tree is sorted by next_request position. These
  197. * trees are used when determining if two or more queues are
  198. * interleaving requests (see cfq_close_cooperator).
  199. */
  200. struct rb_root prio_trees[CFQ_PRIO_LISTS];
  201. unsigned int busy_queues;
  202. int rq_in_driver[2];
  203. int sync_flight;
  204. /*
  205. * queue-depth detection
  206. */
  207. int rq_queued;
  208. int hw_tag;
  209. /*
  210. * hw_tag can be
 * -1 => indeterminate (cfq will behave as if NCQ is present, to allow better detection)
  212. * 1 => NCQ is present (hw_tag_est_depth is the estimated max depth)
  213. * 0 => no NCQ
  214. */
  215. int hw_tag_est_depth;
  216. unsigned int hw_tag_samples;
  217. /*
  218. * idle window management
  219. */
  220. struct timer_list idle_slice_timer;
  221. struct work_struct unplug_work;
  222. struct cfq_queue *active_queue;
  223. struct cfq_io_context *active_cic;
  224. /*
  225. * async queue for each priority case
  226. */
  227. struct cfq_queue *async_cfqq[2][IOPRIO_BE_NR];
  228. struct cfq_queue *async_idle_cfqq;
  229. sector_t last_position;
  230. /*
  231. * tunables, see top of file
  232. */
  233. unsigned int cfq_quantum;
  234. unsigned int cfq_fifo_expire[2];
  235. unsigned int cfq_back_penalty;
  236. unsigned int cfq_back_max;
  237. unsigned int cfq_slice[2];
  238. unsigned int cfq_slice_async_rq;
  239. unsigned int cfq_slice_idle;
  240. unsigned int cfq_latency;
  241. unsigned int cfq_group_isolation;
  242. struct list_head cic_list;
  243. /*
  244. * Fallback dummy cfqq for extreme OOM conditions
  245. */
  246. struct cfq_queue oom_cfqq;
  247. unsigned long last_delayed_sync;
/* List of cfq groups being managed on this device */
  249. struct hlist_head cfqg_list;
  250. struct rcu_head rcu;
  251. };
  252. static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd);
  253. static struct cfq_rb_root *service_tree_for(struct cfq_group *cfqg,
  254. enum wl_prio_t prio,
  255. enum wl_type_t type,
  256. struct cfq_data *cfqd)
  257. {
  258. if (!cfqg)
  259. return NULL;
  260. if (prio == IDLE_WORKLOAD)
  261. return &cfqg->service_tree_idle;
  262. return &cfqg->service_trees[prio][type];
  263. }
  264. enum cfqq_state_flags {
  265. CFQ_CFQQ_FLAG_on_rr = 0, /* on round-robin busy list */
  266. CFQ_CFQQ_FLAG_wait_request, /* waiting for a request */
  267. CFQ_CFQQ_FLAG_must_dispatch, /* must be allowed a dispatch */
  268. CFQ_CFQQ_FLAG_must_alloc_slice, /* per-slice must_alloc flag */
  269. CFQ_CFQQ_FLAG_fifo_expire, /* FIFO checked in this slice */
  270. CFQ_CFQQ_FLAG_idle_window, /* slice idling enabled */
  271. CFQ_CFQQ_FLAG_prio_changed, /* task priority has changed */
  272. CFQ_CFQQ_FLAG_slice_new, /* no requests dispatched in slice */
  273. CFQ_CFQQ_FLAG_sync, /* synchronous queue */
  274. CFQ_CFQQ_FLAG_coop, /* cfqq is shared */
  275. CFQ_CFQQ_FLAG_deep, /* sync cfqq experienced large depth */
  276. CFQ_CFQQ_FLAG_wait_busy, /* Waiting for next request */
  277. };
  278. #define CFQ_CFQQ_FNS(name) \
  279. static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
  280. { \
  281. (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
  282. } \
  283. static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
  284. { \
  285. (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
  286. } \
  287. static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
  288. { \
  289. return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
  290. }
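/*
 * CFQ_CFQQ_FNS(name) expands into set/clear/test helpers for one queue
 * flag; e.g. the on_rr line below generates cfq_mark_cfqq_on_rr(),
 * cfq_clear_cfqq_on_rr() and cfq_cfqq_on_rr().
 */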
  291. CFQ_CFQQ_FNS(on_rr);
  292. CFQ_CFQQ_FNS(wait_request);
  293. CFQ_CFQQ_FNS(must_dispatch);
  294. CFQ_CFQQ_FNS(must_alloc_slice);
  295. CFQ_CFQQ_FNS(fifo_expire);
  296. CFQ_CFQQ_FNS(idle_window);
  297. CFQ_CFQQ_FNS(prio_changed);
  298. CFQ_CFQQ_FNS(slice_new);
  299. CFQ_CFQQ_FNS(sync);
  300. CFQ_CFQQ_FNS(coop);
  301. CFQ_CFQQ_FNS(deep);
  302. CFQ_CFQQ_FNS(wait_busy);
  303. #undef CFQ_CFQQ_FNS
  304. #ifdef CONFIG_DEBUG_CFQ_IOSCHED
  305. #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
  306. blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
  307. cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
  308. blkg_path(&(cfqq)->cfqg->blkg), ##args);
  309. #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) \
  310. blk_add_trace_msg((cfqd)->queue, "%s " fmt, \
  311. blkg_path(&(cfqg)->blkg), ##args); \
  312. #else
  313. #define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
  314. blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
  315. #define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0);
  316. #endif
  317. #define cfq_log(cfqd, fmt, args...) \
  318. blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
  319. /* Traverses through cfq group service trees */
  320. #define for_each_cfqg_st(cfqg, i, j, st) \
  321. for (i = 0; i <= IDLE_WORKLOAD; i++) \
  322. for (j = 0, st = i < IDLE_WORKLOAD ? &cfqg->service_trees[i][j]\
  323. : &cfqg->service_tree_idle; \
  324. (i < IDLE_WORKLOAD && j <= SYNC_WORKLOAD) || \
  325. (i == IDLE_WORKLOAD && j == 0); \
  326. j++, st = i < IDLE_WORKLOAD ? \
  327. &cfqg->service_trees[i][j]: NULL) \
  328. static inline enum wl_prio_t cfqq_prio(struct cfq_queue *cfqq)
  329. {
  330. if (cfq_class_idle(cfqq))
  331. return IDLE_WORKLOAD;
  332. if (cfq_class_rt(cfqq))
  333. return RT_WORKLOAD;
  334. return BE_WORKLOAD;
  335. }
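/*
 * Map a queue to the second service-tree index: async queues, sync queues
 * that we do not idle on (SYNC_NOIDLE), and regular sync queues.
 */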
  336. static enum wl_type_t cfqq_type(struct cfq_queue *cfqq)
  337. {
  338. if (!cfq_cfqq_sync(cfqq))
  339. return ASYNC_WORKLOAD;
  340. if (!cfq_cfqq_idle_window(cfqq))
  341. return SYNC_NOIDLE_WORKLOAD;
  342. return SYNC_WORKLOAD;
  343. }
  344. static inline int cfq_group_busy_queues_wl(enum wl_prio_t wl,
  345. struct cfq_data *cfqd,
  346. struct cfq_group *cfqg)
  347. {
  348. if (wl == IDLE_WORKLOAD)
  349. return cfqg->service_tree_idle.count;
  350. return cfqg->service_trees[wl][ASYNC_WORKLOAD].count
  351. + cfqg->service_trees[wl][SYNC_NOIDLE_WORKLOAD].count
  352. + cfqg->service_trees[wl][SYNC_WORKLOAD].count;
  353. }
  354. static inline int cfqg_busy_async_queues(struct cfq_data *cfqd,
  355. struct cfq_group *cfqg)
  356. {
  357. return cfqg->service_trees[RT_WORKLOAD][ASYNC_WORKLOAD].count
  358. + cfqg->service_trees[BE_WORKLOAD][ASYNC_WORKLOAD].count;
  359. }
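/*
 * The two helpers above count a group's busy queues per workload class by
 * summing the per-type service tree counts. cfq_group_busy_queues_wl()
 * feeds cfq_group_get_avg_queues() below, which keeps a decaying
 * busy-queue average used for slice sizing.
 */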
  360. static void cfq_dispatch_insert(struct request_queue *, struct request *);
  361. static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
  362. struct io_context *, gfp_t);
  363. static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
  364. struct io_context *);
  365. static inline int rq_in_driver(struct cfq_data *cfqd)
  366. {
  367. return cfqd->rq_in_driver[0] + cfqd->rq_in_driver[1];
  368. }
  369. static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
  370. bool is_sync)
  371. {
  372. return cic->cfqq[is_sync];
  373. }
  374. static inline void cic_set_cfqq(struct cfq_io_context *cic,
  375. struct cfq_queue *cfqq, bool is_sync)
  376. {
  377. cic->cfqq[is_sync] = cfqq;
  378. }
  379. /*
  380. * We regard a request as SYNC, if it's either a read or has the SYNC bit
  381. * set (in which case it could also be direct WRITE).
  382. */
  383. static inline bool cfq_bio_sync(struct bio *bio)
  384. {
  385. return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO);
  386. }
  387. /*
  388. * scheduler run of queue, if there are requests pending and no one in the
  389. * driver that will restart queueing
  390. */
  391. static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
  392. {
  393. if (cfqd->busy_queues) {
  394. cfq_log(cfqd, "schedule dispatch");
  395. kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work);
  396. }
  397. }
  398. static int cfq_queue_empty(struct request_queue *q)
  399. {
  400. struct cfq_data *cfqd = q->elevator->elevator_data;
  401. return !cfqd->rq_queued;
  402. }
  403. /*
  404. * Scale schedule slice based on io priority. Use the sync time slice only
  405. * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only should not get the full sync slice length.
  407. */
  408. static inline int cfq_prio_slice(struct cfq_data *cfqd, bool sync,
  409. unsigned short prio)
  410. {
  411. const int base_slice = cfqd->cfq_slice[sync];
  412. WARN_ON(prio >= IOPRIO_BE_NR);
  413. return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - prio));
  414. }
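/*
 * Example, assuming HZ=1000 so the sync base slice (cfq_slice_sync = HZ/10)
 * is 100ms: each priority step is worth base_slice/CFQ_SLICE_SCALE = 20ms,
 * so ioprio 4 (the default) gets 100ms, ioprio 0 gets 180ms and ioprio 7
 * gets 40ms.
 */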
  415. static inline int
  416. cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  417. {
  418. return cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio);
  419. }
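/*
 * Convert a slice of service time into vdisktime. The delta is shifted up
 * by CFQ_SERVICE_SHIFT for precision and scaled by
 * BLKIO_WEIGHT_DEFAULT/weight, so a group with twice the weight accrues
 * vdisktime half as fast and therefore receives twice the disk time.
 */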
  420. static inline u64 cfq_scale_slice(unsigned long delta, struct cfq_group *cfqg)
  421. {
  422. u64 d = delta << CFQ_SERVICE_SHIFT;
  423. d = d * BLKIO_WEIGHT_DEFAULT;
  424. do_div(d, cfqg->weight);
  425. return d;
  426. }
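/*
 * max_vdisktime()/min_vdisktime() below compare vdisktimes through a signed
 * delta, so the result stays correct even if the unsigned 64-bit values
 * wrap; the same pattern is used for vruntime in the CFS cpu scheduler.
 */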
  427. static inline u64 max_vdisktime(u64 min_vdisktime, u64 vdisktime)
  428. {
  429. s64 delta = (s64)(vdisktime - min_vdisktime);
  430. if (delta > 0)
  431. min_vdisktime = vdisktime;
  432. return min_vdisktime;
  433. }
  434. static inline u64 min_vdisktime(u64 min_vdisktime, u64 vdisktime)
  435. {
  436. s64 delta = (s64)(vdisktime - min_vdisktime);
  437. if (delta < 0)
  438. min_vdisktime = vdisktime;
  439. return min_vdisktime;
  440. }
  441. static void update_min_vdisktime(struct cfq_rb_root *st)
  442. {
  443. u64 vdisktime = st->min_vdisktime;
  444. struct cfq_group *cfqg;
  445. if (st->active) {
  446. cfqg = rb_entry_cfqg(st->active);
  447. vdisktime = cfqg->vdisktime;
  448. }
  449. if (st->left) {
  450. cfqg = rb_entry_cfqg(st->left);
  451. vdisktime = min_vdisktime(vdisktime, cfqg->vdisktime);
  452. }
  453. st->min_vdisktime = max_vdisktime(st->min_vdisktime, vdisktime);
  454. }
  455. /*
 * Get the averaged number of queues of RT/BE priority.
 * The average is updated with a formula that gives more weight to higher
 * numbers, so it quickly follows sudden increases and decreases slowly.
  459. */
  460. static inline unsigned cfq_group_get_avg_queues(struct cfq_data *cfqd,
  461. struct cfq_group *cfqg, bool rt)
  462. {
  463. unsigned min_q, max_q;
  464. unsigned mult = cfq_hist_divisor - 1;
  465. unsigned round = cfq_hist_divisor / 2;
  466. unsigned busy = cfq_group_busy_queues_wl(rt, cfqd, cfqg);
  467. min_q = min(cfqg->busy_queues_avg[rt], busy);
  468. max_q = max(cfqg->busy_queues_avg[rt], busy);
  469. cfqg->busy_queues_avg[rt] = (mult * max_q + min_q + round) /
  470. cfq_hist_divisor;
  471. return cfqg->busy_queues_avg[rt];
  472. }
  473. static inline unsigned
  474. cfq_group_slice(struct cfq_data *cfqd, struct cfq_group *cfqg)
  475. {
  476. struct cfq_rb_root *st = &cfqd->grp_service_tree;
  477. return cfq_target_latency * cfqg->weight / st->total_weight;
  478. }
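/*
 * cfq_group_slice() above gives each group a share of the 300ms target
 * latency proportional to its weight on the group service tree;
 * cfq_set_prio_slice() below then shrinks an individual queue's slice when
 * the expected latency across the group's busy queues would exceed that
 * share.
 */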
  479. static inline void
  480. cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  481. {
  482. unsigned slice = cfq_prio_to_slice(cfqd, cfqq);
  483. if (cfqd->cfq_latency) {
  484. /*
  485. * interested queues (we consider only the ones with the same
  486. * priority class in the cfq group)
  487. */
  488. unsigned iq = cfq_group_get_avg_queues(cfqd, cfqq->cfqg,
  489. cfq_class_rt(cfqq));
  490. unsigned sync_slice = cfqd->cfq_slice[1];
  491. unsigned expect_latency = sync_slice * iq;
  492. unsigned group_slice = cfq_group_slice(cfqd, cfqq->cfqg);
  493. if (expect_latency > group_slice) {
  494. unsigned base_low_slice = 2 * cfqd->cfq_slice_idle;
  495. /* scale low_slice according to IO priority
  496. * and sync vs async */
  497. unsigned low_slice =
  498. min(slice, base_low_slice * slice / sync_slice);
  499. /* the adapted slice value is scaled to fit all iqs
  500. * into the target latency */
  501. slice = max(slice * group_slice / expect_latency,
  502. low_slice);
  503. }
  504. }
  505. cfqq->slice_start = jiffies;
  506. cfqq->slice_end = jiffies + slice;
  507. cfqq->allocated_slice = slice;
  508. cfq_log_cfqq(cfqd, cfqq, "set_slice=%lu", cfqq->slice_end - jiffies);
  509. }
  510. /*
  511. * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
  512. * isn't valid until the first request from the dispatch is activated
  513. * and the slice time set.
  514. */
  515. static inline bool cfq_slice_used(struct cfq_queue *cfqq)
  516. {
  517. if (cfq_cfqq_slice_new(cfqq))
  518. return 0;
  519. if (time_before(jiffies, cfqq->slice_end))
  520. return 0;
  521. return 1;
  522. }
  523. /*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
  525. * We choose the request that is closest to the head right now. Distance
  526. * behind the head is penalized and only allowed to a certain extent.
  527. */
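/*
 * Worked example: with the default cfq_back_max (16384 KiB, i.e. 32768
 * sectors of allowed backward seek) and cfq_back_penalty = 2, a request
 * 100 sectors behind the head costs 200, so a competing request up to 200
 * sectors ahead of the head is still preferred over it.
 */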
  528. static struct request *
  529. cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2, sector_t last)
  530. {
  531. sector_t s1, s2, d1 = 0, d2 = 0;
  532. unsigned long back_max;
  533. #define CFQ_RQ1_WRAP 0x01 /* request 1 wraps */
  534. #define CFQ_RQ2_WRAP 0x02 /* request 2 wraps */
  535. unsigned wrap = 0; /* bit mask: requests behind the disk head? */
  536. if (rq1 == NULL || rq1 == rq2)
  537. return rq2;
  538. if (rq2 == NULL)
  539. return rq1;
  540. if (rq_is_sync(rq1) && !rq_is_sync(rq2))
  541. return rq1;
  542. else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
  543. return rq2;
  544. if (rq_is_meta(rq1) && !rq_is_meta(rq2))
  545. return rq1;
  546. else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
  547. return rq2;
  548. s1 = blk_rq_pos(rq1);
  549. s2 = blk_rq_pos(rq2);
  550. /*
  551. * by definition, 1KiB is 2 sectors
  552. */
  553. back_max = cfqd->cfq_back_max * 2;
  554. /*
  555. * Strict one way elevator _except_ in the case where we allow
  556. * short backward seeks which are biased as twice the cost of a
  557. * similar forward seek.
  558. */
  559. if (s1 >= last)
  560. d1 = s1 - last;
  561. else if (s1 + back_max >= last)
  562. d1 = (last - s1) * cfqd->cfq_back_penalty;
  563. else
  564. wrap |= CFQ_RQ1_WRAP;
  565. if (s2 >= last)
  566. d2 = s2 - last;
  567. else if (s2 + back_max >= last)
  568. d2 = (last - s2) * cfqd->cfq_back_penalty;
  569. else
  570. wrap |= CFQ_RQ2_WRAP;
  571. /* Found required data */
  572. /*
  573. * By doing switch() on the bit mask "wrap" we avoid having to
  574. * check two variables for all permutations: --> faster!
  575. */
  576. switch (wrap) {
  577. case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
  578. if (d1 < d2)
  579. return rq1;
  580. else if (d2 < d1)
  581. return rq2;
  582. else {
  583. if (s1 >= s2)
  584. return rq1;
  585. else
  586. return rq2;
  587. }
  588. case CFQ_RQ2_WRAP:
  589. return rq1;
  590. case CFQ_RQ1_WRAP:
  591. return rq2;
  592. case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
  593. default:
  594. /*
  595. * Since both rqs are wrapped,
  596. * start with the one that's further behind head
  597. * (--> only *one* back seek required),
  598. * since back seek takes more time than forward.
  599. */
  600. if (s1 <= s2)
  601. return rq1;
  602. else
  603. return rq2;
  604. }
  605. }
  606. /*
 * The functions below implement the cached-leftmost-node handling for the rbtrees.
  608. */
  609. static struct cfq_queue *cfq_rb_first(struct cfq_rb_root *root)
  610. {
  611. /* Service tree is empty */
  612. if (!root->count)
  613. return NULL;
  614. if (!root->left)
  615. root->left = rb_first(&root->rb);
  616. if (root->left)
  617. return rb_entry(root->left, struct cfq_queue, rb_node);
  618. return NULL;
  619. }
  620. static struct cfq_group *cfq_rb_first_group(struct cfq_rb_root *root)
  621. {
  622. if (!root->left)
  623. root->left = rb_first(&root->rb);
  624. if (root->left)
  625. return rb_entry_cfqg(root->left);
  626. return NULL;
  627. }
  628. static void rb_erase_init(struct rb_node *n, struct rb_root *root)
  629. {
  630. rb_erase(n, root);
  631. RB_CLEAR_NODE(n);
  632. }
  633. static void cfq_rb_erase(struct rb_node *n, struct cfq_rb_root *root)
  634. {
  635. if (root->left == n)
  636. root->left = NULL;
  637. rb_erase_init(n, &root->rb);
  638. --root->count;
  639. }
  640. /*
  641. * would be nice to take fifo expire time into account as well
  642. */
  643. static struct request *
  644. cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  645. struct request *last)
  646. {
  647. struct rb_node *rbnext = rb_next(&last->rb_node);
  648. struct rb_node *rbprev = rb_prev(&last->rb_node);
  649. struct request *next = NULL, *prev = NULL;
  650. BUG_ON(RB_EMPTY_NODE(&last->rb_node));
  651. if (rbprev)
  652. prev = rb_entry_rq(rbprev);
  653. if (rbnext)
  654. next = rb_entry_rq(rbnext);
  655. else {
  656. rbnext = rb_first(&cfqq->sort_list);
  657. if (rbnext && rbnext != &last->rb_node)
  658. next = rb_entry_rq(rbnext);
  659. }
  660. return cfq_choose_req(cfqd, next, prev, blk_rq_pos(last));
  661. }
  662. static unsigned long cfq_slice_offset(struct cfq_data *cfqd,
  663. struct cfq_queue *cfqq)
  664. {
  665. /*
  666. * just an approximation, should be ok.
  667. */
  668. return (cfqq->cfqg->nr_cfqq - 1) * (cfq_prio_slice(cfqd, 1, 0) -
  669. cfq_prio_slice(cfqd, cfq_cfqq_sync(cfqq), cfqq->ioprio));
  670. }
  671. static inline s64
  672. cfqg_key(struct cfq_rb_root *st, struct cfq_group *cfqg)
  673. {
  674. return cfqg->vdisktime - st->min_vdisktime;
  675. }
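/*
 * Groups are keyed on the service tree by vdisktime relative to the tree's
 * min_vdisktime (cfqg_key() above); __cfq_group_service_tree_add() below
 * keeps st->left pointing at the smallest-key, i.e. leftmost, group.
 */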
  676. static void
  677. __cfq_group_service_tree_add(struct cfq_rb_root *st, struct cfq_group *cfqg)
  678. {
  679. struct rb_node **node = &st->rb.rb_node;
  680. struct rb_node *parent = NULL;
  681. struct cfq_group *__cfqg;
  682. s64 key = cfqg_key(st, cfqg);
  683. int left = 1;
  684. while (*node != NULL) {
  685. parent = *node;
  686. __cfqg = rb_entry_cfqg(parent);
  687. if (key < cfqg_key(st, __cfqg))
  688. node = &parent->rb_left;
  689. else {
  690. node = &parent->rb_right;
  691. left = 0;
  692. }
  693. }
  694. if (left)
  695. st->left = &cfqg->rb_node;
  696. rb_link_node(&cfqg->rb_node, parent, node);
  697. rb_insert_color(&cfqg->rb_node, &st->rb);
  698. }
  699. static void
  700. cfq_group_service_tree_add(struct cfq_data *cfqd, struct cfq_group *cfqg)
  701. {
  702. struct cfq_rb_root *st = &cfqd->grp_service_tree;
  703. struct cfq_group *__cfqg;
  704. struct rb_node *n;
  705. cfqg->nr_cfqq++;
  706. if (cfqg->on_st)
  707. return;
  708. /*
 * Currently put the group at the end. Later, implement something so that
 * groups get a lower vtime based on their weights, so that a group does not
 * lose everything if it was not continuously backlogged.
  712. */
  713. n = rb_last(&st->rb);
  714. if (n) {
  715. __cfqg = rb_entry_cfqg(n);
  716. cfqg->vdisktime = __cfqg->vdisktime + CFQ_IDLE_DELAY;
  717. } else
  718. cfqg->vdisktime = st->min_vdisktime;
  719. __cfq_group_service_tree_add(st, cfqg);
  720. cfqg->on_st = true;
  721. cfqd->nr_groups++;
  722. st->total_weight += cfqg->weight;
  723. }
  724. static void
  725. cfq_group_service_tree_del(struct cfq_data *cfqd, struct cfq_group *cfqg)
  726. {
  727. struct cfq_rb_root *st = &cfqd->grp_service_tree;
  728. if (st->active == &cfqg->rb_node)
  729. st->active = NULL;
  730. BUG_ON(cfqg->nr_cfqq < 1);
  731. cfqg->nr_cfqq--;
  732. /* If there are other cfq queues under this group, don't delete it */
  733. if (cfqg->nr_cfqq)
  734. return;
  735. cfq_log_cfqg(cfqd, cfqg, "del_from_rr group");
  736. cfqg->on_st = false;
  737. cfqd->nr_groups--;
  738. st->total_weight -= cfqg->weight;
  739. if (!RB_EMPTY_NODE(&cfqg->rb_node))
  740. cfq_rb_erase(&cfqg->rb_node, st);
  741. cfqg->saved_workload_slice = 0;
  742. blkiocg_update_blkio_group_dequeue_stats(&cfqg->blkg, 1);
  743. }
  744. static inline unsigned int cfq_cfqq_slice_usage(struct cfq_queue *cfqq)
  745. {
  746. unsigned int slice_used;
  747. /*
  748. * Queue got expired before even a single request completed or
  749. * got expired immediately after first request completion.
  750. */
  751. if (!cfqq->slice_start || cfqq->slice_start == jiffies) {
  752. /*
  753. * Also charge the seek time incurred to the group, otherwise
 * if there are multiple queues in the group, each can dispatch
 * a single request on seeky media, causing lots of seek time that
 * the group would otherwise never know about.
  757. */
  758. slice_used = max_t(unsigned, (jiffies - cfqq->dispatch_start),
  759. 1);
  760. } else {
  761. slice_used = jiffies - cfqq->slice_start;
  762. if (slice_used > cfqq->allocated_slice)
  763. slice_used = cfqq->allocated_slice;
  764. }
  765. cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u sect=%lu", slice_used,
  766. cfqq->nr_sectors);
  767. return slice_used;
  768. }
  769. static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg,
  770. struct cfq_queue *cfqq)
  771. {
  772. struct cfq_rb_root *st = &cfqd->grp_service_tree;
  773. unsigned int used_sl, charge_sl;
  774. int nr_sync = cfqg->nr_cfqq - cfqg_busy_async_queues(cfqd, cfqg)
  775. - cfqg->service_tree_idle.count;
  776. BUG_ON(nr_sync < 0);
  777. used_sl = charge_sl = cfq_cfqq_slice_usage(cfqq);
  778. if (!cfq_cfqq_sync(cfqq) && !nr_sync)
  779. charge_sl = cfqq->allocated_slice;
  780. /* Can't update vdisktime while group is on service tree */
  781. cfq_rb_erase(&cfqg->rb_node, st);
  782. cfqg->vdisktime += cfq_scale_slice(charge_sl, cfqg);
  783. __cfq_group_service_tree_add(st, cfqg);
  784. /* This group is being expired. Save the context */
  785. if (time_after(cfqd->workload_expires, jiffies)) {
  786. cfqg->saved_workload_slice = cfqd->workload_expires
  787. - jiffies;
  788. cfqg->saved_workload = cfqd->serving_type;
  789. cfqg->saved_serving_prio = cfqd->serving_prio;
  790. } else
  791. cfqg->saved_workload_slice = 0;
  792. cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime,
  793. st->min_vdisktime);
  794. blkiocg_update_blkio_group_stats(&cfqg->blkg, used_sl,
  795. cfqq->nr_sectors);
  796. }
  797. #ifdef CONFIG_CFQ_GROUP_IOSCHED
  798. static inline struct cfq_group *cfqg_of_blkg(struct blkio_group *blkg)
  799. {
  800. if (blkg)
  801. return container_of(blkg, struct cfq_group, blkg);
  802. return NULL;
  803. }
  804. void
  805. cfq_update_blkio_group_weight(struct blkio_group *blkg, unsigned int weight)
  806. {
  807. cfqg_of_blkg(blkg)->weight = weight;
  808. }
  809. static struct cfq_group *
  810. cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
  811. {
  812. struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
  813. struct cfq_group *cfqg = NULL;
  814. void *key = cfqd;
  815. int i, j;
  816. struct cfq_rb_root *st;
  817. struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
  818. unsigned int major, minor;
	/* Do we need to take this reference? */
	if (!blkiocg_css_tryget(blkcg))
		return NULL;
  822. cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
  823. if (cfqg || !create)
  824. goto done;
  825. cfqg = kzalloc_node(sizeof(*cfqg), GFP_ATOMIC, cfqd->queue->node);
  826. if (!cfqg)
  827. goto done;
  828. cfqg->weight = blkcg->weight;
  829. for_each_cfqg_st(cfqg, i, j, st)
  830. *st = CFQ_RB_ROOT;
  831. RB_CLEAR_NODE(&cfqg->rb_node);
  832. /*
 * Take the initial reference that will be released on destroy.
 * This can be thought of as a joint reference by cgroup and
  835. * elevator which will be dropped by either elevator exit
  836. * or cgroup deletion path depending on who is exiting first.
  837. */
  838. atomic_set(&cfqg->ref, 1);
  839. /* Add group onto cgroup list */
  840. sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
  841. blkiocg_add_blkio_group(blkcg, &cfqg->blkg, (void *)cfqd,
  842. MKDEV(major, minor));
  843. /* Add group on cfqd list */
  844. hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
  845. done:
  846. blkiocg_css_put(blkcg);
  847. return cfqg;
  848. }
  849. /*
  850. * Search for the cfq group current task belongs to. If create = 1, then also
  851. * create the cfq group if it does not exist. request_queue lock must be held.
  852. */
  853. static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
  854. {
  855. struct cgroup *cgroup;
  856. struct cfq_group *cfqg = NULL;
  857. rcu_read_lock();
  858. cgroup = task_cgroup(current, blkio_subsys_id);
  859. cfqg = cfq_find_alloc_cfqg(cfqd, cgroup, create);
  860. if (!cfqg && create)
  861. cfqg = &cfqd->root_group;
  862. rcu_read_unlock();
  863. return cfqg;
  864. }
  865. static void cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg)
  866. {
  867. /* Currently, all async queues are mapped to root group */
  868. if (!cfq_cfqq_sync(cfqq))
  869. cfqg = &cfqq->cfqd->root_group;
  870. cfqq->cfqg = cfqg;
  871. /* cfqq reference on cfqg */
  872. atomic_inc(&cfqq->cfqg->ref);
  873. }
  874. static void cfq_put_cfqg(struct cfq_group *cfqg)
  875. {
  876. struct cfq_rb_root *st;
  877. int i, j;
  878. BUG_ON(atomic_read(&cfqg->ref) <= 0);
  879. if (!atomic_dec_and_test(&cfqg->ref))
  880. return;
  881. for_each_cfqg_st(cfqg, i, j, st)
  882. BUG_ON(!RB_EMPTY_ROOT(&st->rb) || st->active != NULL);
  883. kfree(cfqg);
  884. }
  885. static void cfq_destroy_cfqg(struct cfq_data *cfqd, struct cfq_group *cfqg)
  886. {
  887. /* Something wrong if we are trying to remove same group twice */
  888. BUG_ON(hlist_unhashed(&cfqg->cfqd_node));
  889. hlist_del_init(&cfqg->cfqd_node);
  890. /*
  891. * Put the reference taken at the time of creation so that when all
  892. * queues are gone, group can be destroyed.
  893. */
  894. cfq_put_cfqg(cfqg);
  895. }
  896. static void cfq_release_cfq_groups(struct cfq_data *cfqd)
  897. {
  898. struct hlist_node *pos, *n;
  899. struct cfq_group *cfqg;
  900. hlist_for_each_entry_safe(cfqg, pos, n, &cfqd->cfqg_list, cfqd_node) {
  901. /*
  902. * If cgroup removal path got to blk_group first and removed
  903. * it from cgroup list, then it will take care of destroying
  904. * cfqg also.
  905. */
  906. if (!blkiocg_del_blkio_group(&cfqg->blkg))
  907. cfq_destroy_cfqg(cfqd, cfqg);
  908. }
  909. }
  910. /*
  911. * Blk cgroup controller notification saying that blkio_group object is being
  912. * delinked as associated cgroup object is going away. That also means that
  913. * no new IO will come in this group. So get rid of this group as soon as
  914. * any pending IO in the group is finished.
  915. *
  916. * This function is called under rcu_read_lock(). key is the rcu protected
 * pointer. That means "key" is a valid cfq_data pointer as long as we hold
 * the rcu read lock.
  919. *
  920. * "key" was fetched from blkio_group under blkio_cgroup->lock. That means
 * it should not be NULL: even if the elevator was exiting, the cgroup
 * deletion path got to it first.
  923. */
  924. void cfq_unlink_blkio_group(void *key, struct blkio_group *blkg)
  925. {
  926. unsigned long flags;
  927. struct cfq_data *cfqd = key;
  928. spin_lock_irqsave(cfqd->queue->queue_lock, flags);
  929. cfq_destroy_cfqg(cfqd, cfqg_of_blkg(blkg));
  930. spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
  931. }
  932. #else /* GROUP_IOSCHED */
  933. static struct cfq_group *cfq_get_cfqg(struct cfq_data *cfqd, int create)
  934. {
  935. return &cfqd->root_group;
  936. }
  937. static inline void
  938. cfq_link_cfqq_cfqg(struct cfq_queue *cfqq, struct cfq_group *cfqg) {
  939. cfqq->cfqg = cfqg;
  940. }
  941. static void cfq_release_cfq_groups(struct cfq_data *cfqd) {}
  942. static inline void cfq_put_cfqg(struct cfq_group *cfqg) {}
  943. #endif /* GROUP_IOSCHED */
  944. /*
 * The per-group service trees (cfqg->service_trees) hold all pending
 * cfq_queues that have requests waiting to be processed. They are sorted
 * in the order in which we will service the queues.
  948. */
  949. static void cfq_service_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  950. bool add_front)
  951. {
  952. struct rb_node **p, *parent;
  953. struct cfq_queue *__cfqq;
  954. unsigned long rb_key;
  955. struct cfq_rb_root *service_tree;
  956. int left;
  957. int new_cfqq = 1;
  958. int group_changed = 0;
  959. #ifdef CONFIG_CFQ_GROUP_IOSCHED
  960. if (!cfqd->cfq_group_isolation
  961. && cfqq_type(cfqq) == SYNC_NOIDLE_WORKLOAD
  962. && cfqq->cfqg && cfqq->cfqg != &cfqd->root_group) {
  963. /* Move this cfq to root group */
  964. cfq_log_cfqq(cfqd, cfqq, "moving to root group");
  965. if (!RB_EMPTY_NODE(&cfqq->rb_node))
  966. cfq_group_service_tree_del(cfqd, cfqq->cfqg);
  967. cfqq->orig_cfqg = cfqq->cfqg;
  968. cfqq->cfqg = &cfqd->root_group;
  969. atomic_inc(&cfqd->root_group.ref);
  970. group_changed = 1;
  971. } else if (!cfqd->cfq_group_isolation
  972. && cfqq_type(cfqq) == SYNC_WORKLOAD && cfqq->orig_cfqg) {
/* cfqq is sequential now and needs to go back to its original group */
  974. BUG_ON(cfqq->cfqg != &cfqd->root_group);
  975. if (!RB_EMPTY_NODE(&cfqq->rb_node))
  976. cfq_group_service_tree_del(cfqd, cfqq->cfqg);
  977. cfq_put_cfqg(cfqq->cfqg);
  978. cfqq->cfqg = cfqq->orig_cfqg;
  979. cfqq->orig_cfqg = NULL;
  980. group_changed = 1;
  981. cfq_log_cfqq(cfqd, cfqq, "moved to origin group");
  982. }
  983. #endif
  984. service_tree = service_tree_for(cfqq->cfqg, cfqq_prio(cfqq),
  985. cfqq_type(cfqq), cfqd);
  986. if (cfq_class_idle(cfqq)) {
  987. rb_key = CFQ_IDLE_DELAY;
  988. parent = rb_last(&service_tree->rb);
  989. if (parent && parent != &cfqq->rb_node) {
  990. __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
  991. rb_key += __cfqq->rb_key;
  992. } else
  993. rb_key += jiffies;
  994. } else if (!add_front) {
  995. /*
  996. * Get our rb key offset. Subtract any residual slice
  997. * value carried from last service. A negative resid
  998. * count indicates slice overrun, and this should position
  999. * the next service time further away in the tree.
  1000. */
  1001. rb_key = cfq_slice_offset(cfqd, cfqq) + jiffies;
  1002. rb_key -= cfqq->slice_resid;
  1003. cfqq->slice_resid = 0;
  1004. } else {
  1005. rb_key = -HZ;
  1006. __cfqq = cfq_rb_first(service_tree);
  1007. rb_key += __cfqq ? __cfqq->rb_key : jiffies;
  1008. }
  1009. if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
  1010. new_cfqq = 0;
  1011. /*
  1012. * same position, nothing more to do
  1013. */
  1014. if (rb_key == cfqq->rb_key &&
  1015. cfqq->service_tree == service_tree)
  1016. return;
  1017. cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
  1018. cfqq->service_tree = NULL;
  1019. }
  1020. left = 1;
  1021. parent = NULL;
  1022. cfqq->service_tree = service_tree;
  1023. p = &service_tree->rb.rb_node;
  1024. while (*p) {
  1025. struct rb_node **n;
  1026. parent = *p;
  1027. __cfqq = rb_entry(parent, struct cfq_queue, rb_node);
  1028. /*
 * sort by key, which represents service time.
  1030. */
  1031. if (time_before(rb_key, __cfqq->rb_key))
  1032. n = &(*p)->rb_left;
  1033. else {
  1034. n = &(*p)->rb_right;
  1035. left = 0;
  1036. }
  1037. p = n;
  1038. }
  1039. if (left)
  1040. service_tree->left = &cfqq->rb_node;
  1041. cfqq->rb_key = rb_key;
  1042. rb_link_node(&cfqq->rb_node, parent, p);
  1043. rb_insert_color(&cfqq->rb_node, &service_tree->rb);
  1044. service_tree->count++;
  1045. if ((add_front || !new_cfqq) && !group_changed)
  1046. return;
  1047. cfq_group_service_tree_add(cfqd, cfqq->cfqg);
  1048. }
  1049. static struct cfq_queue *
  1050. cfq_prio_tree_lookup(struct cfq_data *cfqd, struct rb_root *root,
  1051. sector_t sector, struct rb_node **ret_parent,
  1052. struct rb_node ***rb_link)
  1053. {
  1054. struct rb_node **p, *parent;
  1055. struct cfq_queue *cfqq = NULL;
  1056. parent = NULL;
  1057. p = &root->rb_node;
  1058. while (*p) {
  1059. struct rb_node **n;
  1060. parent = *p;
  1061. cfqq = rb_entry(parent, struct cfq_queue, p_node);
  1062. /*
  1063. * Sort strictly based on sector. Smallest to the left,
  1064. * largest to the right.
  1065. */
  1066. if (sector > blk_rq_pos(cfqq->next_rq))
  1067. n = &(*p)->rb_right;
  1068. else if (sector < blk_rq_pos(cfqq->next_rq))
  1069. n = &(*p)->rb_left;
  1070. else
  1071. break;
  1072. p = n;
  1073. cfqq = NULL;
  1074. }
  1075. *ret_parent = parent;
  1076. if (rb_link)
  1077. *rb_link = p;
  1078. return cfqq;
  1079. }
  1080. static void cfq_prio_tree_add(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  1081. {
  1082. struct rb_node **p, *parent;
  1083. struct cfq_queue *__cfqq;
  1084. if (cfqq->p_root) {
  1085. rb_erase(&cfqq->p_node, cfqq->p_root);
  1086. cfqq->p_root = NULL;
  1087. }
  1088. if (cfq_class_idle(cfqq))
  1089. return;
  1090. if (!cfqq->next_rq)
  1091. return;
  1092. cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio];
  1093. __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root,
  1094. blk_rq_pos(cfqq->next_rq), &parent, &p);
  1095. if (!__cfqq) {
  1096. rb_link_node(&cfqq->p_node, parent, p);
  1097. rb_insert_color(&cfqq->p_node, cfqq->p_root);
  1098. } else
  1099. cfqq->p_root = NULL;
  1100. }
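/*
 * Note that if another queue already sits at the same sector position in
 * the prio tree, cfq_prio_tree_add() above leaves this queue out (p_root
 * stays NULL) instead of inserting a duplicate key.
 */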
  1101. /*
  1102. * Update cfqq's position in the service tree.
  1103. */
  1104. static void cfq_resort_rr_list(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  1105. {
  1106. /*
  1107. * Resorting requires the cfqq to be on the RR list already.
  1108. */
  1109. if (cfq_cfqq_on_rr(cfqq)) {
  1110. cfq_service_tree_add(cfqd, cfqq, 0);
  1111. cfq_prio_tree_add(cfqd, cfqq);
  1112. }
  1113. }
  1114. /*
  1115. * add to busy list of queues for service, trying to be fair in ordering
  1116. * the pending list according to last request service
  1117. */
  1118. static void cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  1119. {
  1120. cfq_log_cfqq(cfqd, cfqq, "add_to_rr");
  1121. BUG_ON(cfq_cfqq_on_rr(cfqq));
  1122. cfq_mark_cfqq_on_rr(cfqq);
  1123. cfqd->busy_queues++;
  1124. cfq_resort_rr_list(cfqd, cfqq);
  1125. }
  1126. /*
  1127. * Called when the cfqq no longer has requests pending, remove it from
  1128. * the service tree.
  1129. */
  1130. static void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  1131. {
  1132. cfq_log_cfqq(cfqd, cfqq, "del_from_rr");
  1133. BUG_ON(!cfq_cfqq_on_rr(cfqq));
  1134. cfq_clear_cfqq_on_rr(cfqq);
  1135. if (!RB_EMPTY_NODE(&cfqq->rb_node)) {
  1136. cfq_rb_erase(&cfqq->rb_node, cfqq->service_tree);
  1137. cfqq->service_tree = NULL;
  1138. }
  1139. if (cfqq->p_root) {
  1140. rb_erase(&cfqq->p_node, cfqq->p_root);
  1141. cfqq->p_root = NULL;
  1142. }
  1143. cfq_group_service_tree_del(cfqd, cfqq->cfqg);
  1144. BUG_ON(!cfqd->busy_queues);
  1145. cfqd->busy_queues--;
  1146. }
  1147. /*
  1148. * rb tree support functions
  1149. */
  1150. static void cfq_del_rq_rb(struct request *rq)
  1151. {
  1152. struct cfq_queue *cfqq = RQ_CFQQ(rq);
  1153. const int sync = rq_is_sync(rq);
  1154. BUG_ON(!cfqq->queued[sync]);
  1155. cfqq->queued[sync]--;
  1156. elv_rb_del(&cfqq->sort_list, rq);
  1157. if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list)) {
  1158. /*
  1159. * Queue will be deleted from service tree when we actually
  1160. * expire it later. Right now just remove it from prio tree
  1161. * as it is empty.
  1162. */
  1163. if (cfqq->p_root) {
  1164. rb_erase(&cfqq->p_node, cfqq->p_root);
  1165. cfqq->p_root = NULL;
  1166. }
  1167. }
  1168. }
  1169. static void cfq_add_rq_rb(struct request *rq)
  1170. {
  1171. struct cfq_queue *cfqq = RQ_CFQQ(rq);
  1172. struct cfq_data *cfqd = cfqq->cfqd;
  1173. struct request *__alias, *prev;
  1174. cfqq->queued[rq_is_sync(rq)]++;
  1175. /*
  1176. * looks a little odd, but the first insert might return an alias.
  1177. * if that happens, put the alias on the dispatch list
  1178. */
  1179. while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
  1180. cfq_dispatch_insert(cfqd->queue, __alias);
  1181. if (!cfq_cfqq_on_rr(cfqq))
  1182. cfq_add_cfqq_rr(cfqd, cfqq);
  1183. /*
  1184. * check if this request is a better next-serve candidate
  1185. */
  1186. prev = cfqq->next_rq;
  1187. cfqq->next_rq = cfq_choose_req(cfqd, cfqq->next_rq, rq, cfqd->last_position);
  1188. /*
  1189. * adjust priority tree position, if ->next_rq changes
  1190. */
  1191. if (prev != cfqq->next_rq)
  1192. cfq_prio_tree_add(cfqd, cfqq);
  1193. BUG_ON(!cfqq->next_rq);
  1194. }
  1195. static void cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
  1196. {
  1197. elv_rb_del(&cfqq->sort_list, rq);
  1198. cfqq->queued[rq_is_sync(rq)]--;
  1199. cfq_add_rq_rb(rq);
  1200. }
  1201. static struct request *
  1202. cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
  1203. {
  1204. struct task_struct *tsk = current;
  1205. struct cfq_io_context *cic;
  1206. struct cfq_queue *cfqq;
  1207. cic = cfq_cic_lookup(cfqd, tsk->io_context);
  1208. if (!cic)
  1209. return NULL;
  1210. cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
  1211. if (cfqq) {
  1212. sector_t sector = bio->bi_sector + bio_sectors(bio);
  1213. return elv_rb_find(&cfqq->sort_list, sector);
  1214. }
  1215. return NULL;
  1216. }
  1217. static void cfq_activate_request(struct request_queue *q, struct request *rq)
  1218. {
  1219. struct cfq_data *cfqd = q->elevator->elevator_data;
  1220. cfqd->rq_in_driver[rq_is_sync(rq)]++;
  1221. cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
  1222. rq_in_driver(cfqd));
  1223. cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
  1224. }
  1225. static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
  1226. {
  1227. struct cfq_data *cfqd = q->elevator->elevator_data;
  1228. const int sync = rq_is_sync(rq);
  1229. WARN_ON(!cfqd->rq_in_driver[sync]);
  1230. cfqd->rq_in_driver[sync]--;
  1231. cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
  1232. rq_in_driver(cfqd));
  1233. }
  1234. static void cfq_remove_request(struct request *rq)
  1235. {
  1236. struct cfq_queue *cfqq = RQ_CFQQ(rq);
  1237. if (cfqq->next_rq == rq)
  1238. cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);
  1239. list_del_init(&rq->queuelist);
  1240. cfq_del_rq_rb(rq);
  1241. cfqq->cfqd->rq_queued--;
  1242. if (rq_is_meta(rq)) {
  1243. WARN_ON(!cfqq->meta_pending);
  1244. cfqq->meta_pending--;
  1245. }
  1246. }
  1247. static int cfq_merge(struct request_queue *q, struct request **req,
  1248. struct bio *bio)
  1249. {
  1250. struct cfq_data *cfqd = q->elevator->elevator_data;
  1251. struct request *__rq;
  1252. __rq = cfq_find_rq_fmerge(cfqd, bio);
  1253. if (__rq && elv_rq_merge_ok(__rq, bio)) {
  1254. *req = __rq;
  1255. return ELEVATOR_FRONT_MERGE;
  1256. }
  1257. return ELEVATOR_NO_MERGE;
  1258. }
  1259. static void cfq_merged_request(struct request_queue *q, struct request *req,
  1260. int type)
  1261. {
  1262. if (type == ELEVATOR_FRONT_MERGE) {
  1263. struct cfq_queue *cfqq = RQ_CFQQ(req);
  1264. cfq_reposition_rq_rb(cfqq, req);
  1265. }
  1266. }
  1267. static void
  1268. cfq_merged_requests(struct request_queue *q, struct request *rq,
  1269. struct request *next)
  1270. {
  1271. struct cfq_queue *cfqq = RQ_CFQQ(rq);
  1272. /*
  1273. * reposition in fifo if next is older than rq
  1274. */
  1275. if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
  1276. time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
  1277. list_move(&rq->queuelist, &next->queuelist);
  1278. rq_set_fifo_time(rq, rq_fifo_time(next));
  1279. }
  1280. if (cfqq->next_rq == next)
  1281. cfqq->next_rq = rq;
  1282. cfq_remove_request(next);
  1283. }
  1284. static int cfq_allow_merge(struct request_queue *q, struct request *rq,
  1285. struct bio *bio)
  1286. {
  1287. struct cfq_data *cfqd = q->elevator->elevator_data;
  1288. struct cfq_io_context *cic;
  1289. struct cfq_queue *cfqq;
  1290. /*
  1291. * Disallow merge of a sync bio into an async request.
  1292. */
  1293. if (cfq_bio_sync(bio) && !rq_is_sync(rq))
  1294. return false;
  1295. /*
  1296. * Lookup the cfqq that this bio will be queued with. Allow
  1297. * merge only if rq is queued there.
  1298. */
  1299. cic = cfq_cic_lookup(cfqd, current->io_context);
  1300. if (!cic)
  1301. return false;
  1302. cfqq = cic_to_cfqq(cic, cfq_bio_sync(bio));
  1303. return cfqq == RQ_CFQQ(rq);
  1304. }
  1305. static void __cfq_set_active_queue(struct cfq_data *cfqd,
  1306. struct cfq_queue *cfqq)
  1307. {
  1308. if (cfqq) {
  1309. cfq_log_cfqq(cfqd, cfqq, "set_active");
  1310. cfqq->slice_start = 0;
  1311. cfqq->dispatch_start = jiffies;
  1312. cfqq->allocated_slice = 0;
  1313. cfqq->slice_end = 0;
  1314. cfqq->slice_dispatch = 0;
  1315. cfqq->nr_sectors = 0;
  1316. cfq_clear_cfqq_wait_request(cfqq);
  1317. cfq_clear_cfqq_must_dispatch(cfqq);
  1318. cfq_clear_cfqq_must_alloc_slice(cfqq);
  1319. cfq_clear_cfqq_fifo_expire(cfqq);
  1320. cfq_mark_cfqq_slice_new(cfqq);
  1321. del_timer(&cfqd->idle_slice_timer);
  1322. }
  1323. cfqd->active_queue = cfqq;
  1324. }
  1325. /*
  1326. * current cfqq expired its slice (or was too idle), select new one
  1327. */
  1328. static void
  1329. __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  1330. bool timed_out)
  1331. {
  1332. cfq_log_cfqq(cfqd, cfqq, "slice expired t=%d", timed_out);
  1333. if (cfq_cfqq_wait_request(cfqq))
  1334. del_timer(&cfqd->idle_slice_timer);
  1335. cfq_clear_cfqq_wait_request(cfqq);
  1336. cfq_clear_cfqq_wait_busy(cfqq);
  1337. /*
  1338. * store what was left of this slice, if the queue idled/timed out
  1339. */
  1340. if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
  1341. cfqq->slice_resid = cfqq->slice_end - jiffies;
  1342. cfq_log_cfqq(cfqd, cfqq, "resid=%ld", cfqq->slice_resid);
  1343. }
  1344. cfq_group_served(cfqd, cfqq->cfqg, cfqq);
  1345. if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
  1346. cfq_del_cfqq_rr(cfqd, cfqq);
  1347. cfq_resort_rr_list(cfqd, cfqq);
  1348. if (cfqq == cfqd->active_queue)
  1349. cfqd->active_queue = NULL;
  1350. if (&cfqq->cfqg->rb_node == cfqd->grp_service_tree.active)
  1351. cfqd->grp_service_tree.active = NULL;
  1352. if (cfqd->active_cic) {
  1353. put_io_context(cfqd->active_cic->ioc);
  1354. cfqd->active_cic = NULL;
  1355. }
  1356. }
  1357. static inline void cfq_slice_expired(struct cfq_data *cfqd, bool timed_out)
  1358. {
  1359. struct cfq_queue *cfqq = cfqd->active_queue;
  1360. if (cfqq)
  1361. __cfq_slice_expired(cfqd, cfqq, timed_out);
  1362. }
  1363. /*
  1364. * Get next queue for service. Unless we have a queue preemption,
  1365. * we'll simply select the first cfqq in the service tree.
  1366. */
  1367. static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
  1368. {
  1369. struct cfq_rb_root *service_tree =
  1370. service_tree_for(cfqd->serving_group, cfqd->serving_prio,
  1371. cfqd->serving_type, cfqd);
  1372. if (!cfqd->rq_queued)
  1373. return NULL;
  1374. /* There is nothing to dispatch */
  1375. if (!service_tree)
  1376. return NULL;
  1377. if (RB_EMPTY_ROOT(&service_tree->rb))
  1378. return NULL;
  1379. return cfq_rb_first(service_tree);
  1380. }
  1381. static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
  1382. {
  1383. struct cfq_group *cfqg;
  1384. struct cfq_queue *cfqq;
  1385. int i, j;
  1386. struct cfq_rb_root *st;
  1387. if (!cfqd->rq_queued)
  1388. return NULL;
  1389. cfqg = cfq_get_next_cfqg(cfqd);
  1390. if (!cfqg)
  1391. return NULL;
  1392. for_each_cfqg_st(cfqg, i, j, st)
  1393. if ((cfqq = cfq_rb_first(st)) != NULL)
  1394. return cfqq;
  1395. return NULL;
  1396. }
  1397. /*
  1398. * Get and set a new active queue for service.
  1399. */
  1400. static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd,
  1401. struct cfq_queue *cfqq)
  1402. {
  1403. if (!cfqq)
  1404. cfqq = cfq_get_next_queue(cfqd);
  1405. __cfq_set_active_queue(cfqd, cfqq);
  1406. return cfqq;
  1407. }
  1408. static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
  1409. struct request *rq)
  1410. {
  1411. if (blk_rq_pos(rq) >= cfqd->last_position)
  1412. return blk_rq_pos(rq) - cfqd->last_position;
  1413. else
  1414. return cfqd->last_position - blk_rq_pos(rq);
  1415. }
1416. #define CFQQ_SEEK_THR (8 * 1024)
  1417. #define CFQQ_SEEKY(cfqq) ((cfqq)->seek_mean > CFQQ_SEEK_THR)
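/*
* Explanatory note (not in the original source): seek_mean and blk_rq_pos()
* are in 512-byte sectors, so a CFQQ_SEEK_THR of 8 * 1024 sectors is roughly
* 4MB. CFQQ_SEEKY() marks a queue whose mean seek distance exceeds that as
* seeky, which among other things excludes it from the close-cooperator
* merging below.
*/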
  1418. static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  1419. struct request *rq)
  1420. {
  1421. sector_t sdist = cfqq->seek_mean;
  1422. if (!sample_valid(cfqq->seek_samples))
  1423. sdist = CFQQ_SEEK_THR;
  1424. return cfq_dist_from_last(cfqd, rq) <= sdist;
  1425. }
  1426. static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
  1427. struct cfq_queue *cur_cfqq)
  1428. {
  1429. struct rb_root *root = &cfqd->prio_trees[cur_cfqq->org_ioprio];
  1430. struct rb_node *parent, *node;
  1431. struct cfq_queue *__cfqq;
  1432. sector_t sector = cfqd->last_position;
  1433. if (RB_EMPTY_ROOT(root))
  1434. return NULL;
  1435. /*
  1436. * First, if we find a request starting at the end of the last
  1437. * request, choose it.
  1438. */
  1439. __cfqq = cfq_prio_tree_lookup(cfqd, root, sector, &parent, NULL);
  1440. if (__cfqq)
  1441. return __cfqq;
  1442. /*
  1443. * If the exact sector wasn't found, the parent of the NULL leaf
  1444. * will contain the closest sector.
  1445. */
  1446. __cfqq = rb_entry(parent, struct cfq_queue, p_node);
  1447. if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
  1448. return __cfqq;
  1449. if (blk_rq_pos(__cfqq->next_rq) < sector)
  1450. node = rb_next(&__cfqq->p_node);
  1451. else
  1452. node = rb_prev(&__cfqq->p_node);
  1453. if (!node)
  1454. return NULL;
  1455. __cfqq = rb_entry(node, struct cfq_queue, p_node);
  1456. if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
  1457. return __cfqq;
  1458. return NULL;
  1459. }
  1460. /*
  1461. * cfqd - obvious
  1462. * cur_cfqq - passed in so that we don't decide that the current queue is
  1463. * closely cooperating with itself.
  1464. *
1465. * So, basically we're assuming that cur_cfqq has dispatched at least
  1466. * one request, and that cfqd->last_position reflects a position on the disk
  1467. * associated with the I/O issued by cur_cfqq. I'm not sure this is a valid
  1468. * assumption.
  1469. */
  1470. static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
  1471. struct cfq_queue *cur_cfqq)
  1472. {
  1473. struct cfq_queue *cfqq;
  1474. if (!cfq_cfqq_sync(cur_cfqq))
  1475. return NULL;
  1476. if (CFQQ_SEEKY(cur_cfqq))
  1477. return NULL;
  1478. /*
  1479. * Don't search priority tree if it's the only queue in the group.
  1480. */
  1481. if (cur_cfqq->cfqg->nr_cfqq == 1)
  1482. return NULL;
  1483. /*
1484. * We should notice if some of the queues are cooperating, e.g.
1485. * working closely on the same area of the disk. In that case,
1486. * we can group them together and not waste time idling.
  1487. */
  1488. cfqq = cfqq_close(cfqd, cur_cfqq);
  1489. if (!cfqq)
  1490. return NULL;
  1491. /* If new queue belongs to different cfq_group, don't choose it */
  1492. if (cur_cfqq->cfqg != cfqq->cfqg)
  1493. return NULL;
  1494. /*
  1495. * It only makes sense to merge sync queues.
  1496. */
  1497. if (!cfq_cfqq_sync(cfqq))
  1498. return NULL;
  1499. if (CFQQ_SEEKY(cfqq))
  1500. return NULL;
  1501. /*
  1502. * Do not merge queues of different priority classes
  1503. */
  1504. if (cfq_class_rt(cfqq) != cfq_class_rt(cur_cfqq))
  1505. return NULL;
  1506. return cfqq;
  1507. }
  1508. /*
  1509. * Determine whether we should enforce idle window for this queue.
  1510. */
  1511. static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  1512. {
  1513. enum wl_prio_t prio = cfqq_prio(cfqq);
  1514. struct cfq_rb_root *service_tree = cfqq->service_tree;
  1515. BUG_ON(!service_tree);
  1516. BUG_ON(!service_tree->count);
  1517. /* We never do for idle class queues. */
  1518. if (prio == IDLE_WORKLOAD)
  1519. return false;
  1520. /* We do for queues that were marked with idle window flag. */
  1521. if (cfq_cfqq_idle_window(cfqq) &&
  1522. !(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
  1523. return true;
  1524. /*
  1525. * Otherwise, we do only if they are the last ones
  1526. * in their service tree.
  1527. */
  1528. return service_tree->count == 1;
  1529. }
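/*
* Rationale sketch (an assumption, not stated in the original source):
* idling for the last queue in a service tree keeps the disk briefly idle
* instead of handing it to another workload or group, which is what lets
* this workload keep its share of the group slice even when its remaining
* queue is not individually marked for idle-window treatment.
*/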
  1530. static void cfq_arm_slice_timer(struct cfq_data *cfqd)
  1531. {
  1532. struct cfq_queue *cfqq = cfqd->active_queue;
  1533. struct cfq_io_context *cic;
  1534. unsigned long sl;
  1535. /*
  1536. * SSD device without seek penalty, disable idling. But only do so
  1537. * for devices that support queuing, otherwise we still have a problem
  1538. * with sync vs async workloads.
  1539. */
  1540. if (blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag)
  1541. return;
  1542. WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
  1543. WARN_ON(cfq_cfqq_slice_new(cfqq));
  1544. /*
  1545. * idle is disabled, either manually or by past process history
  1546. */
  1547. if (!cfqd->cfq_slice_idle || !cfq_should_idle(cfqd, cfqq))
  1548. return;
  1549. /*
  1550. * still active requests from this queue, don't idle
  1551. */
  1552. if (cfqq->dispatched)
  1553. return;
  1554. /*
  1555. * task has exited, don't wait
  1556. */
  1557. cic = cfqd->active_cic;
  1558. if (!cic || !atomic_read(&cic->ioc->nr_tasks))
  1559. return;
  1560. /*
  1561. * If our average think time is larger than the remaining time
  1562. * slice, then don't idle. This avoids overrunning the allotted
  1563. * time slice.
  1564. */
  1565. if (sample_valid(cic->ttime_samples) &&
  1566. (cfqq->slice_end - jiffies < cic->ttime_mean))
  1567. return;
  1568. cfq_mark_cfqq_wait_request(cfqq);
  1569. sl = cfqd->cfq_slice_idle;
  1570. mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
  1571. cfq_log_cfqq(cfqd, cfqq, "arm_idle: %lu", sl);
  1572. }
  1573. /*
  1574. * Move request from internal lists to the request queue dispatch list.
  1575. */
  1576. static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
  1577. {
  1578. struct cfq_data *cfqd = q->elevator->elevator_data;
  1579. struct cfq_queue *cfqq = RQ_CFQQ(rq);
  1580. cfq_log_cfqq(cfqd, cfqq, "dispatch_insert");
  1581. cfqq->next_rq = cfq_find_next_rq(cfqd, cfqq, rq);
  1582. cfq_remove_request(rq);
  1583. cfqq->dispatched++;
  1584. elv_dispatch_sort(q, rq);
  1585. if (cfq_cfqq_sync(cfqq))
  1586. cfqd->sync_flight++;
  1587. cfqq->nr_sectors += blk_rq_sectors(rq);
  1588. }
  1589. /*
  1590. * return expired entry, or NULL to just start from scratch in rbtree
  1591. */
  1592. static struct request *cfq_check_fifo(struct cfq_queue *cfqq)
  1593. {
  1594. struct request *rq = NULL;
  1595. if (cfq_cfqq_fifo_expire(cfqq))
  1596. return NULL;
  1597. cfq_mark_cfqq_fifo_expire(cfqq);
  1598. if (list_empty(&cfqq->fifo))
  1599. return NULL;
  1600. rq = rq_entry_fifo(cfqq->fifo.next);
  1601. if (time_before(jiffies, rq_fifo_time(rq)))
  1602. rq = NULL;
  1603. cfq_log_cfqq(cfqq->cfqd, cfqq, "fifo=%p", rq);
  1604. return rq;
  1605. }
  1606. static inline int
  1607. cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  1608. {
  1609. const int base_rq = cfqd->cfq_slice_async_rq;
  1610. WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
  1611. return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
  1612. }
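/*
* Worked example (assuming the usual cfq_slice_async_rq default of 2 and
* CFQ_PRIO_LISTS == 8, neither shown in this excerpt): an ioprio 0 queue
* may have up to 2 * (2 + 2 * 7) = 32 requests dispatched per slice round,
* ioprio 4 gets 2 * (2 + 2 * 3) = 16 and ioprio 7 only 2 * (2 + 0) = 4.
*/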
  1613. /*
  1614. * Must be called with the queue_lock held.
  1615. */
  1616. static int cfqq_process_refs(struct cfq_queue *cfqq)
  1617. {
  1618. int process_refs, io_refs;
  1619. io_refs = cfqq->allocated[READ] + cfqq->allocated[WRITE];
  1620. process_refs = atomic_read(&cfqq->ref) - io_refs;
  1621. BUG_ON(process_refs < 0);
  1622. return process_refs;
  1623. }
  1624. static void cfq_setup_merge(struct cfq_queue *cfqq, struct cfq_queue *new_cfqq)
  1625. {
  1626. int process_refs, new_process_refs;
  1627. struct cfq_queue *__cfqq;
  1628. /* Avoid a circular list and skip interim queue merges */
  1629. while ((__cfqq = new_cfqq->new_cfqq)) {
  1630. if (__cfqq == cfqq)
  1631. return;
  1632. new_cfqq = __cfqq;
  1633. }
  1634. process_refs = cfqq_process_refs(cfqq);
  1635. /*
  1636. * If the process for the cfqq has gone away, there is no
  1637. * sense in merging the queues.
  1638. */
  1639. if (process_refs == 0)
  1640. return;
  1641. /*
  1642. * Merge in the direction of the lesser amount of work.
  1643. */
  1644. new_process_refs = cfqq_process_refs(new_cfqq);
  1645. if (new_process_refs >= process_refs) {
  1646. cfqq->new_cfqq = new_cfqq;
  1647. atomic_add(process_refs, &new_cfqq->ref);
  1648. } else {
  1649. new_cfqq->new_cfqq = cfqq;
  1650. atomic_add(new_process_refs, &cfqq->ref);
  1651. }
  1652. }
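/*
* Explanatory example (not in the original source): if cfqq has one process
* reference and new_cfqq has three, the test above makes cfqq the queue that
* is scheduled to merge (cfqq->new_cfqq = new_cfqq) and adds its process
* reference count to new_cfqq, so a merge always moves the smaller set of
* processes onto the busier queue.
*/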
  1653. static enum wl_type_t cfq_choose_wl(struct cfq_data *cfqd,
  1654. struct cfq_group *cfqg, enum wl_prio_t prio,
  1655. bool prio_changed)
  1656. {
  1657. struct cfq_queue *queue;
  1658. int i;
  1659. bool key_valid = false;
  1660. unsigned long lowest_key = 0;
  1661. enum wl_type_t cur_best = SYNC_NOIDLE_WORKLOAD;
  1662. if (prio_changed) {
  1663. /*
  1664. * When priorities switched, we prefer starting
  1665. * from SYNC_NOIDLE (first choice), or just SYNC
  1666. * over ASYNC
  1667. */
  1668. if (service_tree_for(cfqg, prio, cur_best, cfqd)->count)
  1669. return cur_best;
  1670. cur_best = SYNC_WORKLOAD;
  1671. if (service_tree_for(cfqg, prio, cur_best, cfqd)->count)
  1672. return cur_best;
  1673. return ASYNC_WORKLOAD;
  1674. }
  1675. for (i = 0; i < 3; ++i) {
  1676. /* otherwise, select the one with lowest rb_key */
  1677. queue = cfq_rb_first(service_tree_for(cfqg, prio, i, cfqd));
  1678. if (queue &&
  1679. (!key_valid || time_before(queue->rb_key, lowest_key))) {
  1680. lowest_key = queue->rb_key;
  1681. cur_best = i;
  1682. key_valid = true;
  1683. }
  1684. }
  1685. return cur_best;
  1686. }
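/*
* Explanatory note (not in the original source): when the serving priority
* class has just changed we simply prefer SYNC_NOIDLE, then SYNC, then
* ASYNC. Otherwise the loop above picks the workload type whose front queue
* has the smallest rb_key, i.e. the earliest position on its service tree.
*/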
  1687. static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
  1688. {
  1689. enum wl_prio_t previous_prio = cfqd->serving_prio;
  1690. bool prio_changed;
  1691. unsigned slice;
  1692. unsigned count;
  1693. struct cfq_rb_root *st;
  1694. unsigned group_slice;
  1695. if (!cfqg) {
  1696. cfqd->serving_prio = IDLE_WORKLOAD;
  1697. cfqd->workload_expires = jiffies + 1;
  1698. return;
  1699. }
  1700. /* Choose next priority. RT > BE > IDLE */
  1701. if (cfq_group_busy_queues_wl(RT_WORKLOAD, cfqd, cfqg))
  1702. cfqd->serving_prio = RT_WORKLOAD;
  1703. else if (cfq_group_busy_queues_wl(BE_WORKLOAD, cfqd, cfqg))
  1704. cfqd->serving_prio = BE_WORKLOAD;
  1705. else {
  1706. cfqd->serving_prio = IDLE_WORKLOAD;
  1707. cfqd->workload_expires = jiffies + 1;
  1708. return;
  1709. }
  1710. /*
  1711. * For RT and BE, we have to choose also the type
  1712. * (SYNC, SYNC_NOIDLE, ASYNC), and to compute a workload
  1713. * expiration time
  1714. */
  1715. prio_changed = (cfqd->serving_prio != previous_prio);
  1716. st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type,
  1717. cfqd);
  1718. count = st->count;
  1719. /*
  1720. * If priority didn't change, check workload expiration,
  1721. * and that we still have other queues ready
  1722. */
  1723. if (!prio_changed && count &&
  1724. !time_after(jiffies, cfqd->workload_expires))
  1725. return;
  1726. /* otherwise select new workload type */
  1727. cfqd->serving_type =
  1728. cfq_choose_wl(cfqd, cfqg, cfqd->serving_prio, prio_changed);
  1729. st = service_tree_for(cfqg, cfqd->serving_prio, cfqd->serving_type,
  1730. cfqd);
  1731. count = st->count;
  1732. /*
  1733. * the workload slice is computed as a fraction of target latency
  1734. * proportional to the number of queues in that workload, over
  1735. * all the queues in the same priority class
  1736. */
  1737. group_slice = cfq_group_slice(cfqd, cfqg);
  1738. slice = group_slice * count /
  1739. max_t(unsigned, cfqg->busy_queues_avg[cfqd->serving_prio],
  1740. cfq_group_busy_queues_wl(cfqd->serving_prio, cfqd, cfqg));
  1741. if (cfqd->serving_type == ASYNC_WORKLOAD) {
  1742. unsigned int tmp;
  1743. /*
1744. * Async queues are currently system wide. Just taking the
1745. * proportion of queues within the same group will lead to a higher
1746. * async ratio system wide, as the root group generally has a
1747. * higher weight. A more accurate approach would be to
1748. * calculate the system wide async/sync ratio.
  1749. */
  1750. tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
  1751. tmp = tmp/cfqd->busy_queues;
  1752. slice = min_t(unsigned, slice, tmp);
  1753. /* async workload slice is scaled down according to
  1754. * the sync/async slice ratio. */
  1755. slice = slice * cfqd->cfq_slice[0] / cfqd->cfq_slice[1];
  1756. } else
  1757. /* sync workload slice is at least 2 * cfq_slice_idle */
  1758. slice = max(slice, 2 * cfqd->cfq_slice_idle);
  1759. slice = max_t(unsigned, slice, CFQ_MIN_TT);
  1760. cfqd->workload_expires = jiffies + slice;
  1761. cfqd->noidle_tree_requires_idle = false;
  1762. }
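/*
* Worked example for the slice computation above (illustrative numbers,
* not from the original source): with a group slice of 300ms, 2 busy queues
* in the chosen workload and 6 busy queues in that priority class, the
* workload is given 300 * 2 / 6 = 100ms before a new workload type is
* chosen. An async slice is then scaled down by the async/sync slice ratio,
* while a sync slice is kept to at least twice cfq_slice_idle.
*/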
  1763. static struct cfq_group *cfq_get_next_cfqg(struct cfq_data *cfqd)
  1764. {
  1765. struct cfq_rb_root *st = &cfqd->grp_service_tree;
  1766. struct cfq_group *cfqg;
  1767. if (RB_EMPTY_ROOT(&st->rb))
  1768. return NULL;
  1769. cfqg = cfq_rb_first_group(st);
  1770. st->active = &cfqg->rb_node;
  1771. update_min_vdisktime(st);
  1772. return cfqg;
  1773. }
  1774. static void cfq_choose_cfqg(struct cfq_data *cfqd)
  1775. {
  1776. struct cfq_group *cfqg = cfq_get_next_cfqg(cfqd);
  1777. cfqd->serving_group = cfqg;
  1778. /* Restore the workload type data */
  1779. if (cfqg->saved_workload_slice) {
  1780. cfqd->workload_expires = jiffies + cfqg->saved_workload_slice;
  1781. cfqd->serving_type = cfqg->saved_workload;
  1782. cfqd->serving_prio = cfqg->saved_serving_prio;
  1783. } else
  1784. cfqd->workload_expires = jiffies - 1;
  1785. choose_service_tree(cfqd, cfqg);
  1786. }
  1787. /*
  1788. * Select a queue for service. If we have a current active queue,
  1789. * check whether to continue servicing it, or retrieve and set a new one.
  1790. */
  1791. static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
  1792. {
  1793. struct cfq_queue *cfqq, *new_cfqq = NULL;
  1794. cfqq = cfqd->active_queue;
  1795. if (!cfqq)
  1796. goto new_queue;
  1797. if (!cfqd->rq_queued)
  1798. return NULL;
  1799. /*
  1800. * We were waiting for group to get backlogged. Expire the queue
  1801. */
  1802. if (cfq_cfqq_wait_busy(cfqq) && !RB_EMPTY_ROOT(&cfqq->sort_list))
  1803. goto expire;
  1804. /*
  1805. * The active queue has run out of time, expire it and select new.
  1806. */
  1807. if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq)) {
  1808. /*
  1809. * If slice had not expired at the completion of last request
  1810. * we might not have turned on wait_busy flag. Don't expire
  1811. * the queue yet. Allow the group to get backlogged.
  1812. *
1813. * The very fact that we have used up the slice means we
  1814. * have been idling all along on this queue and it should be
  1815. * ok to wait for this request to complete.
  1816. */
  1817. if (cfqq->cfqg->nr_cfqq == 1 && RB_EMPTY_ROOT(&cfqq->sort_list)
  1818. && cfqq->dispatched && cfq_should_idle(cfqd, cfqq)) {
  1819. cfqq = NULL;
  1820. goto keep_queue;
  1821. } else
  1822. goto expire;
  1823. }
  1824. /*
  1825. * The active queue has requests and isn't expired, allow it to
  1826. * dispatch.
  1827. */
  1828. if (!RB_EMPTY_ROOT(&cfqq->sort_list))
  1829. goto keep_queue;
  1830. /*
  1831. * If another queue has a request waiting within our mean seek
  1832. * distance, let it run. The expire code will check for close
  1833. * cooperators and put the close queue at the front of the service
  1834. * tree. If possible, merge the expiring queue with the new cfqq.
  1835. */
  1836. new_cfqq = cfq_close_cooperator(cfqd, cfqq);
  1837. if (new_cfqq) {
  1838. if (!cfqq->new_cfqq)
  1839. cfq_setup_merge(cfqq, new_cfqq);
  1840. goto expire;
  1841. }
  1842. /*
  1843. * No requests pending. If the active queue still has requests in
  1844. * flight or is idling for a new request, allow either of these
  1845. * conditions to happen (or time out) before selecting a new queue.
  1846. */
  1847. if (timer_pending(&cfqd->idle_slice_timer) ||
  1848. (cfqq->dispatched && cfq_should_idle(cfqd, cfqq))) {
  1849. cfqq = NULL;
  1850. goto keep_queue;
  1851. }
  1852. expire:
  1853. cfq_slice_expired(cfqd, 0);
  1854. new_queue:
  1855. /*
  1856. * Current queue expired. Check if we have to switch to a new
  1857. * service tree
  1858. */
  1859. if (!new_cfqq)
  1860. cfq_choose_cfqg(cfqd);
  1861. cfqq = cfq_set_active_queue(cfqd, new_cfqq);
  1862. keep_queue:
  1863. return cfqq;
  1864. }
  1865. static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
  1866. {
  1867. int dispatched = 0;
  1868. while (cfqq->next_rq) {
  1869. cfq_dispatch_insert(cfqq->cfqd->queue, cfqq->next_rq);
  1870. dispatched++;
  1871. }
  1872. BUG_ON(!list_empty(&cfqq->fifo));
  1873. /* By default cfqq is not expired if it is empty. Do it explicitly */
  1874. __cfq_slice_expired(cfqq->cfqd, cfqq, 0);
  1875. return dispatched;
  1876. }
  1877. /*
  1878. * Drain our current requests. Used for barriers and when switching
  1879. * io schedulers on-the-fly.
  1880. */
  1881. static int cfq_forced_dispatch(struct cfq_data *cfqd)
  1882. {
  1883. struct cfq_queue *cfqq;
  1884. int dispatched = 0;
  1885. while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL)
  1886. dispatched += __cfq_forced_dispatch_cfqq(cfqq);
  1887. cfq_slice_expired(cfqd, 0);
  1888. BUG_ON(cfqd->busy_queues);
  1889. cfq_log(cfqd, "forced_dispatch=%d", dispatched);
  1890. return dispatched;
  1891. }
  1892. static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  1893. {
  1894. unsigned int max_dispatch;
  1895. /*
  1896. * Drain async requests before we start sync IO
  1897. */
  1898. if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
  1899. return false;
  1900. /*
  1901. * If this is an async queue and we have sync IO in flight, let it wait
  1902. */
  1903. if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
  1904. return false;
  1905. max_dispatch = cfqd->cfq_quantum;
  1906. if (cfq_class_idle(cfqq))
  1907. max_dispatch = 1;
  1908. /*
  1909. * Does this cfqq already have too much IO in flight?
  1910. */
  1911. if (cfqq->dispatched >= max_dispatch) {
  1912. /*
  1913. * idle queue must always only have a single IO in flight
  1914. */
  1915. if (cfq_class_idle(cfqq))
  1916. return false;
  1917. /*
  1918. * We have other queues, don't allow more IO from this one
  1919. */
  1920. if (cfqd->busy_queues > 1)
  1921. return false;
  1922. /*
  1923. * Sole queue user, no limit
  1924. */
  1925. max_dispatch = -1;
  1926. }
  1927. /*
  1928. * Async queues must wait a bit before being allowed dispatch.
  1929. * We also ramp up the dispatch depth gradually for async IO,
  1930. * based on the last sync IO we serviced
  1931. */
  1932. if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) {
  1933. unsigned long last_sync = jiffies - cfqd->last_delayed_sync;
  1934. unsigned int depth;
  1935. depth = last_sync / cfqd->cfq_slice[1];
  1936. if (!depth && !cfqq->dispatched)
  1937. depth = 1;
  1938. if (depth < max_dispatch)
  1939. max_dispatch = depth;
  1940. }
  1941. /*
  1942. * If we're below the current max, allow a dispatch
  1943. */
  1944. return cfqq->dispatched < max_dispatch;
  1945. }
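/*
* Explanatory note (not in the original source) on the async ramp-up above:
* immediately after a delayed sync completion, depth evaluates to 0 and an
* async queue may dispatch at most one request, and only if it has nothing
* in flight. Each further sync-slice worth of jiffies since the last
* delayed sync raises depth by one until it stops limiting max_dispatch.
*/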
  1946. /*
  1947. * Dispatch a request from cfqq, moving them to the request queue
  1948. * dispatch list.
  1949. */
  1950. static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  1951. {
  1952. struct request *rq;
  1953. BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
  1954. if (!cfq_may_dispatch(cfqd, cfqq))
  1955. return false;
  1956. /*
  1957. * follow expired path, else get first next available
  1958. */
  1959. rq = cfq_check_fifo(cfqq);
  1960. if (!rq)
  1961. rq = cfqq->next_rq;
  1962. /*
  1963. * insert request into driver dispatch list
  1964. */
  1965. cfq_dispatch_insert(cfqd->queue, rq);
  1966. if (!cfqd->active_cic) {
  1967. struct cfq_io_context *cic = RQ_CIC(rq);
  1968. atomic_long_inc(&cic->ioc->refcount);
  1969. cfqd->active_cic = cic;
  1970. }
  1971. return true;
  1972. }
  1973. /*
  1974. * Find the cfqq that we need to service and move a request from that to the
  1975. * dispatch list
  1976. */
  1977. static int cfq_dispatch_requests(struct request_queue *q, int force)
  1978. {
  1979. struct cfq_data *cfqd = q->elevator->elevator_data;
  1980. struct cfq_queue *cfqq;
  1981. if (!cfqd->busy_queues)
  1982. return 0;
  1983. if (unlikely(force))
  1984. return cfq_forced_dispatch(cfqd);
  1985. cfqq = cfq_select_queue(cfqd);
  1986. if (!cfqq)
  1987. return 0;
  1988. /*
  1989. * Dispatch a request from this cfqq, if it is allowed
  1990. */
  1991. if (!cfq_dispatch_request(cfqd, cfqq))
  1992. return 0;
  1993. cfqq->slice_dispatch++;
  1994. cfq_clear_cfqq_must_dispatch(cfqq);
  1995. /*
  1996. * expire an async queue immediately if it has used up its slice. idle
1997. * queues always expire after 1 dispatch round.
  1998. */
  1999. if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
  2000. cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
  2001. cfq_class_idle(cfqq))) {
  2002. cfqq->slice_end = jiffies + 1;
  2003. cfq_slice_expired(cfqd, 0);
  2004. }
  2005. cfq_log_cfqq(cfqd, cfqq, "dispatched a request");
  2006. return 1;
  2007. }
  2008. /*
  2009. * task holds one reference to the queue, dropped when task exits. each rq
  2010. * in-flight on this queue also holds a reference, dropped when rq is freed.
  2011. *
  2012. * Each cfq queue took a reference on the parent group. Drop it now.
  2013. * queue lock must be held here.
  2014. */
  2015. static void cfq_put_queue(struct cfq_queue *cfqq)
  2016. {
  2017. struct cfq_data *cfqd = cfqq->cfqd;
  2018. struct cfq_group *cfqg, *orig_cfqg;
  2019. BUG_ON(atomic_read(&cfqq->ref) <= 0);
  2020. if (!atomic_dec_and_test(&cfqq->ref))
  2021. return;
  2022. cfq_log_cfqq(cfqd, cfqq, "put_queue");
  2023. BUG_ON(rb_first(&cfqq->sort_list));
  2024. BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
  2025. cfqg = cfqq->cfqg;
  2026. orig_cfqg = cfqq->orig_cfqg;
  2027. if (unlikely(cfqd->active_queue == cfqq)) {
  2028. __cfq_slice_expired(cfqd, cfqq, 0);
  2029. cfq_schedule_dispatch(cfqd);
  2030. }
  2031. BUG_ON(cfq_cfqq_on_rr(cfqq));
  2032. kmem_cache_free(cfq_pool, cfqq);
  2033. cfq_put_cfqg(cfqg);
  2034. if (orig_cfqg)
  2035. cfq_put_cfqg(orig_cfqg);
  2036. }
  2037. /*
  2038. * Must always be called with the rcu_read_lock() held
  2039. */
  2040. static void
  2041. __call_for_each_cic(struct io_context *ioc,
  2042. void (*func)(struct io_context *, struct cfq_io_context *))
  2043. {
  2044. struct cfq_io_context *cic;
  2045. struct hlist_node *n;
  2046. hlist_for_each_entry_rcu(cic, n, &ioc->cic_list, cic_list)
  2047. func(ioc, cic);
  2048. }
  2049. /*
  2050. * Call func for each cic attached to this ioc.
  2051. */
  2052. static void
  2053. call_for_each_cic(struct io_context *ioc,
  2054. void (*func)(struct io_context *, struct cfq_io_context *))
  2055. {
  2056. rcu_read_lock();
  2057. __call_for_each_cic(ioc, func);
  2058. rcu_read_unlock();
  2059. }
  2060. static void cfq_cic_free_rcu(struct rcu_head *head)
  2061. {
  2062. struct cfq_io_context *cic;
  2063. cic = container_of(head, struct cfq_io_context, rcu_head);
  2064. kmem_cache_free(cfq_ioc_pool, cic);
  2065. elv_ioc_count_dec(cfq_ioc_count);
  2066. if (ioc_gone) {
  2067. /*
  2068. * CFQ scheduler is exiting, grab exit lock and check
  2069. * the pending io context count. If it hits zero,
  2070. * complete ioc_gone and set it back to NULL
  2071. */
  2072. spin_lock(&ioc_gone_lock);
  2073. if (ioc_gone && !elv_ioc_count_read(cfq_ioc_count)) {
  2074. complete(ioc_gone);
  2075. ioc_gone = NULL;
  2076. }
  2077. spin_unlock(&ioc_gone_lock);
  2078. }
  2079. }
  2080. static void cfq_cic_free(struct cfq_io_context *cic)
  2081. {
  2082. call_rcu(&cic->rcu_head, cfq_cic_free_rcu);
  2083. }
  2084. static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
  2085. {
  2086. unsigned long flags;
  2087. BUG_ON(!cic->dead_key);
  2088. spin_lock_irqsave(&ioc->lock, flags);
  2089. radix_tree_delete(&ioc->radix_root, cic->dead_key);
  2090. hlist_del_rcu(&cic->cic_list);
  2091. spin_unlock_irqrestore(&ioc->lock, flags);
  2092. cfq_cic_free(cic);
  2093. }
  2094. /*
  2095. * Must be called with rcu_read_lock() held or preemption otherwise disabled.
  2096. * Only two callers of this - ->dtor() which is called with the rcu_read_lock(),
  2097. * and ->trim() which is called with the task lock held
  2098. */
  2099. static void cfq_free_io_context(struct io_context *ioc)
  2100. {
  2101. /*
  2102. * ioc->refcount is zero here, or we are called from elv_unregister(),
  2103. * so no more cic's are allowed to be linked into this ioc. So it
  2104. * should be ok to iterate over the known list, we will see all cic's
  2105. * since no new ones are added.
  2106. */
  2107. __call_for_each_cic(ioc, cic_free_func);
  2108. }
  2109. static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  2110. {
  2111. struct cfq_queue *__cfqq, *next;
  2112. if (unlikely(cfqq == cfqd->active_queue)) {
  2113. __cfq_slice_expired(cfqd, cfqq, 0);
  2114. cfq_schedule_dispatch(cfqd);
  2115. }
  2116. /*
  2117. * If this queue was scheduled to merge with another queue, be
  2118. * sure to drop the reference taken on that queue (and others in
  2119. * the merge chain). See cfq_setup_merge and cfq_merge_cfqqs.
  2120. */
  2121. __cfqq = cfqq->new_cfqq;
  2122. while (__cfqq) {
  2123. if (__cfqq == cfqq) {
  2124. WARN(1, "cfqq->new_cfqq loop detected\n");
  2125. break;
  2126. }
  2127. next = __cfqq->new_cfqq;
  2128. cfq_put_queue(__cfqq);
  2129. __cfqq = next;
  2130. }
  2131. cfq_put_queue(cfqq);
  2132. }
  2133. static void __cfq_exit_single_io_context(struct cfq_data *cfqd,
  2134. struct cfq_io_context *cic)
  2135. {
  2136. struct io_context *ioc = cic->ioc;
  2137. list_del_init(&cic->queue_list);
  2138. /*
  2139. * Make sure key == NULL is seen for dead queues
  2140. */
  2141. smp_wmb();
  2142. cic->dead_key = (unsigned long) cic->key;
  2143. cic->key = NULL;
  2144. if (ioc->ioc_data == cic)
  2145. rcu_assign_pointer(ioc->ioc_data, NULL);
  2146. if (cic->cfqq[BLK_RW_ASYNC]) {
  2147. cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]);
  2148. cic->cfqq[BLK_RW_ASYNC] = NULL;
  2149. }
  2150. if (cic->cfqq[BLK_RW_SYNC]) {
  2151. cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_SYNC]);
  2152. cic->cfqq[BLK_RW_SYNC] = NULL;
  2153. }
  2154. }
  2155. static void cfq_exit_single_io_context(struct io_context *ioc,
  2156. struct cfq_io_context *cic)
  2157. {
  2158. struct cfq_data *cfqd = cic->key;
  2159. if (cfqd) {
  2160. struct request_queue *q = cfqd->queue;
  2161. unsigned long flags;
  2162. spin_lock_irqsave(q->queue_lock, flags);
  2163. /*
  2164. * Ensure we get a fresh copy of the ->key to prevent
  2165. * race between exiting task and queue
  2166. */
  2167. smp_read_barrier_depends();
  2168. if (cic->key)
  2169. __cfq_exit_single_io_context(cfqd, cic);
  2170. spin_unlock_irqrestore(q->queue_lock, flags);
  2171. }
  2172. }
  2173. /*
2174. * The process that ioc belongs to has exited; we need to clean up
2175. * and put the internal structures we have that belong to that process.
  2176. */
  2177. static void cfq_exit_io_context(struct io_context *ioc)
  2178. {
  2179. call_for_each_cic(ioc, cfq_exit_single_io_context);
  2180. }
  2181. static struct cfq_io_context *
  2182. cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
  2183. {
  2184. struct cfq_io_context *cic;
  2185. cic = kmem_cache_alloc_node(cfq_ioc_pool, gfp_mask | __GFP_ZERO,
  2186. cfqd->queue->node);
  2187. if (cic) {
  2188. cic->last_end_request = jiffies;
  2189. INIT_LIST_HEAD(&cic->queue_list);
  2190. INIT_HLIST_NODE(&cic->cic_list);
  2191. cic->dtor = cfq_free_io_context;
  2192. cic->exit = cfq_exit_io_context;
  2193. elv_ioc_count_inc(cfq_ioc_count);
  2194. }
  2195. return cic;
  2196. }
  2197. static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc)
  2198. {
  2199. struct task_struct *tsk = current;
  2200. int ioprio_class;
  2201. if (!cfq_cfqq_prio_changed(cfqq))
  2202. return;
  2203. ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio);
  2204. switch (ioprio_class) {
  2205. default:
  2206. printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
  2207. case IOPRIO_CLASS_NONE:
  2208. /*
  2209. * no prio set, inherit CPU scheduling settings
  2210. */
  2211. cfqq->ioprio = task_nice_ioprio(tsk);
  2212. cfqq->ioprio_class = task_nice_ioclass(tsk);
  2213. break;
  2214. case IOPRIO_CLASS_RT:
  2215. cfqq->ioprio = task_ioprio(ioc);
  2216. cfqq->ioprio_class = IOPRIO_CLASS_RT;
  2217. break;
  2218. case IOPRIO_CLASS_BE:
  2219. cfqq->ioprio = task_ioprio(ioc);
  2220. cfqq->ioprio_class = IOPRIO_CLASS_BE;
  2221. break;
  2222. case IOPRIO_CLASS_IDLE:
  2223. cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
  2224. cfqq->ioprio = 7;
  2225. cfq_clear_cfqq_idle_window(cfqq);
  2226. break;
  2227. }
  2228. /*
  2229. * keep track of original prio settings in case we have to temporarily
  2230. * elevate the priority of this queue
  2231. */
  2232. cfqq->org_ioprio = cfqq->ioprio;
  2233. cfqq->org_ioprio_class = cfqq->ioprio_class;
  2234. cfq_clear_cfqq_prio_changed(cfqq);
  2235. }
  2236. static void changed_ioprio(struct io_context *ioc, struct cfq_io_context *cic)
  2237. {
  2238. struct cfq_data *cfqd = cic->key;
  2239. struct cfq_queue *cfqq;
  2240. unsigned long flags;
  2241. if (unlikely(!cfqd))
  2242. return;
  2243. spin_lock_irqsave(cfqd->queue->queue_lock, flags);
  2244. cfqq = cic->cfqq[BLK_RW_ASYNC];
  2245. if (cfqq) {
  2246. struct cfq_queue *new_cfqq;
  2247. new_cfqq = cfq_get_queue(cfqd, BLK_RW_ASYNC, cic->ioc,
  2248. GFP_ATOMIC);
  2249. if (new_cfqq) {
  2250. cic->cfqq[BLK_RW_ASYNC] = new_cfqq;
  2251. cfq_put_queue(cfqq);
  2252. }
  2253. }
  2254. cfqq = cic->cfqq[BLK_RW_SYNC];
  2255. if (cfqq)
  2256. cfq_mark_cfqq_prio_changed(cfqq);
  2257. spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
  2258. }
  2259. static void cfq_ioc_set_ioprio(struct io_context *ioc)
  2260. {
  2261. call_for_each_cic(ioc, changed_ioprio);
  2262. ioc->ioprio_changed = 0;
  2263. }
  2264. static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  2265. pid_t pid, bool is_sync)
  2266. {
  2267. RB_CLEAR_NODE(&cfqq->rb_node);
  2268. RB_CLEAR_NODE(&cfqq->p_node);
  2269. INIT_LIST_HEAD(&cfqq->fifo);
  2270. atomic_set(&cfqq->ref, 0);
  2271. cfqq->cfqd = cfqd;
  2272. cfq_mark_cfqq_prio_changed(cfqq);
  2273. if (is_sync) {
  2274. if (!cfq_class_idle(cfqq))
  2275. cfq_mark_cfqq_idle_window(cfqq);
  2276. cfq_mark_cfqq_sync(cfqq);
  2277. }
  2278. cfqq->pid = pid;
  2279. }
  2280. #ifdef CONFIG_CFQ_GROUP_IOSCHED
  2281. static void changed_cgroup(struct io_context *ioc, struct cfq_io_context *cic)
  2282. {
  2283. struct cfq_queue *sync_cfqq = cic_to_cfqq(cic, 1);
  2284. struct cfq_data *cfqd = cic->key;
  2285. unsigned long flags;
  2286. struct request_queue *q;
  2287. if (unlikely(!cfqd))
  2288. return;
  2289. q = cfqd->queue;
  2290. spin_lock_irqsave(q->queue_lock, flags);
  2291. if (sync_cfqq) {
  2292. /*
  2293. * Drop reference to sync queue. A new sync queue will be
  2294. * assigned in new group upon arrival of a fresh request.
  2295. */
  2296. cfq_log_cfqq(cfqd, sync_cfqq, "changed cgroup");
  2297. cic_set_cfqq(cic, NULL, 1);
  2298. cfq_put_queue(sync_cfqq);
  2299. }
  2300. spin_unlock_irqrestore(q->queue_lock, flags);
  2301. }
  2302. static void cfq_ioc_set_cgroup(struct io_context *ioc)
  2303. {
  2304. call_for_each_cic(ioc, changed_cgroup);
  2305. ioc->cgroup_changed = 0;
  2306. }
  2307. #endif /* CONFIG_CFQ_GROUP_IOSCHED */
  2308. static struct cfq_queue *
  2309. cfq_find_alloc_queue(struct cfq_data *cfqd, bool is_sync,
  2310. struct io_context *ioc, gfp_t gfp_mask)
  2311. {
  2312. struct cfq_queue *cfqq, *new_cfqq = NULL;
  2313. struct cfq_io_context *cic;
  2314. struct cfq_group *cfqg;
  2315. retry:
  2316. cfqg = cfq_get_cfqg(cfqd, 1);
  2317. cic = cfq_cic_lookup(cfqd, ioc);
  2318. /* cic always exists here */
  2319. cfqq = cic_to_cfqq(cic, is_sync);
  2320. /*
  2321. * Always try a new alloc if we fell back to the OOM cfqq
  2322. * originally, since it should just be a temporary situation.
  2323. */
  2324. if (!cfqq || cfqq == &cfqd->oom_cfqq) {
  2325. cfqq = NULL;
  2326. if (new_cfqq) {
  2327. cfqq = new_cfqq;
  2328. new_cfqq = NULL;
  2329. } else if (gfp_mask & __GFP_WAIT) {
  2330. spin_unlock_irq(cfqd->queue->queue_lock);
  2331. new_cfqq = kmem_cache_alloc_node(cfq_pool,
  2332. gfp_mask | __GFP_ZERO,
  2333. cfqd->queue->node);
  2334. spin_lock_irq(cfqd->queue->queue_lock);
  2335. if (new_cfqq)
  2336. goto retry;
  2337. } else {
  2338. cfqq = kmem_cache_alloc_node(cfq_pool,
  2339. gfp_mask | __GFP_ZERO,
  2340. cfqd->queue->node);
  2341. }
  2342. if (cfqq) {
  2343. cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
  2344. cfq_init_prio_data(cfqq, ioc);
  2345. cfq_link_cfqq_cfqg(cfqq, cfqg);
  2346. cfq_log_cfqq(cfqd, cfqq, "alloced");
  2347. } else
  2348. cfqq = &cfqd->oom_cfqq;
  2349. }
  2350. if (new_cfqq)
  2351. kmem_cache_free(cfq_pool, new_cfqq);
  2352. return cfqq;
  2353. }
  2354. static struct cfq_queue **
  2355. cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio)
  2356. {
  2357. switch (ioprio_class) {
  2358. case IOPRIO_CLASS_RT:
  2359. return &cfqd->async_cfqq[0][ioprio];
  2360. case IOPRIO_CLASS_BE:
  2361. return &cfqd->async_cfqq[1][ioprio];
  2362. case IOPRIO_CLASS_IDLE:
  2363. return &cfqd->async_idle_cfqq;
  2364. default:
  2365. BUG();
  2366. }
  2367. }
  2368. static struct cfq_queue *
  2369. cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct io_context *ioc,
  2370. gfp_t gfp_mask)
  2371. {
  2372. const int ioprio = task_ioprio(ioc);
  2373. const int ioprio_class = task_ioprio_class(ioc);
  2374. struct cfq_queue **async_cfqq = NULL;
  2375. struct cfq_queue *cfqq = NULL;
  2376. if (!is_sync) {
  2377. async_cfqq = cfq_async_queue_prio(cfqd, ioprio_class, ioprio);
  2378. cfqq = *async_cfqq;
  2379. }
  2380. if (!cfqq)
  2381. cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
  2382. /*
  2383. * pin the queue now that it's allocated, scheduler exit will prune it
  2384. */
  2385. if (!is_sync && !(*async_cfqq)) {
  2386. atomic_inc(&cfqq->ref);
  2387. *async_cfqq = cfqq;
  2388. }
  2389. atomic_inc(&cfqq->ref);
  2390. return cfqq;
  2391. }
  2392. /*
  2393. * We drop cfq io contexts lazily, so we may find a dead one.
  2394. */
  2395. static void
  2396. cfq_drop_dead_cic(struct cfq_data *cfqd, struct io_context *ioc,
  2397. struct cfq_io_context *cic)
  2398. {
  2399. unsigned long flags;
  2400. WARN_ON(!list_empty(&cic->queue_list));
  2401. spin_lock_irqsave(&ioc->lock, flags);
  2402. BUG_ON(ioc->ioc_data == cic);
  2403. radix_tree_delete(&ioc->radix_root, (unsigned long) cfqd);
  2404. hlist_del_rcu(&cic->cic_list);
  2405. spin_unlock_irqrestore(&ioc->lock, flags);
  2406. cfq_cic_free(cic);
  2407. }
  2408. static struct cfq_io_context *
  2409. cfq_cic_lookup(struct cfq_data *cfqd, struct io_context *ioc)
  2410. {
  2411. struct cfq_io_context *cic;
  2412. unsigned long flags;
  2413. void *k;
  2414. if (unlikely(!ioc))
  2415. return NULL;
  2416. rcu_read_lock();
  2417. /*
  2418. * we maintain a last-hit cache, to avoid browsing over the tree
  2419. */
  2420. cic = rcu_dereference(ioc->ioc_data);
  2421. if (cic && cic->key == cfqd) {
  2422. rcu_read_unlock();
  2423. return cic;
  2424. }
  2425. do {
  2426. cic = radix_tree_lookup(&ioc->radix_root, (unsigned long) cfqd);
  2427. rcu_read_unlock();
  2428. if (!cic)
  2429. break;
  2430. /* ->key must be copied to avoid race with cfq_exit_queue() */
  2431. k = cic->key;
  2432. if (unlikely(!k)) {
  2433. cfq_drop_dead_cic(cfqd, ioc, cic);
  2434. rcu_read_lock();
  2435. continue;
  2436. }
  2437. spin_lock_irqsave(&ioc->lock, flags);
  2438. rcu_assign_pointer(ioc->ioc_data, cic);
  2439. spin_unlock_irqrestore(&ioc->lock, flags);
  2440. break;
  2441. } while (1);
  2442. return cic;
  2443. }
  2444. /*
  2445. * Add cic into ioc, using cfqd as the search key. This enables us to lookup
  2446. * the process specific cfq io context when entered from the block layer.
  2447. * Also adds the cic to a per-cfqd list, used when this queue is removed.
  2448. */
  2449. static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
  2450. struct cfq_io_context *cic, gfp_t gfp_mask)
  2451. {
  2452. unsigned long flags;
  2453. int ret;
  2454. ret = radix_tree_preload(gfp_mask);
  2455. if (!ret) {
  2456. cic->ioc = ioc;
  2457. cic->key = cfqd;
  2458. spin_lock_irqsave(&ioc->lock, flags);
  2459. ret = radix_tree_insert(&ioc->radix_root,
  2460. (unsigned long) cfqd, cic);
  2461. if (!ret)
  2462. hlist_add_head_rcu(&cic->cic_list, &ioc->cic_list);
  2463. spin_unlock_irqrestore(&ioc->lock, flags);
  2464. radix_tree_preload_end();
  2465. if (!ret) {
  2466. spin_lock_irqsave(cfqd->queue->queue_lock, flags);
  2467. list_add(&cic->queue_list, &cfqd->cic_list);
  2468. spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
  2469. }
  2470. }
  2471. if (ret)
  2472. printk(KERN_ERR "cfq: cic link failed!\n");
  2473. return ret;
  2474. }
  2475. /*
  2476. * Setup general io context and cfq io context. There can be several cfq
  2477. * io contexts per general io context, if this process is doing io to more
  2478. * than one device managed by cfq.
  2479. */
  2480. static struct cfq_io_context *
  2481. cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
  2482. {
  2483. struct io_context *ioc = NULL;
  2484. struct cfq_io_context *cic;
  2485. might_sleep_if(gfp_mask & __GFP_WAIT);
  2486. ioc = get_io_context(gfp_mask, cfqd->queue->node);
  2487. if (!ioc)
  2488. return NULL;
  2489. cic = cfq_cic_lookup(cfqd, ioc);
  2490. if (cic)
  2491. goto out;
  2492. cic = cfq_alloc_io_context(cfqd, gfp_mask);
  2493. if (cic == NULL)
  2494. goto err;
  2495. if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
  2496. goto err_free;
  2497. out:
  2498. smp_read_barrier_depends();
  2499. if (unlikely(ioc->ioprio_changed))
  2500. cfq_ioc_set_ioprio(ioc);
  2501. #ifdef CONFIG_CFQ_GROUP_IOSCHED
  2502. if (unlikely(ioc->cgroup_changed))
  2503. cfq_ioc_set_cgroup(ioc);
  2504. #endif
  2505. return cic;
  2506. err_free:
  2507. cfq_cic_free(cic);
  2508. err:
  2509. put_io_context(ioc);
  2510. return NULL;
  2511. }
  2512. static void
  2513. cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
  2514. {
  2515. unsigned long elapsed = jiffies - cic->last_end_request;
  2516. unsigned long ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
  2517. cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
  2518. cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
  2519. cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
  2520. }
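/*
* Explanatory note (not in the original source): this is fixed-point
* exponential averaging. ttime_samples decays toward 256 and acts as the
* weight, ttime_total accumulates 256 * ttime with a 7/8 decay per sample,
* and ttime_mean = total / samples is the recent mean think time in
* jiffies, with each sample capped at twice cfq_slice_idle.
*/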
  2521. static void
  2522. cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  2523. struct request *rq)
  2524. {
  2525. sector_t sdist;
  2526. u64 total;
  2527. if (!cfqq->last_request_pos)
  2528. sdist = 0;
  2529. else if (cfqq->last_request_pos < blk_rq_pos(rq))
  2530. sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
  2531. else
  2532. sdist = cfqq->last_request_pos - blk_rq_pos(rq);
  2533. /*
  2534. * Don't allow the seek distance to get too large from the
  2535. * odd fragment, pagein, etc
  2536. */
  2537. if (cfqq->seek_samples <= 60) /* second&third seek */
  2538. sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*1024);
  2539. else
  2540. sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*64);
  2541. cfqq->seek_samples = (7*cfqq->seek_samples + 256) / 8;
  2542. cfqq->seek_total = (7*cfqq->seek_total + (u64)256*sdist) / 8;
  2543. total = cfqq->seek_total + (cfqq->seek_samples/2);
  2544. do_div(total, cfqq->seek_samples);
  2545. cfqq->seek_mean = (sector_t)total;
  2546. /*
  2547. * If this cfqq is shared between multiple processes, check to
  2548. * make sure that those processes are still issuing I/Os within
  2549. * the mean seek distance. If not, it may be time to break the
  2550. * queues apart again.
  2551. */
  2552. if (cfq_cfqq_coop(cfqq)) {
  2553. if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start)
  2554. cfqq->seeky_start = jiffies;
  2555. else if (!CFQQ_SEEKY(cfqq))
  2556. cfqq->seeky_start = 0;
  2557. }
  2558. }
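/*
* Explanatory note (not in the original source): seek_mean uses the same
* 7/8 fixed-point average as the think time above, and each new sample is
* clamped relative to the current mean so that a single large seek (an odd
* page-in or log write) cannot immediately flip CFQQ_SEEKY() for the queue.
*/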
  2559. /*
  2560. * Disable idle window if the process thinks too long or seeks so much that
  2561. * it doesn't matter
  2562. */
  2563. static void
  2564. cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  2565. struct cfq_io_context *cic)
  2566. {
  2567. int old_idle, enable_idle;
  2568. /*
  2569. * Don't idle for async or idle io prio class
  2570. */
  2571. if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
  2572. return;
  2573. enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
  2574. if (cfqq->queued[0] + cfqq->queued[1] >= 4)
  2575. cfq_mark_cfqq_deep(cfqq);
  2576. if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
  2577. (!cfq_cfqq_deep(cfqq) && sample_valid(cfqq->seek_samples)
  2578. && CFQQ_SEEKY(cfqq)))
  2579. enable_idle = 0;
  2580. else if (sample_valid(cic->ttime_samples)) {
  2581. if (cic->ttime_mean > cfqd->cfq_slice_idle)
  2582. enable_idle = 0;
  2583. else
  2584. enable_idle = 1;
  2585. }
  2586. if (old_idle != enable_idle) {
  2587. cfq_log_cfqq(cfqd, cfqq, "idle=%d", enable_idle);
  2588. if (enable_idle)
  2589. cfq_mark_cfqq_idle_window(cfqq);
  2590. else
  2591. cfq_clear_cfqq_idle_window(cfqq);
  2592. }
  2593. }
  2594. /*
2595. * Check if new_cfqq should preempt the currently active queue. Return false
2596. * if not (or if we aren't sure); returning true will cause a preempt.
  2597. */
  2598. static bool
  2599. cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
  2600. struct request *rq)
  2601. {
  2602. struct cfq_queue *cfqq;
  2603. cfqq = cfqd->active_queue;
  2604. if (!cfqq)
  2605. return false;
  2606. if (cfq_class_idle(new_cfqq))
  2607. return false;
  2608. if (cfq_class_idle(cfqq))
  2609. return true;
  2610. /*
  2611. * if the new request is sync, but the currently running queue is
  2612. * not, let the sync request have priority.
  2613. */
  2614. if (rq_is_sync(rq) && !cfq_cfqq_sync(cfqq))
  2615. return true;
  2616. if (new_cfqq->cfqg != cfqq->cfqg)
  2617. return false;
  2618. if (cfq_slice_used(cfqq))
  2619. return true;
  2620. /* Allow preemption only if we are idling on sync-noidle tree */
  2621. if (cfqd->serving_type == SYNC_NOIDLE_WORKLOAD &&
  2622. cfqq_type(new_cfqq) == SYNC_NOIDLE_WORKLOAD &&
  2623. new_cfqq->service_tree->count == 2 &&
  2624. RB_EMPTY_ROOT(&cfqq->sort_list))
  2625. return true;
  2626. /*
  2627. * So both queues are sync. Let the new request get disk time if
  2628. * it's a metadata request and the current queue is doing regular IO.
  2629. */
  2630. if (rq_is_meta(rq) && !cfqq->meta_pending)
  2631. return true;
  2632. /*
  2633. * Allow an RT request to pre-empt an ongoing non-RT cfqq timeslice.
  2634. */
  2635. if (cfq_class_rt(new_cfqq) && !cfq_class_rt(cfqq))
  2636. return true;
  2637. if (!cfqd->active_cic || !cfq_cfqq_wait_request(cfqq))
  2638. return false;
  2639. /*
  2640. * if this request is as-good as one we would expect from the
  2641. * current cfqq, let it preempt
  2642. */
  2643. if (cfq_rq_close(cfqd, cfqq, rq))
  2644. return true;
  2645. return false;
  2646. }
  2647. /*
  2648. * cfqq preempts the active queue. if we allowed preempt with no slice left,
  2649. * let it have half of its nominal slice.
  2650. */
  2651. static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  2652. {
  2653. cfq_log_cfqq(cfqd, cfqq, "preempt");
  2654. cfq_slice_expired(cfqd, 1);
  2655. /*
2656. * Put the new queue at the front of the current list,
  2657. * so we know that it will be selected next.
  2658. */
  2659. BUG_ON(!cfq_cfqq_on_rr(cfqq));
  2660. cfq_service_tree_add(cfqd, cfqq, 1);
  2661. cfqq->slice_end = 0;
  2662. cfq_mark_cfqq_slice_new(cfqq);
  2663. }
  2664. /*
  2665. * Called when a new fs request (rq) is added (to cfqq). Check if there's
  2666. * something we should do about it
  2667. */
  2668. static void
  2669. cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
  2670. struct request *rq)
  2671. {
  2672. struct cfq_io_context *cic = RQ_CIC(rq);
  2673. cfqd->rq_queued++;
  2674. if (rq_is_meta(rq))
  2675. cfqq->meta_pending++;
  2676. cfq_update_io_thinktime(cfqd, cic);
  2677. cfq_update_io_seektime(cfqd, cfqq, rq);
  2678. cfq_update_idle_window(cfqd, cfqq, cic);
  2679. cfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
  2680. if (cfqq == cfqd->active_queue) {
  2681. /*
  2682. * Remember that we saw a request from this process, but
  2683. * don't start queuing just yet. Otherwise we risk seeing lots
  2684. * of tiny requests, because we disrupt the normal plugging
  2685. * and merging. If the request is already larger than a single
  2686. * page, let it rip immediately. For that case we assume that
  2687. * merging is already done. Ditto for a busy system that
  2688. * has other work pending, don't risk delaying until the
  2689. * idle timer unplug to continue working.
  2690. */
  2691. if (cfq_cfqq_wait_request(cfqq)) {
  2692. if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE ||
  2693. cfqd->busy_queues > 1) {
  2694. del_timer(&cfqd->idle_slice_timer);
  2695. cfq_clear_cfqq_wait_request(cfqq);
  2696. __blk_run_queue(cfqd->queue);
  2697. } else
  2698. cfq_mark_cfqq_must_dispatch(cfqq);
  2699. }
  2700. } else if (cfq_should_preempt(cfqd, cfqq, rq)) {
  2701. /*
  2702. * not the active queue - expire current slice if it is
2703. * idle and has expired its mean thinktime or this new queue
  2704. * has some old slice time left and is of higher priority or
  2705. * this new queue is RT and the current one is BE
  2706. */
  2707. cfq_preempt_queue(cfqd, cfqq);
  2708. __blk_run_queue(cfqd->queue);
  2709. }
  2710. }
  2711. static void cfq_insert_request(struct request_queue *q, struct request *rq)
  2712. {
  2713. struct cfq_data *cfqd = q->elevator->elevator_data;
  2714. struct cfq_queue *cfqq = RQ_CFQQ(rq);
  2715. cfq_log_cfqq(cfqd, cfqq, "insert_request");
  2716. cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc);
  2717. rq_set_fifo_time(rq, jiffies + cfqd->cfq_fifo_expire[rq_is_sync(rq)]);
  2718. list_add_tail(&rq->queuelist, &cfqq->fifo);
  2719. cfq_add_rq_rb(rq);
  2720. cfq_rq_enqueued(cfqd, cfqq, rq);
  2721. }
  2722. /*
  2723. * Update hw_tag based on peak queue depth over 50 samples under
  2724. * sufficient load.
  2725. */
  2726. static void cfq_update_hw_tag(struct cfq_data *cfqd)
  2727. {
  2728. struct cfq_queue *cfqq = cfqd->active_queue;
  2729. if (rq_in_driver(cfqd) > cfqd->hw_tag_est_depth)
  2730. cfqd->hw_tag_est_depth = rq_in_driver(cfqd);
  2731. if (cfqd->hw_tag == 1)
  2732. return;
  2733. if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
  2734. rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN)
  2735. return;
  2736. /*
2737. * If the active queue doesn't have enough requests and can idle, cfq might not
  2738. * dispatch sufficient requests to hardware. Don't zero hw_tag in this
  2739. * case
  2740. */
  2741. if (cfqq && cfq_cfqq_idle_window(cfqq) &&
  2742. cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
  2743. CFQ_HW_QUEUE_MIN && rq_in_driver(cfqd) < CFQ_HW_QUEUE_MIN)
  2744. return;
  2745. if (cfqd->hw_tag_samples++ < 50)
  2746. return;
  2747. if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
  2748. cfqd->hw_tag = 1;
  2749. else
  2750. cfqd->hw_tag = 0;
  2751. }
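/*
 * Decide whether to mark the active queue "wait busy" when its last request
 * completes: do so if its slice is (almost) used up or if the remaining
 * slice is shorter than the task's mean think time, but never when other
 * queues in the group could be served instead.
 */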
  2752. static bool cfq_should_wait_busy(struct cfq_data *cfqd, struct cfq_queue *cfqq)
  2753. {
  2754. struct cfq_io_context *cic = cfqd->active_cic;
  2755. /* If there are other queues in the group, don't wait */
  2756. if (cfqq->cfqg->nr_cfqq > 1)
  2757. return false;
  2758. if (cfq_slice_used(cfqq))
  2759. return true;
  2760. /* if slice left is less than think time, wait busy */
  2761. if (cic && sample_valid(cic->ttime_samples)
  2762. && (cfqq->slice_end - jiffies < cic->ttime_mean))
  2763. return true;
  2764. /*
2765. * If think time is less than a jiffy, then ttime_mean=0 and the check
2766. * above will not be true. It might happen that the slice has not expired
2767. * yet but will expire soon (4-5 ns) during select_queue(). To cover the
2768. * case where think time is less than a jiffy, mark the queue wait busy
2769. * if only 1 jiffy is left in the slice.
  2770. */
  2771. if (cfqq->slice_end - jiffies == 1)
  2772. return true;
  2773. return false;
  2774. }
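/*
 * elevator_completed_req_fn hook: a request has finished in the driver.
 * Update driver/dispatch counts and completion bookkeeping, then decide
 * whether the active queue should be expired, marked wait-busy, or armed
 * with the idle slice timer.
 */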
  2775. static void cfq_completed_request(struct request_queue *q, struct request *rq)
  2776. {
  2777. struct cfq_queue *cfqq = RQ_CFQQ(rq);
  2778. struct cfq_data *cfqd = cfqq->cfqd;
  2779. const int sync = rq_is_sync(rq);
  2780. unsigned long now;
  2781. now = jiffies;
  2782. cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", !!rq_noidle(rq));
  2783. cfq_update_hw_tag(cfqd);
  2784. WARN_ON(!cfqd->rq_in_driver[sync]);
  2785. WARN_ON(!cfqq->dispatched);
  2786. cfqd->rq_in_driver[sync]--;
  2787. cfqq->dispatched--;
  2788. if (cfq_cfqq_sync(cfqq))
  2789. cfqd->sync_flight--;
  2790. if (sync) {
  2791. RQ_CIC(rq)->last_end_request = now;
  2792. if (!time_after(rq->start_time + cfqd->cfq_fifo_expire[1], now))
  2793. cfqd->last_delayed_sync = now;
  2794. }
  2795. /*
  2796. * If this is the active queue, check if it needs to be expired,
  2797. * or if we want to idle in case it has no pending requests.
  2798. */
  2799. if (cfqd->active_queue == cfqq) {
  2800. const bool cfqq_empty = RB_EMPTY_ROOT(&cfqq->sort_list);
  2801. if (cfq_cfqq_slice_new(cfqq)) {
  2802. cfq_set_prio_slice(cfqd, cfqq);
  2803. cfq_clear_cfqq_slice_new(cfqq);
  2804. }
  2805. /*
2806. * Should we wait for the next request to come in before we expire
2807. * the queue?
  2808. */
  2809. if (cfq_should_wait_busy(cfqd, cfqq)) {
  2810. cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
  2811. cfq_mark_cfqq_wait_busy(cfqq);
  2812. }
  2813. /*
  2814. * Idling is not enabled on:
  2815. * - expired queues
  2816. * - idle-priority queues
  2817. * - async queues
2818. * - queues that still have requests queued
  2819. * - when there is a close cooperator
  2820. */
  2821. if (cfq_slice_used(cfqq) || cfq_class_idle(cfqq))
  2822. cfq_slice_expired(cfqd, 1);
  2823. else if (sync && cfqq_empty &&
  2824. !cfq_close_cooperator(cfqd, cfqq)) {
  2825. cfqd->noidle_tree_requires_idle |= !rq_noidle(rq);
  2826. /*
  2827. * Idling is enabled for SYNC_WORKLOAD.
  2828. * SYNC_NOIDLE_WORKLOAD idles at the end of the tree
  2829. * only if we processed at least one !rq_noidle request
  2830. */
  2831. if (cfqd->serving_type == SYNC_WORKLOAD
  2832. || cfqd->noidle_tree_requires_idle
  2833. || cfqq->cfqg->nr_cfqq == 1)
  2834. cfq_arm_slice_timer(cfqd);
  2835. }
  2836. }
  2837. if (!rq_in_driver(cfqd))
  2838. cfq_schedule_dispatch(cfqd);
  2839. }
  2840. /*
2841. * We temporarily boost lower-priority queues if they are holding fs-exclusive
2842. * resources; they are boosted to normal prio (CLASS_BE/4).
  2843. */
  2844. static void cfq_prio_boost(struct cfq_queue *cfqq)
  2845. {
  2846. if (has_fs_excl()) {
  2847. /*
  2848. * boost idle prio on transactions that would lock out other
  2849. * users of the filesystem
  2850. */
  2851. if (cfq_class_idle(cfqq))
  2852. cfqq->ioprio_class = IOPRIO_CLASS_BE;
  2853. if (cfqq->ioprio > IOPRIO_NORM)
  2854. cfqq->ioprio = IOPRIO_NORM;
  2855. } else {
  2856. /*
  2857. * unboost the queue (if needed)
  2858. */
  2859. cfqq->ioprio_class = cfqq->org_ioprio_class;
  2860. cfqq->ioprio = cfqq->org_ioprio;
  2861. }
  2862. }
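/*
 * If this queue is idling in anticipation of a request and has not yet been
 * granted a must-allocation for this slice, force the allocation through
 * (ELV_MQUEUE_MUST) so the idle wait is not wasted on allocation limits.
 */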
  2863. static inline int __cfq_may_queue(struct cfq_queue *cfqq)
  2864. {
  2865. if (cfq_cfqq_wait_request(cfqq) && !cfq_cfqq_must_alloc_slice(cfqq)) {
  2866. cfq_mark_cfqq_must_alloc_slice(cfqq);
  2867. return ELV_MQUEUE_MUST;
  2868. }
  2869. return ELV_MQUEUE_MAY;
  2870. }
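/*
 * elevator_may_queue_fn hook: tell the block layer whether this task may
 * allocate another request for this direction.
 */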
  2871. static int cfq_may_queue(struct request_queue *q, int rw)
  2872. {
  2873. struct cfq_data *cfqd = q->elevator->elevator_data;
  2874. struct task_struct *tsk = current;
  2875. struct cfq_io_context *cic;
  2876. struct cfq_queue *cfqq;
  2877. /*
2878. * Don't force setup of a queue from here, as a call to may_queue
2879. * does not necessarily imply that a request actually will be queued.
2880. * So just look up a possibly existing queue, or return 'may queue'
2881. * if that fails.
  2882. */
  2883. cic = cfq_cic_lookup(cfqd, tsk->io_context);
  2884. if (!cic)
  2885. return ELV_MQUEUE_MAY;
  2886. cfqq = cic_to_cfqq(cic, rw_is_sync(rw));
  2887. if (cfqq) {
  2888. cfq_init_prio_data(cfqq, cic->ioc);
  2889. cfq_prio_boost(cfqq);
  2890. return __cfq_may_queue(cfqq);
  2891. }
  2892. return ELV_MQUEUE_MAY;
  2893. }
  2894. /*
  2895. * queue lock held here
  2896. */
  2897. static void cfq_put_request(struct request *rq)
  2898. {
  2899. struct cfq_queue *cfqq = RQ_CFQQ(rq);
  2900. if (cfqq) {
  2901. const int rw = rq_data_dir(rq);
  2902. BUG_ON(!cfqq->allocated[rw]);
  2903. cfqq->allocated[rw]--;
  2904. put_io_context(RQ_CIC(rq)->ioc);
  2905. rq->elevator_private = NULL;
  2906. rq->elevator_private2 = NULL;
  2907. cfq_put_queue(cfqq);
  2908. }
  2909. }
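/*
 * A scheduled queue merge was detected: point the cic at cfqq->new_cfqq,
 * drop our reference on the old queue and return the queue this cic should
 * use from now on.
 */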
  2910. static struct cfq_queue *
  2911. cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
  2912. struct cfq_queue *cfqq)
  2913. {
  2914. cfq_log_cfqq(cfqd, cfqq, "merging with queue %p", cfqq->new_cfqq);
  2915. cic_set_cfqq(cic, cfqq->new_cfqq, 1);
  2916. cfq_mark_cfqq_coop(cfqq->new_cfqq);
  2917. cfq_put_queue(cfqq);
  2918. return cic_to_cfqq(cic, 1);
  2919. }
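/*
 * A merged (coop) queue that has been seeky for longer than CFQQ_COOP_TOUT
 * is a candidate for being split back into per-process queues.
 */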
  2920. static int should_split_cfqq(struct cfq_queue *cfqq)
  2921. {
  2922. if (cfqq->seeky_start &&
  2923. time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT))
  2924. return 1;
  2925. return 0;
  2926. }
  2927. /*
  2928. * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
  2929. * was the last process referring to said cfqq.
  2930. */
  2931. static struct cfq_queue *
  2932. split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
  2933. {
  2934. if (cfqq_process_refs(cfqq) == 1) {
  2935. cfqq->seeky_start = 0;
  2936. cfqq->pid = current->pid;
  2937. cfq_clear_cfqq_coop(cfqq);
  2938. return cfqq;
  2939. }
  2940. cic_set_cfqq(cic, NULL, 1);
  2941. cfq_put_queue(cfqq);
  2942. return NULL;
  2943. }
  2944. /*
  2945. * Allocate cfq data structures associated with this request.
  2946. */
  2947. static int
  2948. cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
  2949. {
  2950. struct cfq_data *cfqd = q->elevator->elevator_data;
  2951. struct cfq_io_context *cic;
  2952. const int rw = rq_data_dir(rq);
  2953. const bool is_sync = rq_is_sync(rq);
  2954. struct cfq_queue *cfqq;
  2955. unsigned long flags;
  2956. might_sleep_if(gfp_mask & __GFP_WAIT);
  2957. cic = cfq_get_io_context(cfqd, gfp_mask);
  2958. spin_lock_irqsave(q->queue_lock, flags);
  2959. if (!cic)
  2960. goto queue_fail;
  2961. new_queue:
  2962. cfqq = cic_to_cfqq(cic, is_sync);
  2963. if (!cfqq || cfqq == &cfqd->oom_cfqq) {
  2964. cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
  2965. cic_set_cfqq(cic, cfqq, is_sync);
  2966. } else {
  2967. /*
  2968. * If the queue was seeky for too long, break it apart.
  2969. */
  2970. if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) {
  2971. cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
  2972. cfqq = split_cfqq(cic, cfqq);
  2973. if (!cfqq)
  2974. goto new_queue;
  2975. }
  2976. /*
  2977. * Check to see if this queue is scheduled to merge with
  2978. * another, closely cooperating queue. The merging of
  2979. * queues happens here as it must be done in process context.
  2980. * The reference on new_cfqq was taken in merge_cfqqs.
  2981. */
  2982. if (cfqq->new_cfqq)
  2983. cfqq = cfq_merge_cfqqs(cfqd, cic, cfqq);
  2984. }
  2985. cfqq->allocated[rw]++;
  2986. atomic_inc(&cfqq->ref);
  2987. spin_unlock_irqrestore(q->queue_lock, flags);
  2988. rq->elevator_private = cic;
  2989. rq->elevator_private2 = cfqq;
  2990. return 0;
  2991. queue_fail:
  2992. if (cic)
  2993. put_io_context(cic->ioc);
  2994. cfq_schedule_dispatch(cfqd);
  2995. spin_unlock_irqrestore(q->queue_lock, flags);
  2996. cfq_log(cfqd, "set_request fail");
  2997. return 1;
  2998. }
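/*
 * Work handler for cfqd->unplug_work: re-run the request queue from process
 * context with the queue lock held.
 */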
  2999. static void cfq_kick_queue(struct work_struct *work)
  3000. {
  3001. struct cfq_data *cfqd =
  3002. container_of(work, struct cfq_data, unplug_work);
  3003. struct request_queue *q = cfqd->queue;
  3004. spin_lock_irq(q->queue_lock);
  3005. __blk_run_queue(cfqd->queue);
  3006. spin_unlock_irq(q->queue_lock);
  3007. }
  3008. /*
  3009. * Timer running if the active_queue is currently idling inside its time slice
  3010. */
  3011. static void cfq_idle_slice_timer(unsigned long data)
  3012. {
  3013. struct cfq_data *cfqd = (struct cfq_data *) data;
  3014. struct cfq_queue *cfqq;
  3015. unsigned long flags;
  3016. int timed_out = 1;
  3017. cfq_log(cfqd, "idle timer fired");
  3018. spin_lock_irqsave(cfqd->queue->queue_lock, flags);
  3019. cfqq = cfqd->active_queue;
  3020. if (cfqq) {
  3021. timed_out = 0;
  3022. /*
  3023. * We saw a request before the queue expired, let it through
  3024. */
  3025. if (cfq_cfqq_must_dispatch(cfqq))
  3026. goto out_kick;
  3027. /*
  3028. * expired
  3029. */
  3030. if (cfq_slice_used(cfqq))
  3031. goto expire;
  3032. /*
3033. * only expire and reinvoke the request handler if there are
3034. * other queues with pending requests
  3035. */
  3036. if (!cfqd->busy_queues)
  3037. goto out_cont;
  3038. /*
  3039. * not expired and it has a request pending, let it dispatch
  3040. */
  3041. if (!RB_EMPTY_ROOT(&cfqq->sort_list))
  3042. goto out_kick;
  3043. /*
3044. * The queue-depth flag is reset only when idling didn't succeed
  3045. */
  3046. cfq_clear_cfqq_deep(cfqq);
  3047. }
  3048. expire:
  3049. cfq_slice_expired(cfqd, timed_out);
  3050. out_kick:
  3051. cfq_schedule_dispatch(cfqd);
  3052. out_cont:
  3053. spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
  3054. }
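/* Make sure the idle slice timer and the unplug work are not running. */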
  3055. static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
  3056. {
  3057. del_timer_sync(&cfqd->idle_slice_timer);
  3058. cancel_work_sync(&cfqd->unplug_work);
  3059. }
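/*
 * Drop the references cfqd holds on the shared async queues (and the async
 * idle queue, if any).
 */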
  3060. static void cfq_put_async_queues(struct cfq_data *cfqd)
  3061. {
  3062. int i;
  3063. for (i = 0; i < IOPRIO_BE_NR; i++) {
  3064. if (cfqd->async_cfqq[0][i])
  3065. cfq_put_queue(cfqd->async_cfqq[0][i]);
  3066. if (cfqd->async_cfqq[1][i])
  3067. cfq_put_queue(cfqd->async_cfqq[1][i]);
  3068. }
  3069. if (cfqd->async_idle_cfqq)
  3070. cfq_put_queue(cfqd->async_idle_cfqq);
  3071. }
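/* RCU callback that finally frees the cfq_data once readers are done. */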
  3072. static void cfq_cfqd_free(struct rcu_head *head)
  3073. {
  3074. kfree(container_of(head, struct cfq_data, rcu));
  3075. }
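/*
 * elevator_exit_fn hook: expire the active queue, tear down io contexts,
 * async queues and groups, then free cfqd after an RCU grace period.
 */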
  3076. static void cfq_exit_queue(struct elevator_queue *e)
  3077. {
  3078. struct cfq_data *cfqd = e->elevator_data;
  3079. struct request_queue *q = cfqd->queue;
  3080. cfq_shutdown_timer_wq(cfqd);
  3081. spin_lock_irq(q->queue_lock);
  3082. if (cfqd->active_queue)
  3083. __cfq_slice_expired(cfqd, cfqd->active_queue, 0);
  3084. while (!list_empty(&cfqd->cic_list)) {
  3085. struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
  3086. struct cfq_io_context,
  3087. queue_list);
  3088. __cfq_exit_single_io_context(cfqd, cic);
  3089. }
  3090. cfq_put_async_queues(cfqd);
  3091. cfq_release_cfq_groups(cfqd);
  3092. blkiocg_del_blkio_group(&cfqd->root_group.blkg);
  3093. spin_unlock_irq(q->queue_lock);
  3094. cfq_shutdown_timer_wq(cfqd);
  3095. /* Wait for cfqg->blkg->key accessors to exit their grace periods. */
  3096. call_rcu(&cfqd->rcu, cfq_cfqd_free);
  3097. }
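/*
 * elevator_init_fn hook: allocate and set up the cfq_data for this queue,
 * including the root group, the oom fallback queue, the idle timer and the
 * default tunables.
 */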
  3098. static void *cfq_init_queue(struct request_queue *q)
  3099. {
  3100. struct cfq_data *cfqd;
  3101. int i, j;
  3102. struct cfq_group *cfqg;
  3103. struct cfq_rb_root *st;
  3104. cfqd = kmalloc_node(sizeof(*cfqd), GFP_KERNEL | __GFP_ZERO, q->node);
  3105. if (!cfqd)
  3106. return NULL;
  3107. /* Init root service tree */
  3108. cfqd->grp_service_tree = CFQ_RB_ROOT;
  3109. /* Init root group */
  3110. cfqg = &cfqd->root_group;
  3111. for_each_cfqg_st(cfqg, i, j, st)
  3112. *st = CFQ_RB_ROOT;
  3113. RB_CLEAR_NODE(&cfqg->rb_node);
  3114. /* Give preference to root group over other groups */
  3115. cfqg->weight = 2*BLKIO_WEIGHT_DEFAULT;
  3116. #ifdef CONFIG_CFQ_GROUP_IOSCHED
  3117. /*
  3118. * Take a reference to root group which we never drop. This is just
  3119. * to make sure that cfq_put_cfqg() does not try to kfree root group
  3120. */
  3121. atomic_set(&cfqg->ref, 1);
  3122. blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, (void *)cfqd,
  3123. 0);
  3124. #endif
  3125. /*
  3126. * Not strictly needed (since RB_ROOT just clears the node and we
  3127. * zeroed cfqd on alloc), but better be safe in case someone decides
  3128. * to add magic to the rb code
  3129. */
  3130. for (i = 0; i < CFQ_PRIO_LISTS; i++)
  3131. cfqd->prio_trees[i] = RB_ROOT;
  3132. /*
  3133. * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
  3134. * Grab a permanent reference to it, so that the normal code flow
  3135. * will not attempt to free it.
  3136. */
  3137. cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
  3138. atomic_inc(&cfqd->oom_cfqq.ref);
  3139. cfq_link_cfqq_cfqg(&cfqd->oom_cfqq, &cfqd->root_group);
  3140. INIT_LIST_HEAD(&cfqd->cic_list);
  3141. cfqd->queue = q;
  3142. init_timer(&cfqd->idle_slice_timer);
  3143. cfqd->idle_slice_timer.function = cfq_idle_slice_timer;
  3144. cfqd->idle_slice_timer.data = (unsigned long) cfqd;
  3145. INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
  3146. cfqd->cfq_quantum = cfq_quantum;
  3147. cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
  3148. cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
  3149. cfqd->cfq_back_max = cfq_back_max;
  3150. cfqd->cfq_back_penalty = cfq_back_penalty;
  3151. cfqd->cfq_slice[0] = cfq_slice_async;
  3152. cfqd->cfq_slice[1] = cfq_slice_sync;
  3153. cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
  3154. cfqd->cfq_slice_idle = cfq_slice_idle;
  3155. cfqd->cfq_latency = 1;
  3156. cfqd->cfq_group_isolation = 0;
  3157. cfqd->hw_tag = -1;
  3158. /*
3159. * We optimistically start out assuming sync ops weren't delayed in the
3160. * last second, in order to have a larger depth for async operations.
  3161. */
  3162. cfqd->last_delayed_sync = jiffies - HZ;
  3163. INIT_RCU_HEAD(&cfqd->rcu);
  3164. return cfqd;
  3165. }
  3166. static void cfq_slab_kill(void)
  3167. {
  3168. /*
  3169. * Caller already ensured that pending RCU callbacks are completed,
  3170. * so we should have no busy allocations at this point.
  3171. */
  3172. if (cfq_pool)
  3173. kmem_cache_destroy(cfq_pool);
  3174. if (cfq_ioc_pool)
  3175. kmem_cache_destroy(cfq_ioc_pool);
  3176. }
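/* Create the slab caches for cfq_queue and cfq_io_context objects. */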
  3177. static int __init cfq_slab_setup(void)
  3178. {
  3179. cfq_pool = KMEM_CACHE(cfq_queue, 0);
  3180. if (!cfq_pool)
  3181. goto fail;
  3182. cfq_ioc_pool = KMEM_CACHE(cfq_io_context, 0);
  3183. if (!cfq_ioc_pool)
  3184. goto fail;
  3185. return 0;
  3186. fail:
  3187. cfq_slab_kill();
  3188. return -ENOMEM;
  3189. }
  3190. /*
  3191. * sysfs parts below -->
  3192. */
  3193. static ssize_t
  3194. cfq_var_show(unsigned int var, char *page)
  3195. {
  3196. return sprintf(page, "%d\n", var);
  3197. }
  3198. static ssize_t
  3199. cfq_var_store(unsigned int *var, const char *page, size_t count)
  3200. {
  3201. char *p = (char *) page;
  3202. *var = simple_strtoul(p, &p, 10);
  3203. return count;
  3204. }
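/*
 * Generate the sysfs show/store helpers for the tunables below. When __CONV
 * is set, the value is kept internally in jiffies and exposed to userspace
 * in milliseconds.
 */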
  3205. #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
  3206. static ssize_t __FUNC(struct elevator_queue *e, char *page) \
  3207. { \
  3208. struct cfq_data *cfqd = e->elevator_data; \
  3209. unsigned int __data = __VAR; \
  3210. if (__CONV) \
  3211. __data = jiffies_to_msecs(__data); \
  3212. return cfq_var_show(__data, (page)); \
  3213. }
  3214. SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
  3215. SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
  3216. SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
  3217. SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
  3218. SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
  3219. SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
  3220. SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
  3221. SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
  3222. SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
  3223. SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
  3224. SHOW_FUNCTION(cfq_group_isolation_show, cfqd->cfq_group_isolation, 0);
  3225. #undef SHOW_FUNCTION
  3226. #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
  3227. static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
  3228. { \
  3229. struct cfq_data *cfqd = e->elevator_data; \
  3230. unsigned int __data; \
  3231. int ret = cfq_var_store(&__data, (page), count); \
  3232. if (__data < (MIN)) \
  3233. __data = (MIN); \
  3234. else if (__data > (MAX)) \
  3235. __data = (MAX); \
  3236. if (__CONV) \
  3237. *(__PTR) = msecs_to_jiffies(__data); \
  3238. else \
  3239. *(__PTR) = __data; \
  3240. return ret; \
  3241. }
  3242. STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
  3243. STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1,
  3244. UINT_MAX, 1);
  3245. STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1,
  3246. UINT_MAX, 1);
  3247. STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
  3248. STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
  3249. UINT_MAX, 0);
  3250. STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
  3251. STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
  3252. STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
  3253. STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
  3254. UINT_MAX, 0);
  3255. STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
  3256. STORE_FUNCTION(cfq_group_isolation_store, &cfqd->cfq_group_isolation, 0, 1, 0);
  3257. #undef STORE_FUNCTION
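/*
 * The attributes below are exposed per device, typically under
 * /sys/block/<dev>/queue/iosched/ while cfq is the active scheduler,
 * e.g. "echo 1 > /sys/block/sda/queue/iosched/low_latency".
 */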
  3258. #define CFQ_ATTR(name) \
  3259. __ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
  3260. static struct elv_fs_entry cfq_attrs[] = {
  3261. CFQ_ATTR(quantum),
  3262. CFQ_ATTR(fifo_expire_sync),
  3263. CFQ_ATTR(fifo_expire_async),
  3264. CFQ_ATTR(back_seek_max),
  3265. CFQ_ATTR(back_seek_penalty),
  3266. CFQ_ATTR(slice_sync),
  3267. CFQ_ATTR(slice_async),
  3268. CFQ_ATTR(slice_async_rq),
  3269. CFQ_ATTR(slice_idle),
  3270. CFQ_ATTR(low_latency),
  3271. CFQ_ATTR(group_isolation),
  3272. __ATTR_NULL
  3273. };
  3274. static struct elevator_type iosched_cfq = {
  3275. .ops = {
  3276. .elevator_merge_fn = cfq_merge,
  3277. .elevator_merged_fn = cfq_merged_request,
  3278. .elevator_merge_req_fn = cfq_merged_requests,
  3279. .elevator_allow_merge_fn = cfq_allow_merge,
  3280. .elevator_dispatch_fn = cfq_dispatch_requests,
  3281. .elevator_add_req_fn = cfq_insert_request,
  3282. .elevator_activate_req_fn = cfq_activate_request,
  3283. .elevator_deactivate_req_fn = cfq_deactivate_request,
  3284. .elevator_queue_empty_fn = cfq_queue_empty,
  3285. .elevator_completed_req_fn = cfq_completed_request,
  3286. .elevator_former_req_fn = elv_rb_former_request,
  3287. .elevator_latter_req_fn = elv_rb_latter_request,
  3288. .elevator_set_req_fn = cfq_set_request,
  3289. .elevator_put_req_fn = cfq_put_request,
  3290. .elevator_may_queue_fn = cfq_may_queue,
  3291. .elevator_init_fn = cfq_init_queue,
  3292. .elevator_exit_fn = cfq_exit_queue,
  3293. .trim = cfq_free_io_context,
  3294. },
  3295. .elevator_attrs = cfq_attrs,
  3296. .elevator_name = "cfq",
  3297. .elevator_owner = THIS_MODULE,
  3298. };
  3299. #ifdef CONFIG_CFQ_GROUP_IOSCHED
  3300. static struct blkio_policy_type blkio_policy_cfq = {
  3301. .ops = {
  3302. .blkio_unlink_group_fn = cfq_unlink_blkio_group,
  3303. .blkio_update_group_weight_fn = cfq_update_blkio_group_weight,
  3304. },
  3305. };
  3306. #else
  3307. static struct blkio_policy_type blkio_policy_cfq;
  3308. #endif
  3309. static int __init cfq_init(void)
  3310. {
  3311. /*
  3312. * could be 0 on HZ < 1000 setups
  3313. */
  3314. if (!cfq_slice_async)
  3315. cfq_slice_async = 1;
  3316. if (!cfq_slice_idle)
  3317. cfq_slice_idle = 1;
  3318. if (cfq_slab_setup())
  3319. return -ENOMEM;
  3320. elv_register(&iosched_cfq);
  3321. blkio_policy_register(&blkio_policy_cfq);
  3322. return 0;
  3323. }
  3324. static void __exit cfq_exit(void)
  3325. {
  3326. DECLARE_COMPLETION_ONSTACK(all_gone);
  3327. blkio_policy_unregister(&blkio_policy_cfq);
  3328. elv_unregister(&iosched_cfq);
  3329. ioc_gone = &all_gone;
  3330. /* ioc_gone's update must be visible before reading ioc_count */
  3331. smp_wmb();
  3332. /*
  3333. * this also protects us from entering cfq_slab_kill() with
  3334. * pending RCU callbacks
  3335. */
  3336. if (elv_ioc_count_read(cfq_ioc_count))
  3337. wait_for_completion(&all_gone);
  3338. cfq_slab_kill();
  3339. }
  3340. module_init(cfq_init);
  3341. module_exit(cfq_exit);
  3342. MODULE_AUTHOR("Jens Axboe");
  3343. MODULE_LICENSE("GPL");
  3344. MODULE_DESCRIPTION("Completely Fair Queueing IO scheduler");